KVM: s390: pv: properly handle page flags for protected guests

Introduce variants of the convert and destroy page functions that also
clear the PG_arch_1 bit used to mark them as secure pages.

The PG_arch_1 flag is always allowed to overindicate, i.e. it may still
be set on a page that is not (or no longer) secure; using the new
functions introduced here reduces the extent of that overindication and
thus improves performance.

These new functions can only be called on pages for which a reference
is already being held.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Link: https://lore.kernel.org/r/20210920132502.36111-7-imbrenda@linux.ibm.com
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
commit 380d97bd02
parent 85f517b294
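For illustration only (not part of the commit): a minimal sketch of how a caller that already holds a reference to the page, for example through a still-present mapping, would use the new owned variant. The helper name below is hypothetical; it simply mirrors the pattern used in the pgtable.h hunks of this patch.

/*
 * Illustrative sketch only, not from the patch: the mapping still pins
 * the page, so the "owned" variant can be called without taking an
 * extra reference here.
 */
static void example_export_protected_pte(struct mm_struct *mm, pte_t pte)
{
	if (!mm_is_protected(mm) || !pte_present(pte))
		return;

	/*
	 * Export the page back to non-secure; on success this also
	 * clears PG_arch_1, keeping the flag's overindication small.
	 */
	uv_convert_owned_from_secure(pte_val(pte) & PAGE_MASK);
}

The new helpers themselves take an additional short-lived reference with get_page()/put_page() around the Ultravisor call before clearing PG_arch_1, as shown in the uv.c hunks below.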
arch/s390/include/asm/pgtable.h

@@ -1074,8 +1074,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pte_t res;
 
 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+	/* At this point the reference through the mapping is still present */
 	if (mm_is_protected(mm) && pte_present(res))
-		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
 	return res;
 }
 
@@ -1091,8 +1092,9 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 	pte_t res;
 
 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+	/* At this point the reference through the mapping is still present */
 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
-		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
 	return res;
 }
 
@@ -1116,8 +1118,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	} else {
 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
 	}
+	/* At this point the reference through the mapping is still present */
 	if (mm_is_protected(mm) && pte_present(res))
-		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
 	return res;
 }
 

arch/s390/include/asm/uv.h

@@ -356,8 +356,9 @@ static inline int is_prot_virt_host(void)
 }
 
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
-int uv_destroy_page(unsigned long paddr);
+int uv_destroy_owned_page(unsigned long paddr);
 int uv_convert_from_secure(unsigned long paddr);
+int uv_convert_owned_from_secure(unsigned long paddr);
 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
 
 void setup_uv(void);
@@ -365,7 +366,7 @@ void setup_uv(void);
 #define is_prot_virt_host() 0
 static inline void setup_uv(void) {}
 
-static inline int uv_destroy_page(unsigned long paddr)
+static inline int uv_destroy_owned_page(unsigned long paddr)
 {
 	return 0;
 }
@@ -374,6 +375,11 @@ static inline int uv_convert_from_secure(unsigned long paddr)
 {
 	return 0;
 }
+
+static inline int uv_convert_owned_from_secure(unsigned long paddr)
+{
+	return 0;
+}
 #endif
 
 #endif /* _ASM_S390_UV_H */

arch/s390/kernel/uv.c

@@ -100,7 +100,7 @@ static int uv_pin_shared(unsigned long paddr)
  *
  * @paddr: Absolute host address of page to be destroyed
  */
-int uv_destroy_page(unsigned long paddr)
+static int uv_destroy_page(unsigned long paddr)
 {
 	struct uv_cb_cfs uvcb = {
 		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
@@ -120,6 +120,22 @@ int uv_destroy_page(unsigned long paddr)
 	return 0;
 }
 
+/*
+ * The caller must already hold a reference to the page
+ */
+int uv_destroy_owned_page(unsigned long paddr)
+{
+	struct page *page = phys_to_page(paddr);
+	int rc;
+
+	get_page(page);
+	rc = uv_destroy_page(paddr);
+	if (!rc)
+		clear_bit(PG_arch_1, &page->flags);
+	put_page(page);
+	return rc;
+}
+
 /*
  * Requests the Ultravisor to encrypt a guest page and make it
  * accessible to the host for paging (export).
@@ -139,6 +155,22 @@ int uv_convert_from_secure(unsigned long paddr)
 	return 0;
 }
 
+/*
+ * The caller must already hold a reference to the page
+ */
+int uv_convert_owned_from_secure(unsigned long paddr)
+{
+	struct page *page = phys_to_page(paddr);
+	int rc;
+
+	get_page(page);
+	rc = uv_convert_from_secure(paddr);
+	if (!rc)
+		clear_bit(PG_arch_1, &page->flags);
+	put_page(page);
+	return rc;
+}
+
 /*
  * Calculate the expected ref_count for a page that would otherwise have no
  * further pins. This was cribbed from similar functions in other places in

arch/s390/mm/gmap.c

@@ -2684,8 +2684,10 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
 {
 	pte_t pte = READ_ONCE(*ptep);
 
+	/* There is a reference through the mapping */
 	if (pte_present(pte))
-		WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK));
+		WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK));
+
 	return 0;
 }
 