mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 02:36:02 +00:00
2650073f1b
iommu_dma_map_page() allocates swiotlb memory as a bounce buffer when an untrusted device wants to map only part of the memory in a granule. The goal is to disallow the untrusted device having DMA access to unrelated kernel data that may be sharing the granule. To meet this goal, the bounce buffer itself is zeroed, and any additional swiotlb memory up to alloc_size after the bounce buffer end (i.e., "post-padding") is also zeroed. However, as of commit 901c7280ca
("Reinstate some of "swiotlb: rework "fix info leak with DMA_FROM_DEVICE"""), swiotlb_tbl_map_single() always initializes the contents of the bounce buffer to the original memory. Zeroing the bounce buffer is redundant and probably wrong per the discussion in that commit. Only the post-padding needs to be zeroed. Also, when the DMA min_align_mask is non-zero, the allocated bounce buffer space may not start on a granule boundary. The swiotlb memory from the granule boundary to the start of the allocated bounce buffer might belong to some unrelated bounce buffer. So as described in the "second issue" in [1], it can't be zeroed to protect against untrusted devices. But as of commitaf133562d5
("swiotlb: extend buffer pre-padding to alloc_align_mask if necessary"), swiotlb_tbl_map_single() allocates pre-padding slots when necessary to meet min_align_mask requirements, making it possible to zero the pre-padding area as well. Finally, iommu_dma_map_page() uses the swiotlb for untrusted devices and also for certain kmalloc() memory. Current code does the zeroing for both cases, but it is needed only for the untrusted device case. Fix all of this by updating iommu_dma_map_page() to zero both the pre-padding and post-padding areas, but not the actual bounce buffer. Do this only in the case where the bounce buffer is used because of an untrusted device. [1] https://lore.kernel.org/all/20210929023300.335969-1-stevensd@google.com/ Signed-off-by: Michael Kelley <mhklinux@outlook.com> Reviewed-by: Petr Tesarik <petr@tesarici.cz> Signed-off-by: Christoph Hellwig <hch@lst.de>
171 lines
4.2 KiB
C
171 lines
4.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
|
|
|
|
#ifndef _IOVA_H_
|
|
#define _IOVA_H_
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/rbtree.h>
|
|
#include <linux/dma-mapping.h>
|
|
|
|
/* iova structure */
|
|
struct iova {
|
|
struct rb_node node;
|
|
unsigned long pfn_hi; /* Highest allocated pfn */
|
|
unsigned long pfn_lo; /* Lowest allocated pfn */
|
|
};
|
|
|
|
|
|
struct iova_rcache;
|
|
|
|
/* holds all the iova translations for a domain */
|
|
struct iova_domain {
|
|
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
|
|
struct rb_root rbroot; /* iova domain rbtree root */
|
|
struct rb_node *cached_node; /* Save last alloced node */
|
|
struct rb_node *cached32_node; /* Save last 32-bit alloced node */
|
|
unsigned long granule; /* pfn granularity for this domain */
|
|
unsigned long start_pfn; /* Lower limit for this domain */
|
|
unsigned long dma_32bit_pfn;
|
|
unsigned long max32_alloc_size; /* Size of last failed allocation */
|
|
struct iova anchor; /* rbtree lookup anchor */
|
|
|
|
struct iova_rcache *rcaches;
|
|
struct hlist_node cpuhp_dead;
|
|
};
|
|
|
|
static inline unsigned long iova_size(struct iova *iova)
|
|
{
|
|
return iova->pfn_hi - iova->pfn_lo + 1;
|
|
}
|
|
|
|
static inline unsigned long iova_shift(struct iova_domain *iovad)
|
|
{
|
|
return __ffs(iovad->granule);
|
|
}
|
|
|
|
static inline unsigned long iova_mask(struct iova_domain *iovad)
|
|
{
|
|
return iovad->granule - 1;
|
|
}
|
|
|
|
/*
 * iova_offset - byte offset of @iova within its granule.
 * @iovad: the iova domain.
 * @iova:  DMA address to examine.
 */
static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}
|
|
|
|
/*
 * iova_align - round @size up to the next granule boundary.
 * @iovad: the iova domain.
 * @size:  size in bytes.
 */
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}
|
|
|
|
/*
 * iova_align_down - round @size down to a granule boundary.
 * @iovad: the iova domain.
 * @size:  size in bytes.
 */
static inline size_t iova_align_down(struct iova_domain *iovad, size_t size)
{
	return ALIGN_DOWN(size, iovad->granule);
}
|
|
|
|
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
|
|
{
|
|
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
|
|
}
|
|
|
|
/*
 * iova_pfn - convert a DMA address to its domain pfn.
 * @iovad: the iova domain.
 * @iova:  DMA address to convert.
 */
static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
|
|
|
|
#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
|
|
int iova_cache_get(void);
|
|
void iova_cache_put(void);
|
|
|
|
unsigned long iova_rcache_range(void);
|
|
|
|
void free_iova(struct iova_domain *iovad, unsigned long pfn);
|
|
void __free_iova(struct iova_domain *iovad, struct iova *iova);
|
|
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
|
|
unsigned long limit_pfn,
|
|
bool size_aligned);
|
|
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
|
|
unsigned long size);
|
|
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
|
|
unsigned long limit_pfn, bool flush_rcache);
|
|
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
|
|
unsigned long pfn_hi);
|
|
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
|
|
unsigned long start_pfn);
|
|
int iova_domain_init_rcaches(struct iova_domain *iovad);
|
|
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
|
|
void put_iova_domain(struct iova_domain *iovad);
|
|
#else
|
|
static inline int iova_cache_get(void)
|
|
{
|
|
return -ENOTSUPP;
|
|
}
|
|
|
|
static inline void iova_cache_put(void)
|
|
{
|
|
}
|
|
|
|
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
|
|
{
|
|
}
|
|
|
|
static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
|
|
{
|
|
}
|
|
|
|
static inline struct iova *alloc_iova(struct iova_domain *iovad,
|
|
unsigned long size,
|
|
unsigned long limit_pfn,
|
|
bool size_aligned)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline void free_iova_fast(struct iova_domain *iovad,
|
|
unsigned long pfn,
|
|
unsigned long size)
|
|
{
|
|
}
|
|
|
|
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
|
|
unsigned long size,
|
|
unsigned long limit_pfn,
|
|
bool flush_rcache)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline struct iova *reserve_iova(struct iova_domain *iovad,
|
|
unsigned long pfn_lo,
|
|
unsigned long pfn_hi)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline void init_iova_domain(struct iova_domain *iovad,
|
|
unsigned long granule,
|
|
unsigned long start_pfn)
|
|
{
|
|
}
|
|
|
|
static inline struct iova *find_iova(struct iova_domain *iovad,
|
|
unsigned long pfn)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline void put_iova_domain(struct iova_domain *iovad)
|
|
{
|
|
}
|
|
|
|
#endif
|
|
|
|
#endif
|