commit 03362a05c5

    Merge branch 'amd-iommu/passthrough' into amd-iommu/2.6.32

    Conflicts:
    	arch/x86/kernel/amd_iommu.c
    	arch/x86/kernel/amd_iommu_init.c
@@ -25,6 +25,7 @@
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
 extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
@@ -143,6 +143,7 @@
 #define EVT_BUFFER_SIZE		8192 /* 512 entries */
 #define EVT_LEN_MASK		(0x9ULL << 56)
 
+#define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03
@@ -194,6 +195,9 @@
 #define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
 #define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
 					      domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
+					      translation */
+
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)				\
 	do {							\
@@ -41,6 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
 #ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 
 /*
@@ -1130,25 +1137,41 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
+static void __attach_device(struct amd_iommu *iommu,
 			    struct protection_domain *domain,
 			    u16 devid)
 {
-	unsigned long flags;
-	u64 pte_root = virt_to_phys(domain->pt_root);
+	u64 pte_root;
 
-	domain->dev_cnt += 1;
+	/* lock domain */
+	spin_lock(&domain->lock);
+
+	pte_root = virt_to_phys(domain->pt_root);
 
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
-	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
 	amd_iommu_dev_table[devid].data[2] = domain->id;
+	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 
 	amd_iommu_pd_table[devid] = domain;
+
+	domain->dev_cnt += 1;
+
+	/* ready */
+	spin_unlock(&domain->lock);
+}
+
+static void attach_device(struct amd_iommu *iommu,
+			  struct protection_domain *domain,
+			  u16 devid)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	__attach_device(iommu, domain, devid);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	/*
@@ -1182,6 +1205,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
 
 	/* ready */
 	spin_unlock(&domain->lock);
+
+	/*
+	 * If we run in passthrough mode the device must be assigned to the
+	 * passthrough domain if it is detached from any other domain
+	 */
+	if (iommu_pass_through) {
+		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+		__attach_device(iommu, pt_domain, devid);
+	}
 }
 
 /*
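Reviewer note, not part of the patch: the passthrough re-attach above uses __attach_device() rather than attach_device() because of lock nesting; detach_device() (unchanged code, not shown in this diff) already holds amd_iommu_devtable_lock when __detach_device() runs, so only the variant that skips that lock can be called here. A sketch of the resulting nesting, derived from the two hunks above:

/*
 * Sketch only - derived from the hunks above, not code from the patch:
 *
 *   detach_device()                          takes amd_iommu_devtable_lock (write)
 *     __detach_device()
 *       spin_lock(&domain->lock) ... spin_unlock(&domain->lock)
 *       if (iommu_pass_through)
 *         __attach_device(iommu, pt_domain, devid)   takes only pt_domain->lock
 */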
@@ -1227,6 +1259,8 @@ static int device_change_notifier(struct notifier_block *nb,
 	case BUS_NOTIFY_UNBOUND_DRIVER:
 		if (!domain)
 			goto out;
+		if (iommu_pass_through)
+			break;
 		detach_device(domain, devid);
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:
@@ -2051,19 +2085,47 @@ static void cleanup_domain(struct protection_domain *domain)
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static void protection_domain_free(struct protection_domain *domain)
+{
+	if (!domain)
+		return;
+
+	if (domain->id)
+		domain_id_free(domain->id);
+
+	kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
 {
 	struct protection_domain *domain;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
-		return -ENOMEM;
+		return NULL;
 
 	spin_lock_init(&domain->lock);
-	domain->mode = PAGE_MODE_3_LEVEL;
 	domain->id = domain_id_alloc();
 	if (!domain->id)
+		goto out_err;
+
+	return domain;
+
+out_err:
+	kfree(domain);
+
+	return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+	struct protection_domain *domain;
+
+	domain = protection_domain_alloc();
+	if (!domain)
 		goto out_free;
 
+	domain->mode    = PAGE_MODE_3_LEVEL;
 	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!domain->pt_root)
 		goto out_free;
@@ -2073,7 +2135,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
 	return 0;
 
 out_free:
-	kfree(domain);
+	protection_domain_free(domain);
 
 	return -ENOMEM;
 }
@@ -2254,3 +2316,46 @@ static struct iommu_ops amd_iommu_ops = {
 	.domain_has_cap = amd_iommu_domain_has_cap,
 };
 
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of IOMMU for pass through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+	struct pci_dev *dev = NULL;
+	u16 devid, devid2;
+
+	/* allocate passthroug domain */
+	pt_domain = protection_domain_alloc();
+	if (!pt_domain)
+		return -ENOMEM;
+
+	pt_domain->mode |= PAGE_MODE_NONE;
+
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		struct amd_iommu *iommu;
+
+		devid = calc_devid(dev->bus->number, dev->devfn);
+		if (devid > amd_iommu_last_bdf)
+			continue;
+
+		devid2 = amd_iommu_alias_table[devid];
+
+		iommu = amd_iommu_rlookup_table[devid2];
+		if (!iommu)
+			continue;
+
+		__attach_device(iommu, pt_domain, devid);
+		__attach_device(iommu, pt_domain, devid2);
+	}
+
+	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+	return 0;
+}
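For context, not part of this diff: the loop above walks every PCI device, converts bus/devfn to the 16-bit device ID used as index into the device, alias and rlookup tables, and attaches both the device and its alias to pt_domain; since pt_domain->mode is PAGE_MODE_NONE (0), the mode field written into the device-table entry by __attach_device() is zero, i.e. no translation levels. The calc_devid() helper comes from the existing amd_iommu_types.h and is roughly:

/* Roughly as defined in amd_iommu_types.h (existing code, not part of this diff): */
#define calc_devid(bus, devfn)	(((u16)(bus) << 8) | (u16)(devfn))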
|
@ -1252,12 +1252,18 @@ int __init amd_iommu_init(void)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto free;
|
goto free;
|
||||||
|
|
||||||
|
if (iommu_pass_through)
|
||||||
|
ret = amd_iommu_init_passthrough();
|
||||||
|
else
|
||||||
ret = amd_iommu_init_dma_ops();
|
ret = amd_iommu_init_dma_ops();
|
||||||
if (ret)
|
if (ret)
|
||||||
goto free;
|
goto free;
|
||||||
|
|
||||||
enable_iommus();
|
enable_iommus();
|
||||||
|
|
||||||
|
if (iommu_pass_through)
|
||||||
|
goto out;
|
||||||
|
|
||||||
printk(KERN_INFO "AMD-Vi: device isolation ");
|
printk(KERN_INFO "AMD-Vi: device isolation ");
|
||||||
if (amd_iommu_isolate)
|
if (amd_iommu_isolate)
|
||||||
printk("enabled\n");
|
printk("enabled\n");
|
||||||
|
@ -32,7 +32,14 @@ int no_iommu __read_mostly;
|
|||||||
/* Set this to 1 if there is a HW IOMMU in the system */
|
/* Set this to 1 if there is a HW IOMMU in the system */
|
||||||
int iommu_detected __read_mostly = 0;
|
int iommu_detected __read_mostly = 0;
|
||||||
|
|
||||||
int iommu_pass_through;
|
/*
|
||||||
|
* This variable becomes 1 if iommu=pt is passed on the kernel command line.
|
||||||
|
* If this variable is 1, IOMMU implementations do no DMA ranslation for
|
||||||
|
* devices and allow every device to access to whole physical memory. This is
|
||||||
|
* useful if a user want to use an IOMMU only for KVM device assignment to
|
||||||
|
* guests and not for driver dma translation.
|
||||||
|
*/
|
||||||
|
int iommu_pass_through __read_mostly;
|
||||||
|
|
||||||
dma_addr_t bad_dma_address __read_mostly = 0;
|
dma_addr_t bad_dma_address __read_mostly = 0;
|
||||||
EXPORT_SYMBOL(bad_dma_address);
|
EXPORT_SYMBOL(bad_dma_address);
|
||||||
|
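Note, not part of this diff: iommu_pass_through is set by the existing iommu= early-param handler in the same file; the hunk above only adds the comment and the __read_mostly annotation. A condensed sketch of that handler, with every iommu= option other than "pt" elided:

/*
 * Condensed sketch of the existing parser in arch/x86/kernel/pci-dma.c
 * (pre-existing code, not part of this diff); only the "pt" token is shown.
 */
static __init int iommu_setup(char *p)
{
	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;	/* iommu=pt on the command line */

		/* ... handling of the other iommu= options elided ... */

		p += strcspn(p, ",");	/* skip to the next comma-separated token */
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);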