mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-19 14:56:21 +00:00
Merge branch 'topic/ppc-kvm' into next
Merge our KVM topic branch.
This commit is contained in:
commit
9a04b0febb
@ -1014,6 +1014,18 @@ static inline void kvmppc_fix_ee_before_entry(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * Resynchronize the kernel's software IRQ-state bookkeeping after
 * exiting a guest.  Pairs with kvmppc_fix_ee_before_entry() (see the
 * call site in kvmppc_handle_exit(), which invokes this with the
 * comment "Fix irq state").
 */
static inline void kvmppc_fix_ee_after_exit(void)
{
#ifdef CONFIG_PPC64
	/*
	 * Only need to enable IRQs by hard enabling them after this.
	 * Record in the paca that interrupts are currently
	 * hard-disabled, and set the soft mask to "all disabled" so the
	 * lazy-IRQ software state agrees with the hardware state on
	 * guest exit.  NOTE(review): assumes hardware interrupts really
	 * are disabled at this point — ordering of these two stores
	 * relative to hard-enable matters, so do not reorder.
	 */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ALL_DISABLED);
#endif

	/* Inform irq tracing (lockdep/irqsoff) that interrupts are off. */
	trace_hardirqs_off();
}
|
||||
|
||||
|
||||
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
|
||||
{
|
||||
ulong ea;
|
||||
|
@ -1202,7 +1202,7 @@ static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
|
||||
resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__,
|
||||
resize->hpt.virt);
|
||||
|
||||
return 0;
|
||||
@ -1443,7 +1443,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
|
||||
*/
|
||||
mutex_unlock(&kvm->arch.mmu_setup_lock);
|
||||
|
||||
resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
|
||||
resize_hpt_debug(resize, "%s(): order = %d\n", __func__,
|
||||
resize->order);
|
||||
|
||||
err = resize_hpt_allocate(resize);
|
||||
@ -1887,8 +1887,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
|
||||
ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
|
||||
tmp);
|
||||
if (ret != H_SUCCESS) {
|
||||
pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
|
||||
"r=%lx\n", ret, i, v, r);
|
||||
pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r);
|
||||
goto out;
|
||||
}
|
||||
if (!mmu_ready && is_vrma_hpte(v)) {
|
||||
|
@ -294,14 +294,14 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||
struct kvmppc_spapr_tce_table *stt = NULL;
|
||||
struct kvmppc_spapr_tce_table *siter;
|
||||
struct mm_struct *mm = kvm->mm;
|
||||
unsigned long npages, size = args->size;
|
||||
unsigned long npages;
|
||||
int ret;
|
||||
|
||||
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
|
||||
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
|
||||
return -EINVAL;
|
||||
|
||||
npages = kvmppc_tce_pages(size);
|
||||
npages = kvmppc_tce_pages(args->size);
|
||||
ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -314,7 +314,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||
stt->liobn = args->liobn;
|
||||
stt->page_shift = args->page_shift;
|
||||
stt->offset = args->offset;
|
||||
stt->size = size;
|
||||
stt->size = args->size;
|
||||
stt->kvm = kvm;
|
||||
mutex_init(&stt->alloc_lock);
|
||||
INIT_LIST_HEAD_RCU(&stt->iommu_tables);
|
||||
|
@ -1190,8 +1190,7 @@ int kvmppc_uvmem_init(void)
|
||||
|
||||
pfn_first = res->start >> PAGE_SHIFT;
|
||||
pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
|
||||
kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
|
||||
sizeof(unsigned long), GFP_KERNEL);
|
||||
kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL);
|
||||
if (!kvmppc_uvmem_bitmap) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unmap;
|
||||
@ -1215,5 +1214,5 @@ void kvmppc_uvmem_free(void)
|
||||
memunmap_pages(&kvmppc_uvmem_pgmap);
|
||||
release_mem_region(kvmppc_uvmem_pgmap.range.start,
|
||||
range_len(&kvmppc_uvmem_pgmap.range));
|
||||
kfree(kvmppc_uvmem_bitmap);
|
||||
bitmap_free(kvmppc_uvmem_bitmap);
|
||||
}
|
||||
|
@ -539,7 +539,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
|
||||
if (irq == XICS_IPI || irq == 0) {
|
||||
/*
|
||||
* This barrier orders the setting of xc->cppr vs.
|
||||
* subsquent test of xc->mfrr done inside
|
||||
* subsequent test of xc->mfrr done inside
|
||||
* scan_interrupts and push_pending_to_hw
|
||||
*/
|
||||
smp_mb();
|
||||
@ -563,7 +563,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
|
||||
/*
|
||||
* This barrier orders both setting of in_eoi above vs,
|
||||
* subsequent test of guest_priority, and the setting
|
||||
* of xc->cppr vs. subsquent test of xc->mfrr done inside
|
||||
* of xc->cppr vs. subsequent test of xc->mfrr done inside
|
||||
* scan_interrupts and push_pending_to_hw
|
||||
*/
|
||||
smp_mb();
|
||||
@ -2390,7 +2390,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
|
||||
/*
|
||||
* Now, we select a target if we have one. If we don't we
|
||||
* leave the interrupt untargetted. It means that an interrupt
|
||||
* can become "untargetted" accross migration if it was masked
|
||||
* can become "untargetted" across migration if it was masked
|
||||
* by set_xive() but there is little we can do about it.
|
||||
*/
|
||||
|
||||
|
@ -1015,6 +1015,9 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
|
||||
u32 last_inst = KVM_INST_FETCH_FAILED;
|
||||
enum emulation_result emulated = EMULATE_DONE;
|
||||
|
||||
/* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
|
||||
kvmppc_fix_ee_after_exit();
|
||||
|
||||
/* update before a new last_exit_type is rewritten */
|
||||
kvmppc_update_timing_stats(vcpu);
|
||||
|
||||
|
@ -424,15 +424,6 @@ _GLOBAL(kvmppc_resume_host)
|
||||
mtspr SPRN_EPCR, r3
|
||||
isync
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
/*
|
||||
* We enter with interrupts disabled in hardware, but
|
||||
* we need to call RECONCILE_IRQ_STATE to ensure
|
||||
* that the software state is kept in sync.
|
||||
*/
|
||||
RECONCILE_IRQ_STATE(r3,r5)
|
||||
#endif
|
||||
|
||||
/* Switch to kernel stack and jump to handler. */
|
||||
mr r3, r4
|
||||
mr r5, r14 /* intno */
|
||||
|
Loading…
x
Reference in New Issue
Block a user