LoongArch fixes for v6.8-rc2
-----BEGIN PGP SIGNATURE-----

iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmW0uEAWHGNoZW5odWFj
YWlAa2VybmVsLm9yZwAKCRAChivD8uImeqGUEACf2JP1cPmaWfZZpQtpRwD2umdm
Tk1esuMqfvS5RqUqZUKMMc/fHe2JxgZ6J7NZFoAzrd3lZTOxfEjv0hZGEui6Sb8a
mnOszrPX3fbP45ViIu7HqUOnkluEofaeTZmAATuDlHroXvvpXV8uGFDyujH085iG
ZhpKurE5aT3yxGphHguFLBH14ZXIAHAZHR0NUFs54shAcGV5n2HZipbN7S081iwv
RC+ah61Ls93grebC1PxvtvbTPrvEUJo00eqHErWn6u72Ek7bbYpoWvcmxyXXHWAH
ETBf9MmMEQccTRCD81wpzWdf1/TdZ2tsrifh5efCHCxhu5flu3RxFygRBR5lNMtS
+IdiJnHjZ0xF+tXDGBNiFQ8+b+BvSQ58haj30ob6dFs16e4WMbP6lrACjZA6rBEl
Ks5qDwFoeGKxDyJmBflsXK5CUgTFHFD3STPHSZ6o4ChSoZRaiC6W7QxdwHLrcacc
51ThKKUQsaWiR8sa9ag7svCJYcaXzurQtXPsvi6L5VfoT5Rk0HzxbczVoi08M/+z
t4V03sEIYtcbRCHblKupz20A9kRXCh7dq/ShrGqZ1A0T2K3n7R+3ol9a5VjKdaJ3
y9lQDYoEpaqFM/G0zcaUpw5ueGoynO2g0Pq6b+U/wWvwWFnAQQAHgtQpaZvscBsw
D9ExPRWgsDtXQWzRfg==
=3XNw
-----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix boot failure on machines with more than 8 nodes, and fix two
  build errors about KVM"

* tag 'loongarch-fixes-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Add returns to SIMD stubs
  LoongArch: KVM: Fix build due to API changes
  LoongArch/smp: Call rcutree_report_cpu_starting() at tlb_init()
commit 955340433a
@@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu);
 void kvm_save_lsx(struct loongarch_fpu *fpu);
 void kvm_restore_lsx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
 #endif
@@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu);
 void kvm_save_lasx(struct loongarch_fpu *fpu);
 void kvm_restore_lasx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
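The two hunks above belong to the "LoongArch: KVM: Add returns to SIMD stubs" patch (presumably in arch/loongarch/include/asm/kvm_vcpu.h): the !CONFIG_CPU_HAS_LSX / !CONFIG_CPU_HAS_LASX stubs are declared to return int but had empty bodies, which trips the compiler's return-type warning, and a kernel built with warnings treated as errors then fails to build. A minimal sketch of the failure mode, written as ordinary userspace C rather than the kernel header itself:

#include <errno.h>

struct kvm_vcpu;        /* opaque stand-in for the real vcpu type */

#if 0   /* old form: returns int but has no return statement -> -Wreturn-type */
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { }
#endif

/* fixed form, as in the hunks above: the stub reports "not supported" */
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }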
@@ -509,7 +509,6 @@ asmlinkage void start_secondary(void)
         sync_counter();
         cpu = raw_smp_processor_id();
         set_my_cpu_offset(per_cpu_offset(cpu));
-        rcutree_report_cpu_starting(cpu);
 
         cpu_probe();
         constant_clockevent_init();
@@ -675,7 +675,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
  *   consuming it. In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -855,7 +855,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 
         /* Check if an invalidation has taken place since we got pfn */
         spin_lock(&kvm->mmu_lock);
-        if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+        if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                 /*
                  * This can happen when mappings are changed asynchronously, but
                  * also synchronously if a COW is triggered by
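The two hunks above make up the "LoongArch: KVM: Fix build due to API changes" patch (presumably arch/loongarch/kvm/mmu.c): generic KVM re-keyed its MMU-notifier retry helper from mmu_invalidate_retry_hva() to mmu_invalidate_retry_gfn(), so the LoongArch fault path and the comment describing it must follow or the build breaks. A rough sketch of the retry pattern that call site implements; the struct and function names below are simplified stand-ins, not the in-tree definitions:

/*
 * kvm_map_page() snapshots an invalidation sequence count (mmu_seq) before
 * translating gpa -> pfn, then re-checks it under mmu_lock. If an MMU-notifier
 * invalidation ran in between, the pfn may be stale and the fault is retried.
 * The _gfn() variant narrows the check to the guest-frame range being
 * invalidated, which is why the call site now passes gfn instead of hva.
 */
struct kvm_sketch {
        unsigned long mmu_invalidate_seq;       /* bumped by each invalidation */
};

static int mapping_is_stale(struct kvm_sketch *kvm, unsigned long mmu_seq)
{
        return kvm->mmu_invalidate_seq != mmu_seq;      /* retry the fault if true */
}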
@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
                 set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
                 set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
                 set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
-        }
+        } else {
+                int vec_sz __maybe_unused;
+                void *addr __maybe_unused;
+                struct page *page __maybe_unused;
+
+                /* Avoid lockdep warning */
+                rcutree_report_cpu_starting(cpu);
+
 #ifdef CONFIG_NUMA
-        else {
-                void *addr;
-                struct page *page;
-                const int vec_sz = sizeof(exception_handlers);
+                vec_sz = sizeof(exception_handlers);
 
                 if (pcpu_handlers[cpu])
                         return;
@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
                 csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
                 csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
                 csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
-        }
 #endif
+        }
 }
 
 void tlb_init(int cpu)
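The remaining hunks are "LoongArch/smp: Call rcutree_report_cpu_starting() at tlb_init()", which the merge message credits with fixing boot on machines with more than 8 nodes: the call is dropped from start_secondary() and moved into the non-boot-CPU branch of setup_tlb_handler() (reached via tlb_init()), ahead of the CONFIG_NUMA handler-page allocation; the in-tree comment next to the moved call reads "/* Avoid lockdep warning */". A toy ordering sketch in plain userspace C, with printf stand-ins for the kernel functions (alloc_pernode_handlers is a hypothetical name for the allocation step, not a kernel symbol):

#include <stdio.h>

static void rcutree_report_cpu_starting(int cpu)
{
        printf("cpu%d: tell RCU this CPU is starting\n", cpu);
}

static void alloc_pernode_handlers(int cpu)     /* hypothetical name */
{
        printf("cpu%d: copy exception handlers into node-local pages\n", cpu);
}

static void setup_tlb_handler(int cpu)
{
        if (cpu != 0) {
                /* ordering from the hunk above: report to RCU first ... */
                rcutree_report_cpu_starting(cpu);
                /* ... then do the allocation that may reach RCU/lockdep */
                alloc_pernode_handlers(cpu);
        }
}

static void tlb_init(int cpu)
{
        setup_tlb_handler(cpu);
}

int main(void)
{
        tlb_init(1);    /* a secondary CPU */
        return 0;
}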