Merge tag 'x86_urgent_for_v6.13_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Add a terminating zero end-element to the array describing AMD CPUs
   affected by erratum 1386 so that the matching loop actually
   terminates instead of going off into the weeds

 - Update the boot protocol documentation to mention the fact that the
   preferred address to load the kernel to is considered in the
   relocatable kernel case too

 - Flush the memory buffer containing the microcode patch after applying
   microcode on AMD Zen1 and Zen2, to avoid unnecessary slowdowns

 - Make sure the PPIN CPU feature flag is cleared on all CPUs if PPIN
   has been disabled

* tag 'x86_urgent_for_v6.13_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/CPU/AMD: Terminate the erratum_1386_microcode array
  x86/Documentation: Update algo in init_size description of boot protocol
  x86/microcode/AMD: Flush patch buffer mapping after application
  x86/mm: Carve out INVLPG inline asm for use by others
  x86/cpu: Fix PPIN initialization
commit 58ac609b99
Linus Torvalds, 2024-12-01 12:35:37 -08:00

6 changed files with 41 additions and 11 deletions

Documentation/arch/x86/boot.rst

@@ -896,10 +896,19 @@ Offset/size: 0x260/4
 The kernel runtime start address is determined by the following algorithm::

-        if (relocatable_kernel)
-                runtime_start = align_up(load_address, kernel_alignment)
-        else
-                runtime_start = pref_address
+        if (relocatable_kernel) {
+                if (load_address < pref_address)
+                        load_address = pref_address;
+
+                runtime_start = align_up(load_address, kernel_alignment);
+        } else {
+                runtime_start = pref_address;
+        }

 Hence the necessary memory window location and size can be estimated by
 a boot loader as::

         memory_window_start = runtime_start;
         memory_window_size = init_size;

 ============    ===============
 Field name:     handover_offset
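
The documented algorithm above is straightforward to turn into boot loader code. The following is only an illustrative sketch, not code from any particular boot loader: the parameter names (relocatable_kernel, pref_address, kernel_alignment, init_size) mirror the setup-header fields described in the boot protocol, and align_up() plus estimate_memory_window() are made-up local helpers.

#include <stdint.h>

/* Round addr up to the next multiple of align (align must be a power of two). */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
        return (addr + align - 1) & ~(align - 1);
}

/*
 * Compute where the kernel will run and how large a memory window the
 * boot loader must keep free for it, per the algorithm documented above.
 */
static void estimate_memory_window(int relocatable_kernel,
                                   uint64_t load_address,
                                   uint64_t pref_address,
                                   uint64_t kernel_alignment,
                                   uint64_t init_size,
                                   uint64_t *window_start,
                                   uint64_t *window_size)
{
        uint64_t runtime_start;

        if (relocatable_kernel) {
                /* The preferred address is honored for relocatable kernels too. */
                if (load_address < pref_address)
                        load_address = pref_address;
                runtime_start = align_up(load_address, kernel_alignment);
        } else {
                runtime_start = pref_address;
        }

        *window_start = runtime_start;
        *window_size  = init_size;
}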

arch/x86/include/asm/tlb.h

@@ -34,4 +34,8 @@ static inline void __tlb_remove_table(void *table)
         free_page_and_swap_cache(table);
 }

+static inline void invlpg(unsigned long addr)
+{
+        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
 #endif /* _ASM_X86_TLB_H */

arch/x86/kernel/cpu/amd.c

@@ -798,6 +798,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 static const struct x86_cpu_desc erratum_1386_microcode[] = {
         AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e),
         AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
+        {},
 };

 static void fix_erratum_1386(struct cpuinfo_x86 *c)
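
For background on why the added terminator matters: the table above carries no explicit length, so the code that scans it keeps advancing until it reaches an all-zero element. The loop below is a simplified, self-contained illustration of that pattern with generic names; it is not the kernel's actual matcher.

#include <stddef.h>

struct cpu_desc {
        unsigned char family;
        unsigned char model;
        unsigned char stepping;
        unsigned int  min_ucode_rev;
};

static const struct cpu_desc example_table[] = {
        { 0x17, 0x01, 0x2, 0x0800126e },
        { 0x17, 0x31, 0x0, 0x08301052 },
        {},     /* terminator: the all-zero entry is what stops the scan below */
};

/* Walk a sentinel-terminated table; no length is passed in. */
static const struct cpu_desc *match_cpu(const struct cpu_desc *table,
                                        unsigned char family, unsigned char model)
{
        const struct cpu_desc *d;

        /* Without the {} terminator this loop reads past the end of the array. */
        for (d = table; d->family | d->model | d->stepping | d->min_ucode_rev; d++) {
                if (d->family == family && d->model == model)
                        return d;
        }

        return NULL;
}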

arch/x86/kernel/cpu/common.c

@@ -169,7 +169,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
         }

 clear_ppin:
-        clear_cpu_cap(c, info->feature);
+        setup_clear_cpu_cap(info->feature);
 }

 static void default_init(struct cpuinfo_x86 *c)
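
A rough sketch of the semantic difference behind this one-liner, using generic data structures rather than the kernel's real cpufeature code: clearing a capability on the CPU currently being initialized only hides the bit on that one CPU, while a "setup" clear records the bit in a global mask that is applied to every CPU as it is brought up, so the flag ends up off everywhere.

#include <stddef.h>

#define NR_CPUS    8
#define NCAPWORDS  8    /* capability bits stored as an array of words */

static unsigned long cpu_caps[NR_CPUS][NCAPWORDS];  /* per-CPU capability bits */
static unsigned long caps_cleared[NCAPWORDS];       /* bits forced off for all CPUs */

#define CAP_WORD(bit)  ((bit) / (8 * sizeof(unsigned long)))
#define CAP_MASK(bit)  (1UL << ((bit) % (8 * sizeof(unsigned long))))

/* Clears the feature on one CPU only; CPUs initialized later still see it. */
static void clear_cap(int cpu, int feature)
{
        cpu_caps[cpu][CAP_WORD(feature)] &= ~CAP_MASK(feature);
}

/* Records the feature as globally disabled; every CPU picks it up below. */
static void setup_clear_cap(int feature)
{
        caps_cleared[CAP_WORD(feature)] |= CAP_MASK(feature);
}

/* Run during each CPU's bring-up: apply the forced-off mask. */
static void apply_forced_clears(int cpu)
{
        for (size_t i = 0; i < NCAPWORDS; i++)
                cpu_caps[cpu][i] &= ~caps_cleared[i];
}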

arch/x86/kernel/cpu/microcode/amd.c

@@ -34,6 +34,7 @@
 #include <asm/setup.h>
 #include <asm/cpu.h>
 #include <asm/msr.h>
+#include <asm/tlb.h>

 #include "internal.h"

@@ -483,11 +484,25 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
         }
 }

-static int __apply_microcode_amd(struct microcode_amd *mc)
+static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
 {
+        unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
         u32 rev, dummy;

-        native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
+        native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
+
+        if (x86_family(bsp_cpuid_1_eax) == 0x17) {
+                unsigned long p_addr_end = p_addr + psize - 1;
+
+                invlpg(p_addr);
+
+                /*
+                 * Flush next page too if patch image is crossing a page
+                 * boundary.
+                 */
+                if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
+                        invlpg(p_addr_end);
+        }

         /* verify patch application was successful */
         native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -529,7 +544,7 @@ static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
         if (old_rev > mc->hdr.patch_id)
                 return ret;

-        return !__apply_microcode_amd(mc);
+        return !__apply_microcode_amd(mc, desc.psize);
 }

 static bool get_builtin_microcode(struct cpio_data *cp)
@@ -745,7 +760,7 @@ void reload_ucode_amd(unsigned int cpu)
         rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

         if (rev < mc->hdr.patch_id) {
-                if (!__apply_microcode_amd(mc))
+                if (!__apply_microcode_amd(mc, p->size))
                         pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
         }
 }
@@ -798,7 +813,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
                 goto out;
         }

-        if (__apply_microcode_amd(mc_amd)) {
+        if (__apply_microcode_amd(mc_amd, p->size)) {
                 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
                        cpu, mc_amd->hdr.patch_id);
                 return UCODE_ERROR;
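
The page-boundary test added to __apply_microcode_amd() above is compact enough to misread, so here it is restated as a standalone sketch. This assumes 4 KiB pages (PAGE_SHIFT == 12); the helper name and the example addresses are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

/*
 * A buffer [addr, addr + size) crosses a page boundary exactly when its
 * first and last byte live in different page frames, i.e. when the two
 * addresses differ once the low PAGE_SHIFT bits are dropped.
 */
static bool crosses_page(unsigned long addr, unsigned long size)
{
        unsigned long end = addr + size - 1;    /* last byte, not one-past-the-end */

        return (addr >> PAGE_SHIFT) != (end >> PAGE_SHIFT);
}

int main(void)
{
        /* 3200-byte patch starting 1 KiB below a page boundary: two flushes needed. */
        printf("%d\n", crosses_page(0x7c00, 3200));     /* prints 1 */
        /* Same patch starting exactly at a page boundary: one flush is enough. */
        printf("%d\n", crosses_page(0x8000, 3200));     /* prints 0 */
        return 0;
}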

arch/x86/mm/tlb.c

@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/apic.h>
 #include <asm/perf_event.h>
+#include <asm/tlb.h>

 #include "mm_internal.h"

@@ -1140,7 +1141,7 @@ STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
         bool cpu_pcide;

         /* Flush 'addr' from the kernel PCID: */
-        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+        invlpg(addr);

         /* If PTI is off there is no user PCID and nothing to flush. */
         if (!static_cpu_has(X86_FEATURE_PTI))