Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

drivers/net/ethernet/ti/icssg/icssg_prueth.c

net/mac80211/chan.c
  89884459a0 ("wifi: mac80211: fix idle calculation with multi-link")
  87f5500285 ("wifi: mac80211: simplify ieee80211_assign_link_chanctx()")
https://lore.kernel.org/all/20240422105623.7b1fbda2@canb.auug.org.au/

net/unix/garbage.c
  1971d13ffa ("af_unix: Suppress false-positive lockdep splat for spin_lock() in __unix_gc().")
  4090fa373f ("af_unix: Replace garbage collection algorithm.")

drivers/net/ethernet/ti/icssg/icssg_prueth.c
drivers/net/ethernet/ti/icssg/icssg_common.c
  4dcd0e83ea ("net: ti: icssg-prueth: Fix signedness bug in prueth_init_rx_chns()")
  e2dc7bfd67 ("net: ti: icssg-prueth: Move common functions into a separate file")

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 2bd87951de (Jakub Kicinski, 2024-04-25 12:40:48 -07:00)
321 changed files with 4235 additions and 2482 deletions
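For reference, a cross-merge like this is normally produced by pulling the netdev net tree into a net-next checkout and resolving the listed conflicts by hand. A minimal sketch of that flow (branch layout and exact commands are assumed here, not taken from this commit):

    # in a net-next working tree
    git pull git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
    # resolve the conflicts listed above, then stage and record the merge
    git add net/mac80211/chan.c net/unix/garbage.c drivers/net/ethernet/ti/icssg/
    git commit --signoff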

@@ -38,6 +38,16 @@ Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexey Makhalov <alexey.amakhalov@broadcom.com> <amakhalov@vmware.com>
+Alex Elder <elder@kernel.org>
+Alex Elder <elder@kernel.org> <aelder@sgi.com>
+Alex Elder <elder@kernel.org> <alex.elder@linaro.org>
+Alex Elder <elder@kernel.org> <alex.elder@linary.org>
+Alex Elder <elder@kernel.org> <elder@dreamhost.com>
+Alex Elder <elder@kernel.org> <elder@dreawmhost.com>
+Alex Elder <elder@kernel.org> <elder@ieee.org>
+Alex Elder <elder@kernel.org> <elder@inktank.com>
+Alex Elder <elder@kernel.org> <elder@linaro.org>
+Alex Elder <elder@kernel.org> <elder@newdream.net>
 Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
 Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
 Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
@@ -98,6 +108,8 @@ Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
 Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
 Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
 Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
 Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
 Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
 Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
@@ -446,7 +458,8 @@ Mythri P K <mythripk@ti.com>
 Nadav Amit <nadav.amit@gmail.com> <namit@vmware.com>
 Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
 Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
-Naoya Horiguchi <naoya.horiguchi@nec.com> <n-horiguchi@ah.jp.nec.com>
+Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
+Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org>
 Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
@@ -524,6 +537,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Richard Genoud <richard.genoud@bootlin.com> <richard.genoud@gmail.com>
 Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
 Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
 Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>

@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
 maintainers:
-  - Richard Genoud <richard.genoud@gmail.com>
+  - Richard Genoud <richard.genoud@bootlin.com>
 properties:
   compatible:

@@ -24,10 +24,10 @@ fragmentation statistics can be obtained through gfp flag information of
 each page. It is already implemented and activated if page owner is
 enabled. Other usages are more than welcome.
-It can also be used to show all the stacks and their outstanding
-allocations, which gives us a quick overview of where the memory is going
-without the need to screen through all the pages and match the allocation
-and free operation.
+It can also be used to show all the stacks and their current number of
+allocated base pages, which gives us a quick overview of where the memory
+is going without the need to screen through all the pages and match the
+allocation and free operation.
 page owner is disabled by default. So, if you'd like to use it, you need
 to add "page_owner=on" to your boot cmdline. If the kernel is built
@@ -75,42 +75,45 @@ Usage
 cat /sys/kernel/debug/page_owner_stacks/show_stacks > stacks.txt
 cat stacks.txt
-prep_new_page+0xa9/0x120
-get_page_from_freelist+0x7e6/0x2140
-__alloc_pages+0x18a/0x370
-new_slab+0xc8/0x580
-___slab_alloc+0x1f2/0xaf0
-__slab_alloc.isra.86+0x22/0x40
-kmem_cache_alloc+0x31b/0x350
-__khugepaged_enter+0x39/0x100
-dup_mmap+0x1c7/0x5ce
-copy_process+0x1afe/0x1c90
-kernel_clone+0x9a/0x3c0
-__do_sys_clone+0x66/0x90
-do_syscall_64+0x7f/0x160
-entry_SYSCALL_64_after_hwframe+0x6c/0x74
-stack_count: 234
+post_alloc_hook+0x177/0x1a0
+get_page_from_freelist+0xd01/0xd80
+__alloc_pages+0x39e/0x7e0
+allocate_slab+0xbc/0x3f0
+___slab_alloc+0x528/0x8a0
+kmem_cache_alloc+0x224/0x3b0
+sk_prot_alloc+0x58/0x1a0
+sk_alloc+0x32/0x4f0
+inet_create+0x427/0xb50
+__sock_create+0x2e4/0x650
+inet_ctl_sock_create+0x30/0x180
+igmp_net_init+0xc1/0x130
+ops_init+0x167/0x410
+setup_net+0x304/0xa60
+copy_net_ns+0x29b/0x4a0
+create_new_namespaces+0x4a1/0x820
+nr_base_pages: 16
 ...
 ...
 echo 7000 > /sys/kernel/debug/page_owner_stacks/count_threshold
 cat /sys/kernel/debug/page_owner_stacks/show_stacks> stacks_7000.txt
 cat stacks_7000.txt
-prep_new_page+0xa9/0x120
-get_page_from_freelist+0x7e6/0x2140
-__alloc_pages+0x18a/0x370
-alloc_pages_mpol+0xdf/0x1e0
-folio_alloc+0x14/0x50
-filemap_alloc_folio+0xb0/0x100
-page_cache_ra_unbounded+0x97/0x180
-filemap_fault+0x4b4/0x1200
-__do_fault+0x2d/0x110
-do_pte_missing+0x4b0/0xa30
-__handle_mm_fault+0x7fa/0xb70
-handle_mm_fault+0x125/0x300
-do_user_addr_fault+0x3c9/0x840
-exc_page_fault+0x68/0x150
-asm_exc_page_fault+0x22/0x30
-stack_count: 8248
+post_alloc_hook+0x177/0x1a0
+get_page_from_freelist+0xd01/0xd80
+__alloc_pages+0x39e/0x7e0
+alloc_pages_mpol+0x22e/0x490
+folio_alloc+0xd5/0x110
+filemap_alloc_folio+0x78/0x230
+page_cache_ra_order+0x287/0x6f0
+filemap_get_pages+0x517/0x1160
+filemap_read+0x304/0x9f0
+xfs_file_buffered_read+0xe6/0x1d0 [xfs]
+xfs_file_read_iter+0x1f0/0x380 [xfs]
+__kernel_read+0x3b9/0x730
+kernel_read_file+0x309/0x4d0
+__do_sys_finit_module+0x381/0x730
+do_syscall_64+0x8d/0x150
+entry_SYSCALL_64_after_hwframe+0x62/0x6a
+nr_base_pages: 20824
 ...
 cat /sys/kernel/debug/page_owner > page_owner_full.txt

@@ -252,7 +252,7 @@ an involved disclosed party. The current ambassadors list:
 AMD Tom Lendacky <thomas.lendacky@amd.com>
 Ampere Darren Hart <darren@os.amperecomputing.com>
 ARM Catalin Marinas <catalin.marinas@arm.com>
-IBM Power Anton Blanchard <anton@linux.ibm.com>
+IBM Power Michael Ellerman <ellerman@au.ibm.com>
 IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
 Intel Tony Luck <tony.luck@intel.com>
 Qualcomm Trilok Soni <quic_tsoni@quicinc.com>

@@ -7829,9 +7829,8 @@ W: http://aeschi.ch.eu.org/efs/
 F: fs/efs/
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Douglas Miller <dougmill@linux.ibm.com>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
 F: drivers/net/ethernet/ibm/ehea/
 ELM327 CAN NETWORK DRIVER
@@ -8748,10 +8747,9 @@ S: Orphan
 F: drivers/usb/gadget/udc/fsl*
 FREESCALE USB PHY DRIVER
-M: Ran Wang <ran.wang_1@nxp.com>
 L: linux-usb@vger.kernel.org
 L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
 F: drivers/usb/phy/phy-fsl-usb*
 FREEVXFS FILESYSTEM
@@ -9579,7 +9577,7 @@ F: kernel/power/
 HID CORE LAYER
 M: Jiri Kosina <jikos@kernel.org>
-M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <bentiss@kernel.org>
 L: linux-input@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
@@ -10026,7 +10024,7 @@ F: drivers/media/platform/st/sti/hva
 HWPOISON MEMORY FAILURE HANDLING
 M: Miaohe Lin <linmiaohe@huawei.com>
-R: Naoya Horiguchi <naoya.horiguchi@nec.com>
+R: Naoya Horiguchi <nao.horiguchi@gmail.com>
 L: linux-mm@kvack.org
 S: Maintained
 F: mm/hwpoison-inject.c
@@ -11997,7 +11995,7 @@ F: include/keys/encrypted-type.h
 F: security/keys/encrypted-keys/
 KEYS-TRUSTED
-M: James Bottomley <jejb@linux.ibm.com>
+M: James Bottomley <James.Bottomley@HansenPartnership.com>
 M: Jarkko Sakkinen <jarkko@kernel.org>
 M: Mimi Zohar <zohar@linux.ibm.com>
 L: linux-integrity@vger.kernel.org
@@ -14359,7 +14357,7 @@ F: drivers/dma/at_xdmac.c
 F: include/dt-bindings/dma/at91.h
 MICROCHIP AT91 SERIAL DRIVER
-M: Richard Genoud <richard.genoud@gmail.com>
+M: Richard Genoud <richard.genoud@bootlin.com>
 S: Maintained
 F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
 F: drivers/tty/serial/atmel_serial.c
@@ -19680,7 +19678,7 @@ F: drivers/scsi/sg.c
 F: include/scsi/sg.h
 SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <jejb@linux.ibm.com>
+M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
 M: "Martin K. Petersen" <martin.petersen@oracle.com>
 L: linux-scsi@vger.kernel.org
 S: Maintained
@@ -22852,7 +22850,7 @@ F: drivers/usb/host/ehci*
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 M: Jiri Kosina <jikos@kernel.org>
-M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <bentiss@kernel.org>
 L: linux-usb@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth
 # *DOCUMENTATION*

@@ -289,6 +289,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 adr_l x1, __hyp_text_end
 adr_l x2, dcache_clean_poc
 blr x2
+mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+pre_disable_mmu_workaround
+msr sctlr_el2, x0
+isb
 0:
 mov_q x0, HCR_HOST_NVHE_FLAGS
@@ -323,13 +328,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 cbz x0, 2f
 /* Set a sane SCTLR_EL1, the VHE way */
-pre_disable_mmu_workaround
 msr_s SYS_SCTLR_EL12, x1
 mov x2, #BOOT_CPU_FLAG_E2H
 b 3f
 2:
-pre_disable_mmu_workaround
 msr sctlr_el1, x1
 mov x2, xzr
 3:

@@ -276,7 +276,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 pte_t *ptep = NULL;
 pgdp = pgd_offset(mm, addr);
-p4dp = p4d_offset(pgdp, addr);
+p4dp = p4d_alloc(mm, pgdp, addr);
+if (!p4dp)
+return NULL;
 pudp = pud_alloc(mm, p4dp, addr);
 if (!pudp)
 return NULL;

@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
 pte_t *ptep;
 unsigned long addr = (unsigned long)page_address(page);
-if (!can_set_direct_map())
-return true;
 pgdp = pgd_offset_k(addr);
 if (pgd_none(READ_ONCE(*pgdp)))
 return false;

@@ -197,6 +197,9 @@ static struct skcipher_alg algs[] = {
 static int __init chacha_p10_init(void)
 {
+if (!cpu_has_feature(CPU_FTR_ARCH_31))
+return 0;
 static_branch_enable(&have_p10);
 return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
@@ -204,10 +207,13 @@ static int __init chacha_p10_init(void)
 static void __exit chacha_p10_exit(void)
 {
+if (!static_branch_likely(&have_p10))
+return;
 crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
 }
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
+module_init(chacha_p10_init);
 module_exit(chacha_p10_exit);
 MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");

@@ -1285,15 +1285,14 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
 struct device *dev)
 {
 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-struct iommu_group *grp = iommu_group_get(dev);
 struct iommu_table_group *table_group;
+struct iommu_group *grp;
 /* At first attach the ownership is already set */
-if (!domain) {
-iommu_group_put(grp);
+if (!domain)
 return 0;
-}
+grp = iommu_group_get(dev);
 table_group = iommu_group_get_iommudata(grp);
 /*
 * The domain being set to PLATFORM from earlier

@@ -340,7 +340,8 @@ SYM_CODE_START(pgm_check_handler)
 mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
 stctg %c1,%c1,__PT_CR1(%r11)
 #if IS_ENABLED(CONFIG_KVM)
-lg %r12,__LC_GMAP
+ltg %r12,__LC_GMAP
+jz 5f
 clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11)
 jne 5f
 BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST

@@ -255,6 +255,71 @@ __visible noinstr void do_int80_emulation(struct pt_regs *regs)
 instrumentation_end();
 syscall_exit_to_user_mode(regs);
 }
+#ifdef CONFIG_X86_FRED
+/*
+* A FRED-specific INT80 handler is warranted for the follwing reasons:
+*
+* 1) As INT instructions and hardware interrupts are separate event
+* types, FRED does not preclude the use of vector 0x80 for external
+* interrupts. As a result, the FRED setup code does not reserve
+* vector 0x80 and calling int80_is_external() is not merely
+* suboptimal but actively incorrect: it could cause a system call
+* to be incorrectly ignored.
+*
+* 2) It is called only for handling vector 0x80 of event type
+* EVENT_TYPE_SWINT and will never be called to handle any external
+* interrupt (event type EVENT_TYPE_EXTINT).
+*
+* 3) FRED has separate entry flows depending on if the event came from
+* user space or kernel space, and because the kernel does not use
+* INT insns, the FRED kernel entry handler fred_entry_from_kernel()
+* falls through to fred_bad_type() if the event type is
+* EVENT_TYPE_SWINT, i.e., INT insns. So if the kernel is handling
+* an INT insn, it can only be from a user level.
+*
+* 4) int80_emulation() does a CLEAR_BRANCH_HISTORY. While FRED will
+* likely take a different approach if it is ever needed: it
+* probably belongs in either fred_intx()/ fred_other() or
+* asm_fred_entrypoint_user(), depending on if this ought to be done
+* for all entries from userspace or only system
+* calls.
+*
+* 5) INT $0x80 is the fast path for 32-bit system calls under FRED.
+*/
+DEFINE_FREDENTRY_RAW(int80_emulation)
+{
+int nr;
+enter_from_user_mode(regs);
+instrumentation_begin();
+add_random_kstack_offset();
+/*
+* FRED pushed 0 into regs::orig_ax and regs::ax contains the
+* syscall number.
+*
+* User tracing code (ptrace or signal handlers) might assume
+* that the regs::orig_ax contains a 32-bit number on invoking
+* a 32-bit syscall.
+*
+* Establish the syscall convention by saving the 32bit truncated
+* syscall number in regs::orig_ax and by invalidating regs::ax.
+*/
+regs->orig_ax = regs->ax & GENMASK(31, 0);
+regs->ax = -ENOSYS;
+nr = syscall_32_enter(regs);
+local_irq_enable();
+nr = syscall_enter_from_user_mode_work(regs, nr);
+do_syscall_32_irqs_on(regs, nr);
+instrumentation_end();
+syscall_exit_to_user_mode(regs);
+}
+#endif
 #else /* CONFIG_IA32_EMULATION */
 /* Handles int $0x80 on a 32bit kernel */

@@ -28,9 +28,9 @@ static noinstr void fred_bad_type(struct pt_regs *regs, unsigned long error_code
 if (regs->fred_cs.sl > 0) {
 pr_emerg("PANIC: invalid or fatal FRED event; event type %u "
 "vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n",
-regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax,
+regs->fred_ss.type, regs->fred_ss.vector, error_code,
 fred_event_data(regs), regs->cs, regs->ip);
-die("invalid or fatal FRED event", regs, regs->orig_ax);
+die("invalid or fatal FRED event", regs, error_code);
 panic("invalid or fatal FRED event");
 } else {
 unsigned long flags = oops_begin();
@@ -38,10 +38,10 @@ static noinstr void fred_bad_type(struct pt_regs *regs, unsigned long error_code
 pr_alert("BUG: invalid or fatal FRED event; event type %u "
 "vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n",
-regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax,
+regs->fred_ss.type, regs->fred_ss.vector, error_code,
 fred_event_data(regs), regs->cs, regs->ip);
-if (__die("Invalid or fatal FRED event", regs, regs->orig_ax))
+if (__die("Invalid or fatal FRED event", regs, error_code))
 sig = 0;
 oops_end(flags, regs, sig);
@@ -66,7 +66,7 @@ static noinstr void fred_intx(struct pt_regs *regs)
 /* INT80 */
 case IA32_SYSCALL_VECTOR:
 if (ia32_enabled())
-return int80_emulation(regs);
+return fred_int80_emulation(regs);
 fallthrough;
 #endif

@@ -1693,6 +1693,7 @@ void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
 lbr->from = x86_pmu.lbr_from;
 lbr->to = x86_pmu.lbr_to;
 lbr->info = x86_pmu.lbr_info;
+lbr->has_callstack = x86_pmu_has_lbr_callstack();
 }
 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);

@@ -79,6 +79,9 @@ do { \
 #define __smp_mb__before_atomic() do { } while (0)
 #define __smp_mb__after_atomic() do { } while (0)
+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm() do { } while (0)
 #include <asm-generic/barrier.h>
 #endif /* _ASM_X86_BARRIER_H */

@@ -855,6 +855,7 @@ struct kvm_vcpu_arch {
 int cpuid_nent;
 struct kvm_cpuid_entry2 *cpuid_entries;
 struct kvm_hypervisor_cpuid kvm_cpuid;
+bool is_amd_compatible;
 /*
 * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly

@@ -555,6 +555,7 @@ struct x86_pmu_lbr {
 unsigned int from;
 unsigned int to;
 unsigned int info;
+bool has_callstack;
 };
 extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);

@@ -1652,7 +1652,8 @@ static void __init bhi_select_mitigation(void)
 return;
 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
-if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
 spec_ctrl_disable_kernel_rrsba();
 if (rrsba_disabled)
 return;
@@ -2804,11 +2805,13 @@ static const char *spectre_bhi_state(void)
 {
 if (!boot_cpu_has_bug(X86_BUG_BHI))
 return "; BHI: Not affected";
 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
 return "; BHI: BHI_DIS_S";
 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 return "; BHI: SW loop, KVM: SW loop";
-else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
+else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+rrsba_disabled)
 return "; BHI: Retpoline";
 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
 return "; BHI: Vulnerable, KVM: SW loop";

@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
 { X86_FEATURE_F16C, X86_FEATURE_XMM2, },
 { X86_FEATURE_AES, X86_FEATURE_XMM2 },
 { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
 { X86_FEATURE_FMA, X86_FEATURE_AVX },
+{ X86_FEATURE_VAES, X86_FEATURE_AVX },
+{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
 { X86_FEATURE_AVX2, X86_FEATURE_AVX, },
 { X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
 { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
 { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
 { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
 { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
 { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
 { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
 { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },

@@ -3,11 +3,6 @@
 ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
-ifeq ($(CONFIG_FRAME_POINTER),y)
-OBJECT_FILES_NON_STANDARD_vmx/vmenter.o := y
-OBJECT_FILES_NON_STANDARD_svm/vmenter.o := y
-endif
 include $(srctree)/virt/kvm/Makefile.kvm
 kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \

@@ -376,6 +376,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 kvm_update_pv_runtime(vcpu);
+vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

@@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
 return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
 }
+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+return vcpu->arch.is_amd_compatible;
+}
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+return !guest_cpuid_is_amd_compatible(vcpu);
+}
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
 struct kvm_cpuid_entry2 *best;

@@ -2776,7 +2776,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
-if (r && lvt_type == APIC_LVTPC)
+if (r && lvt_type == APIC_LVTPC &&
+guest_cpuid_is_intel_compatible(apic->vcpu))
 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
 return r;
 }

@@ -4935,7 +4935,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 context->cpu_role.base.level, is_efer_nx(context),
 guest_can_use(vcpu, X86_FEATURE_GBPAGES),
 is_cr4_pse(context),
-guest_cpuid_is_amd_or_hygon(vcpu));
+guest_cpuid_is_amd_compatible(vcpu));
 }
 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@@ -5576,9 +5576,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 * that problem is swept under the rug; KVM's CPUID API is horrific and
 * it's all but impossible to solve it without introducing a new API.
 */
-vcpu->arch.root_mmu.root_role.word = 0;
-vcpu->arch.guest_mmu.root_role.word = 0;
-vcpu->arch.nested_mmu.root_role.word = 0;
+vcpu->arch.root_mmu.root_role.invalid = 1;
+vcpu->arch.guest_mmu.root_role.invalid = 1;
+vcpu->arch.nested_mmu.root_role.invalid = 1;
 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
@@ -7399,7 +7399,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
 * by the memslot, KVM can't use a hugepage due to the
 * misaligned address regardless of memory attributes.
 */
-if (gfn >= slot->base_gfn) {
+if (gfn >= slot->base_gfn &&
+gfn + nr_pages <= slot->base_gfn + slot->npages) {
 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
 hugepage_clear_mixed(slot, gfn, level);
 else

@@ -1548,17 +1548,21 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 }
 }
-/*
-* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
-* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
-* If AD bits are not enabled, this will require clearing the writable bit on
-* each SPTE. Returns true if an SPTE has been changed and the TLBs need to
-* be flushed.
-*/
+static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+{
+/*
+* All TDP MMU shadow pages share the same role as their root, aside
+* from level, so it is valid to key off any shadow page to determine if
+* write protection is needed for an entire tree.
+*/
+return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+}
 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 gfn_t start, gfn_t end)
 {
-u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
+const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
+shadow_dirty_mask;
 struct tdp_iter iter;
 bool spte_set = false;
@@ -1573,7 +1577,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 continue;
-KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
 spte_ad_need_write_protect(iter.old_spte));
 if (!(iter.old_spte & dbit))
@@ -1590,11 +1594,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 }
 /*
-* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
-* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
-* If AD bits are not enabled, this will require clearing the writable bit on
-* each SPTE. Returns true if an SPTE has been changed and the TLBs need to
-* be flushed.
+* Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
+* memslot. Returns true if an SPTE has been changed and the TLBs need to be
+* flushed.
 */
 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 const struct kvm_memory_slot *slot)
@@ -1610,18 +1612,11 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 return spte_set;
 }
-/*
-* Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
-* set in mask, starting at gfn. The given memslot is expected to contain all
-* the GFNs represented by set bits in the mask. If AD bits are enabled,
-* clearing the dirty status will involve clearing the dirty bit on each SPTE
-* or, if AD bits are not enabled, clearing the writable bit on each SPTE.
-*/
 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 gfn_t gfn, unsigned long mask, bool wrprot)
 {
-u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
+const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
 shadow_dirty_mask;
 struct tdp_iter iter;
 lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1633,7 +1628,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 if (!mask)
 break;
-KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
 spte_ad_need_write_protect(iter.old_spte));
 if (iter.level > PG_LEVEL_4K ||
@@ -1659,11 +1654,9 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 }
 /*
-* Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
-* set in mask, starting at gfn. The given memslot is expected to contain all
-* the GFNs represented by set bits in the mask. If AD bits are enabled,
-* clearing the dirty status will involve clearing the dirty bit on each SPTE
-* or, if AD bits are not enabled, clearing the writable bit on each SPTE.
+* Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
+* which a bit is set in mask, starting at gfn. The given memslot is expected to
+* contain all the GFNs represented by set bits in the mask.
 */
 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 struct kvm_memory_slot *slot,

@@ -775,8 +775,20 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 pmu->pebs_data_cfg_mask = ~0ull;
 bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
-if (vcpu->kvm->arch.enable_pmu)
-static_call(kvm_x86_pmu_refresh)(vcpu);
+if (!vcpu->kvm->arch.enable_pmu)
+return;
+static_call(kvm_x86_pmu_refresh)(vcpu);
+/*
+* At RESET, both Intel and AMD CPUs set all enable bits for general
+* purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
+* was written for v1 PMUs don't unknowingly leave GP counters disabled
+* in the global controls). Emulate that behavior when refreshing the
+* PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
+*/
+if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
+pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
 }
 void kvm_pmu_init(struct kvm_vcpu *vcpu)

@@ -434,7 +434,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 /* Avoid using vmalloc for smaller buffers. */
 size = npages * sizeof(struct page *);
 if (size > PAGE_SIZE)
-pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
 else
 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

@@ -1503,6 +1503,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+return page_address(sd->save_area) + 0x400;
+}
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1524,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 * or subsequent vmload of host save area.
 */
 vmsave(sd->save_area_pa);
-if (sev_es_guest(vcpu->kvm)) {
-struct sev_es_save_area *hostsa;
-hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-sev_es_prepare_switch_to_guest(hostsa);
-}
+if (sev_es_guest(vcpu->kvm))
+sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));
 if (tsc_scaling)
 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -4101,6 +4102,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 {
+struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
 struct vcpu_svm *svm = to_svm(vcpu);
 guest_state_enter_irqoff();
@@ -4108,7 +4110,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
 amd_clear_divider();
 if (sev_es_guest(vcpu->kvm))
-__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+sev_es_host_save_area(sd));
 else
 __svm_vcpu_run(svm, spec_ctrl_intercepted);

@@ -698,7 +698,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 /* vmenter.S */
-void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
+struct sev_es_save_area *hostsa);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 #define DEFINE_KVM_GHCB_ACCESSORS(field) \

@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
+#include <asm/frame.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"
@@ -67,7 +68,7 @@
 "", X86_FEATURE_V_SPEC_CTRL
 901:
 .endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
 900:
 /* Same for after vmexit. */
 mov $MSR_IA32_SPEC_CTRL, %ecx
@@ -76,7 +77,7 @@
 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
 * if it was not intercepted during guest execution.
 */
-cmpb $0, (%_ASM_SP)
+cmpb $0, \spec_ctrl_intercepted
 jnz 998f
 rdmsr
 movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
 */
 SYM_FUNC_START(__svm_vcpu_run)
 push %_ASM_BP
+mov %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
 push %r15
 push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 RET
 RESTORE_GUEST_SPEC_CTRL_BODY
-RESTORE_HOST_SPEC_CTRL_BODY
+RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
 10: cmpb $0, _ASM_RIP(kvm_rebooting)
 jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
 SYM_FUNC_END(__svm_vcpu_run)
+#ifdef CONFIG_KVM_AMD_SEV
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
+#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
 /**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-push %_ASM_BP
-#ifdef CONFIG_X86_64
-push %r15
-push %r14
-push %r13
-push %r12
-#else
-push %edi
-push %esi
-#endif
-push %_ASM_BX
+FRAME_BEGIN
 /*
-* Save variables needed after vmexit on the stack, in inverse
-* order compared to when they are needed.
+* Save non-volatile (callee-saved) registers to the host save area.
+* Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+* saved on VMRUN.
 */
+mov %rbp, SEV_ES_RBP (%rdx)
+mov %r15, SEV_ES_R15 (%rdx)
+mov %r14, SEV_ES_R14 (%rdx)
+mov %r13, SEV_ES_R13 (%rdx)
+mov %r12, SEV_ES_R12 (%rdx)
+mov %rbx, SEV_ES_RBX (%rdx)
-/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
-push %_ASM_ARG2
-/* Save @svm. */
-push %_ASM_ARG1
-.ifnc _ASM_ARG1, _ASM_DI
 /*
-* Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-* and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+* Save volatile registers that hold arguments that are needed after
+* #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
 */
-mov %_ASM_ARG1, %_ASM_DI
-.endif
-/* Clobbers RAX, RCX, RDX. */
+mov %rdi, SEV_ES_RDI (%rdx)
+mov %rsi, SEV_ES_RSI (%rdx)
+/* Clobbers RAX, RCX, RDX (@hostsa). */
 RESTORE_GUEST_SPEC_CTRL
 /* Get svm->current_vmcb->pa into RAX. */
-mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+mov SVM_current_vmcb(%rdi), %rax
+mov KVM_VMCB_pa(%rax), %rax
 /* Enter guest mode */
 sti
-1: vmrun %_ASM_AX
+1: vmrun %rax
 2: cli
-/* Pop @svm to RDI, guest registers have been saved already. */
-pop %_ASM_DI
 #ifdef CONFIG_MITIGATION_RETPOLINE
 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
-/* Clobbers RAX, RCX, RDX. */
+/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
 RESTORE_HOST_SPEC_CTRL
 /*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 */
 UNTRAIN_RET_VM
-/* "Pop" @spec_ctrl_intercepted. */
-pop %_ASM_BX
-pop %_ASM_BX
-#ifdef CONFIG_X86_64
-pop %r12
-pop %r13
-pop %r14
-pop %r15
-#else
-pop %esi
-pop %edi
-#endif
-pop %_ASM_BP
+FRAME_END
 RET
 RESTORE_GUEST_SPEC_CTRL_BODY
-RESTORE_HOST_SPEC_CTRL_BODY
+RESTORE_HOST_SPEC_CTRL_BODY %sil
-3: cmpb $0, _ASM_RIP(kvm_rebooting)
+3: cmpb $0, kvm_rebooting(%rip)
 jne 2b
 ud2
 _ASM_EXTABLE(1b, 3b)
 SYM_FUNC_END(__svm_sev_es_vcpu_run)
+#endif /* CONFIG_KVM_AMD_SEV */

@@ -535,7 +535,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 perf_capabilities = vcpu_get_perf_capabilities(vcpu);
 if (cpuid_model_is_consistent(vcpu) &&
 (perf_capabilities & PMU_CAP_LBR_FMT))
-x86_perf_get_lbr(&lbr_desc->records);
+memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
 else
 lbr_desc->records.nr = 0;

@@ -218,6 +218,8 @@ module_param(ple_window_max, uint, 0444);
 int __read_mostly pt_mode = PT_MODE_SYSTEM;
 module_param(pt_mode, int, S_IRUGO);
+struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
@@ -7862,10 +7864,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 vmx_update_exception_bitmap(vcpu);
 }
-static u64 vmx_get_perf_capabilities(void)
+static __init u64 vmx_get_perf_capabilities(void)
 {
 u64 perf_cap = PMU_CAP_FW_WRITES;
-struct x86_pmu_lbr lbr;
 u64 host_perf_cap = 0;
 if (!enable_pmu)
@@ -7875,15 +7876,43 @@ static u64 vmx_get_perf_capabilities(void)
 rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
 if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
-x86_perf_get_lbr(&lbr);
-if (lbr.nr)
+x86_perf_get_lbr(&vmx_lbr_caps);
+/*
+* KVM requires LBR callstack support, as the overhead due to
+* context switching LBRs without said support is too high.
+* See intel_pmu_create_guest_lbr_event() for more info.
+*/
+if (!vmx_lbr_caps.has_callstack)
+memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
+else if (vmx_lbr_caps.nr)
 perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
 }
 if (vmx_pebs_supported()) {
 perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
-if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
-perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+/*
+* Disallow adaptive PEBS as it is functionally broken, can be
+* used by the guest to read *host* LBRs, and can be used to
+* bypass userspace event filters. To correctly and safely
+* support adaptive PEBS, KVM needs to:
+*
+* 1. Account for the ADAPTIVE flag when (re)programming fixed
+* counters.
+*
+* 2. Gain support from perf (or take direct control of counter
+* programming) to support events without adaptive PEBS
+* enabled for the hardware counter.
+*
+* 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+* adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+*
+* 4. Document which PMU events are effectively exposed to the
+* guest via adaptive PEBS, and make adaptive PEBS mutually
+* exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+*/
+perf_cap &= ~PERF_CAP_PEBS_BASELINE;
 }
 return perf_cap;

@@ -15,6 +15,7 @@
 #include "vmx_ops.h"
 #include "../cpuid.h"
 #include "run_flags.h"
+#include "../mmu.h"
 #define MSR_TYPE_R 1
 #define MSR_TYPE_W 2
@@ -109,6 +110,8 @@ struct lbr_desc {
 bool msr_passthrough;
 };
+extern struct x86_pmu_lbr vmx_lbr_caps;
 /*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -719,7 +722,8 @@ static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
 if (!enable_ept)
 return true;
-return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+return allow_smaller_maxphyaddr &&
+cpuid_maxphyaddr(vcpu) < kvm_get_shadow_phys_bits();
 }
 static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)

@@ -3470,7 +3470,7 @@ static bool is_mci_status_msr(u32 msr)
 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
 {
 /* McStatusWrEn enabled? */
-if (guest_cpuid_is_amd_or_hygon(vcpu))
+if (guest_cpuid_is_amd_compatible(vcpu))
 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
 return false;

@@ -382,8 +382,15 @@ SYM_FUNC_END(call_depth_return_thunk)
 SYM_CODE_START(__x86_return_thunk)
 UNWIND_HINT_FUNC
 ANNOTATE_NOENDBR
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
+defined(CONFIG_MITIGATION_SRSO) || \
+defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
 ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
 "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
+#else
+ANNOTATE_UNRET_SAFE
+ret
+#endif
 int3
 SYM_CODE_END(__x86_return_thunk)
 EXPORT_SYMBOL(__x86_return_thunk)

@@ -645,6 +645,14 @@ static void blkdev_flush_mapping(struct block_device *bdev)
 bdev_write_inode(bdev);
 }
+static void blkdev_put_whole(struct block_device *bdev)
+{
+if (atomic_dec_and_test(&bdev->bd_openers))
+blkdev_flush_mapping(bdev);
+if (bdev->bd_disk->fops->release)
+bdev->bd_disk->fops->release(bdev->bd_disk);
+}
 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
 {
 struct gendisk *disk = bdev->bd_disk;
@@ -663,20 +671,21 @@ static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
 if (!atomic_read(&bdev->bd_openers))
 set_init_blocksize(bdev);
-if (test_bit(GD_NEED_PART_SCAN, &disk->state))
-bdev_disk_changed(disk, false);
 atomic_inc(&bdev->bd_openers);
+if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
+/*
+* Only return scanning errors if we are called from contexts
+* that explicitly want them, e.g. the BLKRRPART ioctl.
+*/
+ret = bdev_disk_changed(disk, false);
+if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
+blkdev_put_whole(bdev);
+return ret;
+}
+}
 return 0;
 }
-static void blkdev_put_whole(struct block_device *bdev)
-{
-if (atomic_dec_and_test(&bdev->bd_openers))
-blkdev_flush_mapping(bdev);
-if (bdev->bd_disk->fops->release)
-bdev->bd_disk->fops->release(bdev->bd_disk);
-}
 static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
 {
 struct gendisk *disk = part->bd_disk;

@@ -1439,8 +1439,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
 lockdep_assert_held(&iocg->ioc->lock);
 lockdep_assert_held(&iocg->waitq.lock);
-/* make sure that nobody messed with @iocg */
-WARN_ON_ONCE(list_empty(&iocg->active_list));
+/*
+* make sure that nobody messed with @iocg. Check iocg->pd.online
+* to avoid warn when removing blkcg or disk.
+*/
+WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
 WARN_ON_ONCE(iocg->inuse > 1);
 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);

@@ -563,7 +563,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
 return -EACCES;
 if (bdev_is_partition(bdev))
 return -EINVAL;
-return disk_scan_partitions(bdev->bd_disk, mode);
+return disk_scan_partitions(bdev->bd_disk,
+mode | BLK_OPEN_STRICT_SCAN);
 case BLKTRACESTART:
 case BLKTRACESTOP:
 case BLKTRACETEARDOWN:

@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
 }
 attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
 buf[cnt++] = attr_ch;
-while (tmpx < vc->vc_cols - 1) {
+while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
 tmp_pos += 2;
 tmpx++;
 ch = get_char(vc, (u_short *)tmp_pos, &temp);

@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
 size_t object_size = 0;
 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-if (offset > buffer->data_size || read_size < sizeof(*hdr))
+if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+!IS_ALIGNED(offset, sizeof(u32)))
 return 0;
 if (u) {
 if (copy_from_user(object, u + offset, read_size))
 return 0;

@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 switch (data->cd_info.state) {
 case HCI_DEVCOREDUMP_IDLE:
 err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
-if (err < 0)
+if (err < 0) {
+kfree_skb(skb);
 break;
+}
 data->cd_info.cnt = 0;
 /* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 break;
 }
-if (err < 0)
-kfree_skb(skb);
 return err;
 }
 EXPORT_SYMBOL_GPL(btmtk_process_coredump);

@@ -15,6 +15,8 @@
 #define VERSION "0.1"
+#define QCA_BDADDR_DEFAULT (&(bdaddr_t) {{ 0xad, 0x5a, 0x00, 0x00, 0x00, 0x00 }})
 int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
 enum qca_btsoc_type soc_type)
 {
@@ -612,6 +614,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+static int qca_check_bdaddr(struct hci_dev *hdev)
+{
+struct hci_rp_read_bd_addr *bda;
+struct sk_buff *skb;
+int err;
+if (bacmp(&hdev->public_addr, BDADDR_ANY))
+return 0;
+skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+HCI_INIT_TIMEOUT);
+if (IS_ERR(skb)) {
+err = PTR_ERR(skb);
+bt_dev_err(hdev, "Failed to read device address (%d)", err);
+return err;
+}
+if (skb->len != sizeof(*bda)) {
+bt_dev_err(hdev, "Device address length mismatch");
+kfree_skb(skb);
+return -EIO;
+}
+bda = (struct hci_rp_read_bd_addr *)skb->data;
+if (!bacmp(&bda->bdaddr, QCA_BDADDR_DEFAULT))
+set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+kfree_skb(skb);
+return 0;
+}
 static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
 struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
 {
@@ -818,6 +852,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
 break;
 }
+err = qca_check_bdaddr(hdev);
+if (err)
+return err;
 bt_dev_info(hdev, "QCA setup on UART is completed");
 return 0;


@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
     /* Realtek 8852BE Bluetooth devices */
     { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
                                                  BTUSB_WIDEBAND_SPEECH },
+    { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+                                                 BTUSB_WIDEBAND_SPEECH },
     { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
                                                  BTUSB_WIDEBAND_SPEECH },
     { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -3480,13 +3482,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
 static void btusb_coredump_qca(struct hci_dev *hdev)
 {
+    int err;
     static const u8 param[] = { 0x26 };
-    struct sk_buff *skb;

-    skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
-    if (IS_ERR(skb))
-        bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
-    kfree_skb(skb);
+    err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+    if (err < 0)
+        bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
 }

 /*


@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
     struct hci_uart *hu = hci_get_drvdata(hdev);
     bool wakeup;

+    if (!hu->serdev)
+        return true;
+
     /* BT SoC attached through the serial bus is handled by the serdev driver.
      * So we need to use the device handle of the serdev driver to get the
      * status of device may wakeup.
@@ -1905,8 +1908,6 @@ static int qca_setup(struct hci_uart *hu)
     case QCA_WCN6750:
     case QCA_WCN6855:
     case QCA_WCN7850:
-        set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
         qcadev = serdev_device_get_drvdata(hu->serdev);
         if (qcadev->bdaddr_property_broken)
             set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
@@ -1957,8 +1958,10 @@ static int qca_setup(struct hci_uart *hu)
         qca_debugfs_init(hdev);
         hu->hdev->hw_error = qca_hw_error;
         hu->hdev->cmd_timeout = qca_cmd_timeout;
-        if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
-            hu->hdev->wakeup = qca_wakeup;
+        if (hu->serdev) {
+            if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+                hu->hdev->wakeup = qca_wakeup;
+        }
     } else if (ret == -ENOENT) {
         /* No patch/nvm-config found, run with original fw/config */
         set_bit(QCA_ROM_FW, &qca->flags);
@@ -2329,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
             (data->soc_type == QCA_WCN6750 ||
              data->soc_type == QCA_WCN6855)) {
             dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
-            power_ctrl_enabled = false;
+            return PTR_ERR(qcadev->bt_en);
         }

+        if (!qcadev->bt_en)
+            power_ctrl_enabled = false;
+
         qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
                                                   GPIOD_IN);
         if (IS_ERR(qcadev->sw_ctrl) &&
             (data->soc_type == QCA_WCN6750 ||
              data->soc_type == QCA_WCN6855 ||
-             data->soc_type == QCA_WCN7850))
-            dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+             data->soc_type == QCA_WCN7850)) {
+            dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+            return PTR_ERR(qcadev->sw_ctrl);
+        }

         qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
         if (IS_ERR(qcadev->susclk)) {
@@ -2357,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
         qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
                                                 GPIOD_OUT_LOW);
         if (IS_ERR(qcadev->bt_en)) {
-            dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
-            power_ctrl_enabled = false;
+            dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+            return PTR_ERR(qcadev->bt_en);
         }

+        if (!qcadev->bt_en)
+            power_ctrl_enabled = false;
+
         qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
         if (IS_ERR(qcadev->susclk)) {
             dev_warn(&serdev->dev, "failed to acquire clk\n");


@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);

+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
 static const struct hlist_head *all_lists[] = {
     &clk_root_list,
     &clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
     struct clk_hw *hw;
     struct module *owner;
     struct device *dev;
+    struct hlist_node rpm_node;
     struct device_node *of_node;
     struct clk_core *parent;
     struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
     pm_runtime_put_sync(core->dev);
 }

+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+    int ret;
+    struct clk_core *core, *failed;
+
+    /*
+     * Grab the list lock to prevent any new clks from being registered
+     * or unregistered until clk_pm_runtime_put_all().
+     */
+    mutex_lock(&clk_rpm_list_lock);
+
+    /*
+     * Runtime PM "get" all the devices that are needed for the clks
+     * currently registered. Do this without holding the prepare_lock, to
+     * avoid the deadlock.
+     */
+    hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+        ret = clk_pm_runtime_get(core);
+        if (ret) {
+            failed = core;
+            pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+                   dev_name(failed->dev), failed->name);
+            goto err;
+        }
+    }
+
+    return 0;
+
+err:
+    hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+        if (core == failed)
+            break;
+
+        clk_pm_runtime_put(core);
+    }
+    mutex_unlock(&clk_rpm_list_lock);
+
+    return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+    struct clk_core *core;
+
+    hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+        clk_pm_runtime_put(core);
+    mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+    struct device *dev = core->dev;
+
+    if (dev && pm_runtime_enabled(dev)) {
+        core->rpm_enabled = true;
+
+        mutex_lock(&clk_rpm_list_lock);
+        hlist_add_head(&core->rpm_node, &clk_rpm_list);
+        mutex_unlock(&clk_rpm_list_lock);
+    }
+}
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
     if (core->flags & CLK_IGNORE_UNUSED)
         return;

-    if (clk_pm_runtime_get(core))
-        return;
-
     if (clk_core_is_prepared(core)) {
         trace_clk_unprepare(core);
         if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
             core->ops->unprepare(core->hw);
         trace_clk_unprepare_complete(core);
     }
-
-    clk_pm_runtime_put(core);
 }

 static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
     if (core->flags & CLK_OPS_PARENT_ENABLE)
         clk_core_prepare_enable(core->parent);

-    if (clk_pm_runtime_get(core))
-        goto unprepare_out;
-
     flags = clk_enable_lock();

     if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 unlock_out:
     clk_enable_unlock(flags);

-    clk_pm_runtime_put(core);
-unprepare_out:
     if (core->flags & CLK_OPS_PARENT_ENABLE)
         clk_core_disable_unprepare(core->parent);
 }
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int __init clk_disable_unused(void)
 {
     struct clk_core *core;
+    int ret;

     if (clk_ignore_unused) {
         pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)

     pr_info("clk: Disabling unused clocks\n");

+    ret = clk_pm_runtime_get_all();
+    if (ret)
+        return ret;
+    /*
+     * Grab the prepare lock to keep the clk topology stable while iterating
+     * over clks.
+     */
     clk_prepare_lock();

     hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)

     clk_prepare_unlock();

+    clk_pm_runtime_put_all();
+
     return 0;
 }
 late_initcall_sync(clk_disable_unused);
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 {
     struct clk_core *child;

-    clk_pm_runtime_get(c);
     clk_summary_show_one(s, c, level);
-    clk_pm_runtime_put(c);

     hlist_for_each_entry(child, &c->children, child_node)
         clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
 {
     struct clk_core *c;
     struct hlist_head **lists = s->private;
+    int ret;

     seq_puts(s, " enable prepare protect duty hardware connection\n");
     seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
     seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");

+    ret = clk_pm_runtime_get_all();
+    if (ret)
+        return ret;
+
     clk_prepare_lock();
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
         clk_summary_show_subtree(s, c, 0);

     clk_prepare_unlock();
+    clk_pm_runtime_put_all();

     return 0;
 }
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
     struct clk_core *c;
     bool first_node = true;
     struct hlist_head **lists = s->private;
+    int ret;
+
+    ret = clk_pm_runtime_get_all();
+    if (ret)
+        return ret;

     seq_putc(s, '{');
+
     clk_prepare_lock();

     for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
     }

     clk_prepare_unlock();
+    clk_pm_runtime_put_all();

     seq_puts(s, "}\n");
     return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
     }

     clk_core_reparent_orphans_nolock();
-
-    kref_init(&core->ref);
 out:
     clk_pm_runtime_put(core);
 unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
     kfree(core->parents);
 }

+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+    struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+    if (core->rpm_enabled) {
+        mutex_lock(&clk_rpm_list_lock);
+        hlist_del(&core->rpm_node);
+        mutex_unlock(&clk_rpm_list_lock);
+    }
+
+    clk_core_free_parent_map(core);
+    kfree_const(core->name);
+    kfree(core);
+}
+
 static struct clk *
 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 {
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
         goto fail_out;
     }

+    kref_init(&core->ref);
+
     core->name = kstrdup_const(init->name, GFP_KERNEL);
     if (!core->name) {
         ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
     }
     core->ops = init->ops;

-    if (dev && pm_runtime_enabled(dev))
-        core->rpm_enabled = true;
     core->dev = dev;
+    clk_pm_runtime_init(core);
     core->of_node = np;
     if (dev && dev->driver)
         core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
     hw->clk = NULL;

 fail_create_clk:
-    clk_core_free_parent_map(core);
 fail_parents:
 fail_ops:
-    kfree_const(core->name);
 fail_name:
-    kfree(core);
+    kref_put(&core->ref, __clk_release);
 fail_out:
     return ERR_PTR(ret);
 }
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_register);

-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
-    struct clk_core *core = container_of(ref, struct clk_core, ref);
-
-    lockdep_assert_held(&prepare_lock);
-
-    clk_core_free_parent_map(core);
-    kfree_const(core->name);
-    kfree(core);
-}
-
 /*
  * Empty clk_ops for unregistered clocks. These are used temporarily
  * after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
     if (ops == &clk_nodrv_ops) {
         pr_err("%s: unregistered clock: %s\n", __func__,
                clk->core->name);
-        goto unlock;
+        clk_prepare_unlock();
+        return;
     }
     /*
      * Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
     if (clk->core->protect_count)
         pr_warn("%s: unregistering protected clock: %s\n",
                 __func__, clk->core->name);
+    clk_prepare_unlock();

     kref_put(&clk->core->ref, __clk_release);
     free_clk(clk);
-unlock:
-    clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
     if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
         clk_set_rate_range_nolock(clk, 0, ULONG_MAX);

-    owner = clk->core->owner;
-    kref_put(&clk->core->ref, __clk_release);
-
     clk_prepare_unlock();

+    owner = clk->core->owner;
+    kref_put(&clk->core->ref, __clk_release);
+
     module_put(owner);
     free_clk(clk);
 }
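The clk hunks above all enforce one ordering rule: take every runtime-PM reference first, without the subsystem lock held, and only then grab the lock to walk the tree. For readers unfamiliar with that pattern, here is a minimal user-space sketch of the same ordering (hypothetical names and simplified types, not kernel code):

/* Hypothetical sketch: "get everything before taking the big lock".
 * resource_get/resource_put and the fixed array stand in for the real
 * per-device runtime-PM calls and the clk list; build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define NRES 3

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* like the list lock */
static pthread_mutex_t big_lock  = PTHREAD_MUTEX_INITIALIZER; /* like prepare_lock */
static int refcount[NRES];

static int resource_get(int i) { refcount[i]++; return 0; }    /* may sleep in real life */
static void resource_put(int i) { refcount[i]--; }

static int get_all(void)
{
    int i, ret;

    pthread_mutex_lock(&list_lock);           /* freeze the resource list */
    for (i = 0; i < NRES; i++) {
        ret = resource_get(i);                /* done WITHOUT big_lock held */
        if (ret) {
            while (i--)
                resource_put(i);              /* unwind on failure */
            pthread_mutex_unlock(&list_lock);
            return ret;
        }
    }
    return 0;                                 /* list_lock stays held on success */
}

static void put_all(void)
{
    int i;

    for (i = 0; i < NRES; i++)
        resource_put(i);
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    if (get_all())
        return 1;
    pthread_mutex_lock(&big_lock);            /* safe: nothing needs to resume under it */
    /* ... walk the tree, disable unused resources ... */
    pthread_mutex_unlock(&big_lock);
    put_all();
    printf("refcount[0] back to %d\n", refcount[0]);
    return 0;
}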


@@ -156,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
     GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
                 "csw_infra_f26m_sel", 8),
     GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
-                "csw_infra_f26m_sel", 9),
+                "infra_pcie_peri_ck_26m_ck_p3", 9),
     GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
                 "csw_infra_f26m_sel", 10),
     /* INFRA1 */


@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>

 #include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
             return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
     }

+    devm_pm_runtime_enable(&pdev->dev);
+    /*
+     * Do a pm_runtime_resume_and_get() to workaround a possible
+     * deadlock between clk_register() and the genpd framework.
+     */
+    r = pm_runtime_resume_and_get(&pdev->dev);
+    if (r)
+        return r;
+
     /* Calculate how many clk_hw_onecell_data entries to allocate */
     num_clks = mcd->num_clks + mcd->num_composite_clks;
     num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
             goto unregister_clks;
     }

+    pm_runtime_put(&pdev->dev);
+
     return r;

 unregister_clks:
@@ -604,6 +617,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
 free_base:
     if (mcd->shared_io && base)
         iounmap(base);
+
+    pm_runtime_put(&pdev->dev);
     return r;
 }


@@ -641,32 +641,21 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
     struct vmk80xx_private *devpriv = dev->private;
     struct usb_interface *intf = comedi_to_usb_interface(dev);
     struct usb_host_interface *iface_desc = intf->cur_altsetting;
-    struct usb_endpoint_descriptor *ep_desc;
-    int i;
+    struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+    int ret;

-    if (iface_desc->desc.bNumEndpoints != 2)
+    if (devpriv->model == VMK8061_MODEL)
+        ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+                                        &ep_tx_desc, NULL, NULL);
+    else
+        ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+                                        &ep_rx_desc, &ep_tx_desc);
+
+    if (ret)
         return -ENODEV;

-    for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
-        ep_desc = &iface_desc->endpoint[i].desc;
-
-        if (usb_endpoint_is_int_in(ep_desc) ||
-            usb_endpoint_is_bulk_in(ep_desc)) {
-            if (!devpriv->ep_rx)
-                devpriv->ep_rx = ep_desc;
-            continue;
-        }
-
-        if (usb_endpoint_is_int_out(ep_desc) ||
-            usb_endpoint_is_bulk_out(ep_desc)) {
-            if (!devpriv->ep_tx)
-                devpriv->ep_tx = ep_desc;
-            continue;
-        }
-    }
-
-    if (!devpriv->ep_rx || !devpriv->ep_tx)
-        return -ENODEV;
+    devpriv->ep_rx = ep_rx_desc;
+    devpriv->ep_tx = ep_tx_desc;

     if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
         return -EINVAL;


@@ -42,6 +42,7 @@ struct dpll_pin_registration {
     struct list_head list;
     const struct dpll_pin_ops *ops;
     void *priv;
+    void *cookie;
 };

 struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)

 static struct dpll_pin_registration *
 dpll_pin_registration_find(struct dpll_pin_ref *ref,
-                           const struct dpll_pin_ops *ops, void *priv)
+                           const struct dpll_pin_ops *ops, void *priv,
+                           void *cookie)
 {
     struct dpll_pin_registration *reg;

     list_for_each_entry(reg, &ref->registration_list, list) {
-        if (reg->ops == ops && reg->priv == priv)
+        if (reg->ops == ops && reg->priv == priv &&
+            reg->cookie == cookie)
             return reg;
     }
     return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,

 static int
 dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
-                    const struct dpll_pin_ops *ops, void *priv)
+                    const struct dpll_pin_ops *ops, void *priv,
+                    void *cookie)
 {
     struct dpll_pin_registration *reg;
     struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
     xa_for_each(xa_pins, i, ref) {
         if (ref->pin != pin)
             continue;
-        reg = dpll_pin_registration_find(ref, ops, priv);
+        reg = dpll_pin_registration_find(ref, ops, priv, cookie);
         if (reg) {
             refcount_inc(&ref->refcount);
             return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
     }
     reg->ops = ops;
     reg->priv = priv;
+    reg->cookie = cookie;
     if (ref_exists)
         refcount_inc(&ref->refcount);
     list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
 }

 static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
-                               const struct dpll_pin_ops *ops, void *priv)
+                               const struct dpll_pin_ops *ops, void *priv,
+                               void *cookie)
 {
     struct dpll_pin_registration *reg;
     struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
     xa_for_each(xa_pins, i, ref) {
         if (ref->pin != pin)
             continue;
-        reg = dpll_pin_registration_find(ref, ops, priv);
+        reg = dpll_pin_registration_find(ref, ops, priv, cookie);
         if (WARN_ON(!reg))
             return -EINVAL;
         list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,

 static int
 dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
-                     const struct dpll_pin_ops *ops, void *priv)
+                     const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
     struct dpll_pin_registration *reg;
     struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
     xa_for_each(xa_dplls, i, ref) {
         if (ref->dpll != dpll)
             continue;
-        reg = dpll_pin_registration_find(ref, ops, priv);
+        reg = dpll_pin_registration_find(ref, ops, priv, cookie);
         if (reg) {
             refcount_inc(&ref->refcount);
             return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
     }
     reg->ops = ops;
     reg->priv = priv;
+    reg->cookie = cookie;
     if (ref_exists)
         refcount_inc(&ref->refcount);
     list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,

 static void
 dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
-                     const struct dpll_pin_ops *ops, void *priv)
+                     const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
     struct dpll_pin_registration *reg;
     struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
     xa_for_each(xa_dplls, i, ref) {
         if (ref->dpll != dpll)
             continue;
-        reg = dpll_pin_registration_find(ref, ops, priv);
+        reg = dpll_pin_registration_find(ref, ops, priv, cookie);
         if (WARN_ON(!reg))
             return;
         list_del(&reg->list);
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);

 static int
 __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
-                    const struct dpll_pin_ops *ops, void *priv)
+                    const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
     int ret;

-    ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+    ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
     if (ret)
         return ret;
-    ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+    ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
     if (ret)
         goto ref_pin_del;
     xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
     return ret;

 ref_pin_del:
-    dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+    dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
     return ret;
 }

@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
               dpll->clock_id == pin->clock_id)))
         ret = -EINVAL;
     else
-        ret = __dpll_pin_register(dpll, pin, ops, priv);
+        ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
     mutex_unlock(&dpll_lock);

     return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);

 static void
 __dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
-                      const struct dpll_pin_ops *ops, void *priv)
+                      const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
     ASSERT_DPLL_PIN_REGISTERED(pin);
-    dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
-    dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+    dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+    dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
     if (xa_empty(&pin->dpll_refs))
         xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
 }
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,

     mutex_lock(&dpll_lock);
     dpll_pin_delete_ntf(pin);
-    __dpll_pin_unregister(dpll, pin, ops, priv);
+    __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
     mutex_unlock(&dpll_lock);
 }
 EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
         return -EINVAL;

     mutex_lock(&dpll_lock);
-    ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+    ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
     if (ret)
         goto unlock;
     refcount_inc(&pin->refcount);
     xa_for_each(&parent->dpll_refs, i, ref) {
-        ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+        ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
         if (ret) {
             stop = i;
             goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
 dpll_unregister:
     xa_for_each(&parent->dpll_refs, i, ref)
         if (i < stop) {
-            __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+            __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+                                  parent);
             dpll_pin_delete_ntf(pin);
         }
     refcount_dec(&pin->refcount);
-    dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+    dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
 unlock:
     mutex_unlock(&dpll_lock);
     return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,

     mutex_lock(&dpll_lock);
     dpll_pin_delete_ntf(pin);
-    dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+    dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
     refcount_dec(&pin->refcount);
     xa_for_each(&pin->dpll_refs, i, ref)
-        __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+        __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
     mutex_unlock(&dpll_lock);
 }
 EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
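The dpll change threads a third key (a cookie, e.g. the parent pin) through the registration lookups so that two registrations sharing the same ops/priv pair can still be told apart. A toy illustration of that matching logic (all names and types here are invented, not the dpll API):

#include <stdio.h>
#include <stddef.h>

struct registration {
    const void *ops;
    void *priv;
    void *cookie;   /* e.g. which parent a registration was made for */
};

static struct registration *
find_reg(struct registration *regs, size_t n,
         const void *ops, void *priv, void *cookie)
{
    for (size_t i = 0; i < n; i++)
        if (regs[i].ops == ops && regs[i].priv == priv &&
            regs[i].cookie == cookie)
            return &regs[i];
    return NULL;
}

int main(void)
{
    static int ops, priv, parent_a, parent_b;
    struct registration regs[] = {
        { &ops, &priv, &parent_a },
        { &ops, &priv, &parent_b }, /* same ops/priv, different parent */
    };

    /* Without the cookie the two entries are indistinguishable; with it,
     * each parent's registration can be found (and removed) individually.
     */
    printf("parent_b reg found: %s\n",
           find_reg(regs, 2, &ops, &priv, &parent_b) ? "yes" : "no");
    return 0;
}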


@@ -819,7 +819,7 @@ static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
     p->bytes_moved += ctx.bytes_moved;
     if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-        amdgpu_bo_in_cpu_visible_vram(bo))
+        amdgpu_res_cpu_visible(adev, bo->tbo.resource))
         p->bytes_moved_vis += ctx.bytes_moved;

     if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {


@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
         return r;

     if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-        bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-        amdgpu_bo_in_cpu_visible_vram(bo))
+        amdgpu_res_cpu_visible(adev, bo->tbo.resource))
         amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                      ctx.bytes_moved);
     else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
                           struct amdgpu_mem_stats *stats)
 {
+    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+    struct ttm_resource *res = bo->tbo.resource;
     uint64_t size = amdgpu_bo_size(bo);
     struct drm_gem_object *obj;
     unsigned int domain;
     bool shared;

     /* Abort if the BO doesn't currently have a backing store */
-    if (!bo->tbo.resource)
+    if (!res)
         return;

     obj = &bo->tbo.base;
     shared = drm_gem_object_is_shared_for_memory_stats(obj);

-    domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+    domain = amdgpu_mem_type_to_domain(res->mem_type);
     switch (domain) {
     case AMDGPU_GEM_DOMAIN_VRAM:
         stats->vram += size;
-        if (amdgpu_bo_in_cpu_visible_vram(bo))
+        if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
             stats->visible_vram += size;
         if (shared)
             stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
     /* Remember that this BO was accessed by the CPU */
     abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

-    if (bo->resource->mem_type != TTM_PL_VRAM)
-        return 0;
-
-    if (amdgpu_bo_in_cpu_visible_vram(abo))
+    if (amdgpu_res_cpu_visible(adev, bo->resource))
         return 0;

     /* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)

     /* this should never happen */
     if (bo->resource->mem_type == TTM_PL_VRAM &&
-        !amdgpu_bo_in_cpu_visible_vram(abo))
+        !amdgpu_res_cpu_visible(adev, bo->resource))
         return VM_FAULT_SIGBUS;

     ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
  */
 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 {
+    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     struct dma_buf_attachment *attachment;
     struct dma_buf *dma_buf;
     const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)

     if (dma_resv_trylock(bo->tbo.base.resv)) {
         unsigned int domain;
+
         domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
         switch (domain) {
         case AMDGPU_GEM_DOMAIN_VRAM:
-            if (amdgpu_bo_in_cpu_visible_vram(bo))
+            if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                 placement = "VRAM VISIBLE";
             else
                 placement = "VRAM";


@@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
     return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }

-/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
-    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-    struct amdgpu_res_cursor cursor;
-
-    if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
-        return false;
-
-    amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
-    while (cursor.remaining) {
-        if (cursor.start < adev->gmc.visible_vram_size)
-            return true;
-
-        amdgpu_res_next(&cursor, cursor.size);
-    }
-
-    return false;
-}
-
 /**
  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
  */


@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,

         } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-                   amdgpu_bo_in_cpu_visible_vram(abo)) {
+                   amdgpu_res_cpu_visible(adev, bo->resource)) {

             /* Try evicting to the CPU inaccessible part of VRAM
              * first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
     return r;
 }

+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
+ *
+ * Returns: true if the full resource is CPU visible, false otherwise.
+ */
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+                            struct ttm_resource *res)
+{
+    struct amdgpu_res_cursor cursor;
+
+    if (!res)
+        return false;
+
+    if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+        res->mem_type == AMDGPU_PL_PREEMPT)
+        return true;
+
+    if (res->mem_type != TTM_PL_VRAM)
+        return false;
+
+    amdgpu_res_first(res, 0, res->size, &cursor);
+    while (cursor.remaining) {
+        if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+            return false;
+        amdgpu_res_next(&cursor, cursor.size);
+    }
+
+    return true;
+}
+
 /*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
  *
  * Called by amdgpu_bo_move()
  */
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-                               struct ttm_resource *mem)
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+                                struct ttm_resource *mem)
 {
-    u64 mem_size = (u64)mem->size;
-    struct amdgpu_res_cursor cursor;
-    u64 end;
-
-    if (mem->mem_type == TTM_PL_SYSTEM ||
-        mem->mem_type == TTM_PL_TT)
-        return true;
-    if (mem->mem_type != TTM_PL_VRAM)
+    if (!amdgpu_res_cpu_visible(adev, mem))
         return false;

-    amdgpu_res_first(mem, 0, mem_size, &cursor);
-    end = cursor.start + cursor.size;
-    while (cursor.remaining) {
-        amdgpu_res_next(&cursor, cursor.size);
-
-        if (!cursor.remaining)
-            break;
-
-        /* ttm_resource_ioremap only supports contiguous memory */
-        if (end != cursor.start)
-            return false;
-
-        end = cursor.start + cursor.size;
-    }
-
-    return end <= adev->gmc.visible_vram_size;
+    /* ttm_resource_ioremap only supports contiguous memory */
+    if (mem->mem_type == TTM_PL_VRAM &&
+        !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+        return false;
+
+    return true;
 }

 /*
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,

     if (r) {
         /* Check that all memory is CPU accessible */
-        if (!amdgpu_mem_visible(adev, old_mem) ||
-            !amdgpu_mem_visible(adev, new_mem)) {
+        if (!amdgpu_res_copyable(adev, old_mem) ||
+            !amdgpu_res_copyable(adev, new_mem)) {
             pr_err("Move buffer fallback to memcpy unavailable\n");
             return r;
         }
@@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
                                      struct ttm_resource *mem)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-    size_t bus_size = (size_t)mem->size;

     switch (mem->mem_type) {
     case TTM_PL_SYSTEM:
@@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
         break;
     case TTM_PL_VRAM:
         mem->bus.offset = mem->start << PAGE_SHIFT;
-        /* check if it's visible */
-        if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-            return -EINVAL;

         if (adev->mman.aper_base_kaddr &&
             mem->placement & TTM_PL_FLAG_CONTIGUOUS)


@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                       uint64_t start);

+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+                            struct ttm_resource *res);
+
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,


@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
     trace_amdgpu_vm_bo_map(bo_va, mapping);
 }

+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+                                       struct amdgpu_bo *bo,
+                                       uint64_t saddr,
+                                       uint64_t offset,
+                                       uint64_t size)
+{
+    uint64_t tmp, lpfn;
+
+    if (saddr & AMDGPU_GPU_PAGE_MASK
+        || offset & AMDGPU_GPU_PAGE_MASK
+        || size & AMDGPU_GPU_PAGE_MASK)
+        return -EINVAL;
+
+    if (check_add_overflow(saddr, size, &tmp)
+        || check_add_overflow(offset, size, &tmp)
+        || size == 0 /* which also leads to end < begin */)
+        return -EINVAL;
+
+    /* make sure object fit at this offset */
+    if (bo && offset + size > amdgpu_bo_size(bo))
+        return -EINVAL;
+
+    /* Ensure last pfn not exceed max_pfn */
+    lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+    if (lpfn >= adev->vm_manager.max_pfn)
+        return -EINVAL;
+
+    return 0;
+}
+
 /**
  * amdgpu_vm_bo_map - map bo inside a vm
  *
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
     struct amdgpu_bo *bo = bo_va->base.bo;
     struct amdgpu_vm *vm = bo_va->base.vm;
     uint64_t eaddr;
+    int r;

-    /* validate the parameters */
-    if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-        return -EINVAL;
-    if (saddr + size <= saddr || offset + size <= offset)
-        return -EINVAL;
-
-    /* make sure object fit at this offset */
-    eaddr = saddr + size - 1;
-    if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-        (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-        return -EINVAL;
+    r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+    if (r)
+        return r;

     saddr /= AMDGPU_GPU_PAGE_SIZE;
-    eaddr /= AMDGPU_GPU_PAGE_SIZE;
+    eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

     tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
     if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
     uint64_t eaddr;
     int r;

-    /* validate the parameters */
-    if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-        return -EINVAL;
-    if (saddr + size <= saddr || offset + size <= offset)
-        return -EINVAL;
-
-    /* make sure object fit at this offset */
-    eaddr = saddr + size - 1;
-    if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-        (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-        return -EINVAL;
+    r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+    if (r)
+        return r;

     /* Allocate all the needed memory */
     mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
     }

     saddr /= AMDGPU_GPU_PAGE_SIZE;
-    eaddr /= AMDGPU_GPU_PAGE_SIZE;
+    eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

     mapping->start = saddr;
     mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
     struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
     LIST_HEAD(removed);
     uint64_t eaddr;
+    int r;
+
+    r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+    if (r)
+        return r;

-    eaddr = saddr + size - 1;
     saddr /= AMDGPU_GPU_PAGE_SIZE;
-    eaddr /= AMDGPU_GPU_PAGE_SIZE;
+    eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

     /* Allocate all the needed memory */
     before = kzalloc(sizeof(*before), GFP_KERNEL);
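The new amdgpu_vm_verify_parameters() helper relies on check_add_overflow() instead of the old "saddr + size <= saddr" style wrap test. A standalone sketch of the same overflow check, using the GCC/Clang builtin that the kernel macro wraps (the values and function name here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

static int range_ok(uint64_t saddr, uint64_t size)
{
    uint64_t end;

    if (size == 0)
        return 0;                              /* empty range: end < begin */
    if (__builtin_add_overflow(saddr, size, &end))
        return 0;                              /* saddr + size wrapped around */
    return 1;
}

int main(void)
{
    printf("%d\n", range_ok(0x1000, 0x2000));             /* 1: fine */
    printf("%d\n", range_ok(UINT64_MAX - 0x10, 0x2000));  /* 0: wraps */
    printf("%d\n", range_ok(0x1000, 0));                  /* 0: empty */
    return 0;
}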


@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
     mutex_lock(&kfd_processes_mutex);

     if (kfd_is_locked()) {
-        mutex_unlock(&kfd_processes_mutex);
         pr_debug("KFD is locked! Cannot create process");
-        return ERR_PTR(-EINVAL);
+        process = ERR_PTR(-EINVAL);
+        goto out;
     }

     /* A prior open of /dev/kfd could have already created the process. */


@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
     enc10->base.hpd_source = init_data->hpd_source;
     enc10->base.connector = init_data->connector;

+    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+        enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+
     enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

     enc10->base.features = *enc_features;
-    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-        enc10->base.features.flags.bits.DP_IS_USB_C = 1;
-
-    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-        enc10->base.features.flags.bits.DP_IS_USB_C = 1;

     enc10->base.transmitter = init_data->transmitter;


@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
     enc10->base.hpd_source = init_data->hpd_source;
     enc10->base.connector = init_data->connector;

+    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+        enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+
     enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
     }

     enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
-    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-        enc10->base.features.flags.bits.DP_IS_USB_C = 1;

     if (bp_funcs->get_connector_speed_cap_info)
         result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,


@@ -23,6 +23,7 @@
  */

 #include "nouveau_drv.h"
+#include "nouveau_bios.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
      */
     if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
         if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-            fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+            fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
             return false;
         }
     }
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
     /* Apple iMac G4 NV17 */
     if (of_machine_is_compatible("PowerMac4,5")) {
-        fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
-        fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+        fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+        fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
         return;
     }
 #endif

     /* Make up some sane defaults */
     fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
-                         bios->legacy.i2c_indices.crt, 1, 1);
+                         bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);

     if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
         fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
                              bios->legacy.i2c_indices.tv,
-                             all_heads, 0);
+                             all_heads, DCB_OUTPUT_A);
     else if (bios->tmds.output0_script_ptr ||
              bios->tmds.output1_script_ptr)
         fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
                              bios->legacy.i2c_indices.panel,
-                             all_heads, 1);
+                             all_heads, DCB_OUTPUT_B);
 }

 static int


@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
     u8 *dpcd = nv_encoder->dp.dpcd;
     int ret = NOUVEAU_DP_NONE, hpd;

-    /* If we've already read the DPCD on an eDP device, we don't need to
-     * reread it as it won't change
+    /* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we
+     * haven't probed them once before.
      */
-    if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
-        dpcd[DP_DPCD_REV] != 0)
-        return NOUVEAU_DP_SST;
+    if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+        if (connector->status == connector_status_connected)
+            return NOUVEAU_DP_SST;
+        else if (connector->status == connector_status_disconnected)
+            return NOUVEAU_DP_NONE;
+    }
+
+    // Ensure that the aux bus is enabled for probing
+    drm_dp_dpcd_set_powered(&nv_connector->aux, true);

     mutex_lock(&nv_encoder->dp.hpd_irq_lock);
     if (mstm) {
@@ -293,6 +299,13 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
     if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
         nv50_mstm_remove(mstm);

+    /* GSP doesn't like when we try to do aux transactions on a port it considers disconnected,
+     * and since we don't really have a usecase for that anyway - just disable the aux bus here
+     * if we've decided the connector is disconnected
+     */
+    if (ret == NOUVEAU_DP_NONE)
+        drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
     mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
     return ret;
 }


@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
     void __iomem *map = NULL;

     /* Already mapped? */
-    if (refcount_inc_not_zero(&iobj->maps))
+    if (refcount_inc_not_zero(&iobj->maps)) {
+        /* read barrier match the wmb on refcount set */
+        smp_rmb();
         return iobj->map;
+    }

     /* Take the lock, and re-check that another thread hasn't
      * already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
             iobj->base.memory.ptrs = &nv50_instobj_fast;
         else
             iobj->base.memory.ptrs = &nv50_instobj_slow;
+        /* barrier to ensure the ptrs are written before refcount is set */
+        smp_wmb();
         refcount_set(&iobj->maps, 1);
     }


@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
     struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);

     mipi_dsi_detach(ctx->dsi);
-    mipi_dsi_device_unregister(ctx->dsi);
-
     drm_panel_remove(&ctx->panel);
 }


@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
     struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);

     mipi_dsi_detach(ctx->dsi);
-    mipi_dsi_device_unregister(ctx->dsi);
-
     drm_panel_remove(&ctx->panel);
 }


@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
 typedef struct _ATOM_PPLIB_STATE_V2
 {
       //number of valid dpm levels in this state; Driver uses it to calculate the whole
-      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
       UCHAR ucNumDPMLevels;

       //a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
       /**
       * Driver will read the first ucNumDPMLevels in this array
       */
-      UCHAR clockInfoIndex[1];
+      UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
 } ATOM_PPLIB_STATE_V2;

 typedef struct _StateArray{
     //how many states we have
     UCHAR ucNumEntries;

-    ATOM_PPLIB_STATE_V2 states[1];
+    ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
 }StateArray;

@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
     //sizeof(ATOM_PPLIB_CLOCK_INFO)
     UCHAR ucEntrySize;

-    UCHAR clockInfo[1];
+    UCHAR clockInfo[] __counted_by(ucNumEntries);
 }ClockInfoArray;

 typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
     //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
     UCHAR ucEntrySize;

-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
 }NonClockInfoArray;

 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
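The pptable hunk replaces one-element arrays with C99 flexible array members annotated with __counted_by(), sized with struct_size()-style arithmetic. A small user-space sketch of that allocation pattern (the struct and field names are invented, and __counted_by is stubbed out so the example builds on compilers that lack the attribute):

#include <stdio.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(member)   /* no-op fallback for older compilers */
#endif

struct state_array {
    unsigned char num_entries;
    unsigned char entries[] __counted_by(num_entries); /* was entries[1] */
};

int main(void)
{
    unsigned char n = 4;
    /* struct_size()-style sizing: header plus exactly n trailing elements */
    struct state_array *a = malloc(sizeof(*a) + n * sizeof(a->entries[0]));

    if (!a)
        return 1;
    a->num_entries = n;
    for (unsigned char i = 0; i < n; i++)
        a->entries[i] = i;
    printf("allocated %zu bytes for %u entries\n",
           sizeof(*a) + n * sizeof(a->entries[0]), a->num_entries);
    free(a);
    return 0;
}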


@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
         max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;

     for (i = 0; i < max_device; i++) {
-        ATOM_CONNECTOR_INFO_I2C ci =
-            supported_devices->info.asConnInfo[i];
+        ATOM_CONNECTOR_INFO_I2C ci;
+
+        if (frev > 1)
+            ci = supported_devices->info_2d1.asConnInfo[i];
+        else
+            ci = supported_devices->info.asConnInfo[i];

         bios_connectors[i].valid = false;


@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                   enum ttm_caching caching,
                                                   unsigned int order)
 {
-    if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+    if (pool->use_dma_alloc)
         return &pool->caching[caching].orders[order];

 #ifdef CONFIG_X86
     switch (caching) {
     case ttm_write_combined:
+        if (pool->nid != NUMA_NO_NODE)
+            return &pool->caching[caching].orders[order];
+
         if (pool->use_dma32)
             return &global_dma32_write_combined[order];

         return &global_write_combined[order];
     case ttm_uncached:
+        if (pool->nid != NUMA_NO_NODE)
+            return &pool->caching[caching].orders[order];
+
         if (pool->use_dma32)
             return &global_dma32_uncached[order];

@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
     pool->use_dma_alloc = use_dma_alloc;
     pool->use_dma32 = use_dma32;

-    if (use_dma_alloc || nid != NUMA_NO_NODE) {
-        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-            for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                ttm_pool_type_init(&pool->caching[i].orders[j],
-                                   pool, i, j);
+    for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+        for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+            struct ttm_pool_type *pt;
+
+            /* Initialize only pool types which are actually used */
+            pt = ttm_pool_select_type(pool, i, j);
+            if (pt != &pool->caching[i].orders[j])
+                continue;
+
+            ttm_pool_type_init(pt, pool, i, j);
+        }
     }
 }
 EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
     unsigned int i, j;

-    if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
-        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-            for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                ttm_pool_type_fini(&pool->caching[i].orders[j]);
+    for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+        for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+            struct ttm_pool_type *pt;
+
+            pt = ttm_pool_select_type(pool, i, j);
+            if (pt != &pool->caching[i].orders[j])
+                continue;
+
+            ttm_pool_type_fini(pt);
+        }
     }

     /* We removed the pool types from the LRU, but we need to also make sure


@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv; struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_BIN]; u64 runtime = local_clock() - file->start_ns[V3D_BIN];
file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
file->jobs_sent[V3D_BIN]++; file->jobs_sent[V3D_BIN]++;
v3d->queue[V3D_BIN].jobs_sent++; v3d->queue[V3D_BIN].jobs_sent++;
@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv; struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_RENDER]; u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
file->jobs_sent[V3D_RENDER]++; file->jobs_sent[V3D_RENDER]++;
v3d->queue[V3D_RENDER].jobs_sent++; v3d->queue[V3D_RENDER].jobs_sent++;
@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv; struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_CSD]; u64 runtime = local_clock() - file->start_ns[V3D_CSD];
file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
file->jobs_sent[V3D_CSD]++; file->jobs_sent[V3D_CSD]++;
v3d->queue[V3D_CSD].jobs_sent++; v3d->queue[V3D_CSD].jobs_sent++;
@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv; struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_TFU]; u64 runtime = local_clock() - file->start_ns[V3D_TFU];
file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
file->jobs_sent[V3D_TFU]++; file->jobs_sent[V3D_TFU]++;
v3d->queue[V3D_TFU].jobs_sent++; v3d->queue[V3D_TFU].jobs_sent++;
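The v3d hunks above drop the extra enabled_ns accumulation from the interrupt paths so each interval is only accounted once. As a small, generic illustration (hypothetical names, not the driver's code) of per-job time accounting with local_clock(): sample the clock once and add that delta to each counter exactly once.

#include <linux/types.h>
#include <linux/sched/clock.h>	/* local_clock() */

struct job_stats {
	u64 start_ns;		/* set when the job was started */
	u64 enabled_ns;		/* total time spent running jobs */
	u64 jobs_done;
};

static void job_finish(struct job_stats *s)
{
	u64 runtime = local_clock() - s->start_ns;	/* sample the clock once */

	s->enabled_ns += runtime;	/* account the interval exactly once */
	s->jobs_done++;
	s->start_ns = 0;
}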


@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false .no_wait_gpu = false
}; };
u32 j, initial_line = dst_offset / dst_stride; u32 j, initial_line = dst_offset / dst_stride;
struct vmw_bo_blit_line_data d; struct vmw_bo_blit_line_data d = {0};
int ret = 0; int ret = 0;
struct page **dst_pages = NULL;
struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */ /* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count)) if (!(dst->pin_count))
@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret; return ret;
} }
if (!src->ttm->pages && src->ttm->sg) {
src_pages = kvmalloc_array(src->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!src_pages)
return -ENOMEM;
ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
src->ttm->num_pages);
if (ret)
goto out;
}
if (!dst->ttm->pages && dst->ttm->sg) {
dst_pages = kvmalloc_array(dst->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!dst_pages) {
ret = -ENOMEM;
goto out;
}
ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
dst->ttm->num_pages);
if (ret)
goto out;
}
d.mapped_dst = 0; d.mapped_dst = 0;
d.mapped_src = 0; d.mapped_src = 0;
d.dst_addr = NULL; d.dst_addr = NULL;
d.src_addr = NULL; d.src_addr = NULL;
d.dst_pages = dst->ttm->pages; d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
d.src_pages = src->ttm->pages; d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size); d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size); d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL); d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@ -504,6 +529,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
kunmap_atomic(d.src_addr); kunmap_atomic(d.src_addr);
if (d.dst_addr) if (d.dst_addr)
kunmap_atomic(d.dst_addr); kunmap_atomic(d.dst_addr);
if (src_pages)
kvfree(src_pages);
if (dst_pages)
kvfree(dst_pages);
return ret; return ret;
} }
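The blit hunk above falls back to building a struct page * array from the object's sg_table when ttm->pages is not populated (dma-buf imported buffers), and releases it with kvfree() on the way out. A reduced sketch of that fallback, with hypothetical names, assuming kvmalloc_array() and drm_prime_sg_to_page_array() as used in the hunk:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>		/* kvmalloc_array(), kvfree() */
#include <drm/drm_prime.h>	/* drm_prime_sg_to_page_array() */

static struct page **pages_from_sg(struct sg_table *sgt, int num_pages)
{
	struct page **pages;
	int ret;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Expand the scatter-gather entries into one page pointer per page. */
	ret = drm_prime_sg_to_page_array(sgt, pages, num_pages);
	if (ret) {
		kvfree(pages);
		return ERR_PTR(ret);
	}
	return pages;
}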


@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{ {
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel, .interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false .no_wait_gpu = false,
.resv = params->resv,
}; };
struct ttm_device *bdev = &dev_priv->bdev; struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm; struct drm_device *vdev = &dev_priv->drm;
@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain); vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type, ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
&vmw_bo->placement, 0, &ctx, NULL, &vmw_bo->placement, 0, &ctx,
NULL, destroy); params->sg, params->resv, destroy);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;


@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type; enum ttm_bo_type bo_type;
size_t size; size_t size;
bool pin; bool pin;
struct dma_resv *resv;
struct sg_table *sg;
}; };
/** /**


@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle, .prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd, .prime_handle_to_fd = vmw_prime_handle_to_fd,
.gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops, .fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME, .name = VMWGFX_DRIVER_NAME,


@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, uint32_t handle, uint32_t flags,
int *prime_fd); int *prime_fd);
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *table);
/* /*
* MemoryOBject management - vmwgfx_mob.c * MemoryOBject management - vmwgfx_mob.c


@ -149,6 +149,38 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
return ret; return ret;
} }
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *table)
{
int ret;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_gem_object *gem = NULL;
struct vmw_bo *vbo;
struct vmw_bo_params params = {
.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
.resv = attach->dmabuf->resv,
.sg = table,
};
dma_resv_lock(params.resv, NULL);
ret = vmw_bo_create(dev_priv, &params, &vbo);
if (ret != 0)
goto out_no_bo;
vbo->tbo.base.funcs = &vmw_gem_object_funcs;
gem = &vbo->tbo.base;
out_no_bo:
dma_resv_unlock(params.resv);
return gem;
}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)


@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc); crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc); struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@ -940,9 +941,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask & bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary); drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */ /*
if (has_primary != new_state->enable) * This is fine in general, but broken userspace might expect
return -EINVAL; * some actual rendering so give a clue as why it's blank.
*/
if (new_state->enable && !has_primary)
drm_dbg_driver(&vmw->drm,
"CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask && if (new_state->connector_mask != connector_mask &&


@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = { static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
}; };
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = { static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {


@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle) int fd, u32 *handle)
{ {
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
return ttm_prime_fd_to_handle(tfile, fd, handle); if (ret)
ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
return ret;
} }
int vmw_prime_handle_to_fd(struct drm_device *dev, int vmw_prime_handle_to_fd(struct drm_device *dev,
@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd) int *prime_fd)
{ {
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); int ret;
if (handle > VMWGFX_NUM_MOB)
ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
else
ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
return ret;
} }


@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) { switch (dev_priv->map_mode) {
case vmw_dma_map_bind: case vmw_dma_map_bind:
case vmw_dma_map_populate: case vmw_dma_map_populate:
vsgt->sgt = &vmw_tt->sgt; if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
ret = sg_alloc_table_from_pages_segment( vsgt->sgt = vmw_tt->dma_ttm.sg;
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, } else {
(unsigned long)vsgt->num_pages << PAGE_SHIFT, vsgt->sgt = &vmw_tt->sgt;
dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL); ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
if (ret) vsgt->pages, vsgt->num_pages, 0,
goto out_sg_alloc_fail; (unsigned long)vsgt->num_pages << PAGE_SHIFT,
dma_get_max_seg_size(dev_priv->drm.dev),
GFP_KERNEL);
if (ret)
goto out_sg_alloc_fail;
}
ret = vmw_ttm_map_for_dma(vmw_tt); ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0; return 0;
out_map_fail: out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt); drm_warn(&dev_priv->drm, "VSG table map failed!");
vmw_tt->vsgt.sgt = NULL; sg_free_table(vsgt->sgt);
vsgt->sgt = NULL;
out_sg_alloc_fail: out_sg_alloc_fail:
return ret; return ret;
} }
@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev, static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{ {
int ret; bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
/* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm)) if (ttm_tt_is_populated(ttm))
return 0; return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); if (external && ttm->sg)
return drm_prime_sg_to_dma_addr_array(ttm->sg,
ttm->dma_address,
ttm->num_pages);
return ret; return ttm_pool_alloc(&bdev->pool, ttm, ctx);
} }
static void vmw_ttm_unpopulate(struct ttm_device *bdev, static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{ {
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm); dma_ttm);
bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
if (external)
return;
vmw_ttm_unbind(bdev, ttm); vmw_ttm_unbind(bdev, ttm);
@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{ {
struct vmw_ttm_tt *vmw_be; struct vmw_ttm_tt *vmw_be;
int ret; int ret;
bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL); vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be) if (!vmw_be)
@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev); vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL; vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) if (external)
page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags, ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached); ttm_cached);
else else


@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL); ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret) if (ret)
return ret; goto err;
if (!(bo->flags & XE_BO_SCANOUT_BIT)) { if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/* /*
@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/ */
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) { if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm); ttm_bo_unreserve(&bo->ttm);
return -EINVAL; ret = -EINVAL;
goto err;
} }
bo->flags |= XE_BO_SCANOUT_BIT; bo->flags |= XE_BO_SCANOUT_BIT;
} }
ttm_bo_unreserve(&bo->ttm); ttm_bo_unreserve(&bo->ttm);
return 0;
err:
xe_bo_put(bo);
return ret; return ret;
} }


@ -1577,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--; xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION)) else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--; xe->usm.num_vm_in_non_fault_mode--;
if (vm->usm.asid) {
void *lookup;
xe_assert(xe, xe->info.has_asid);
xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
xe_assert(xe, lookup == vm);
}
mutex_unlock(&xe->usm.lock); mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id) for_each_tile(tile, xe, id)
@ -1592,24 +1602,15 @@ static void vm_destroy_work_func(struct work_struct *w)
struct xe_device *xe = vm->xe; struct xe_device *xe = vm->xe;
struct xe_tile *tile; struct xe_tile *tile;
u8 id; u8 id;
void *lookup;
/* xe_vm_close_and_put was not called? */ /* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size); xe_assert(xe, !vm->size);
mutex_destroy(&vm->snap_mutex); mutex_destroy(&vm->snap_mutex);
if (!(vm->flags & XE_VM_FLAG_MIGRATION)) { if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_put(xe); xe_device_mem_access_put(xe);
if (xe->info.has_asid && vm->usm.asid) {
mutex_lock(&xe->usm.lock);
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
xe_assert(xe, lookup == vm);
mutex_unlock(&xe->usm.lock);
}
}
for_each_tile(tile, xe, id) for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]); XE_WARN_ON(vm->pt_root[id]);
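The xe hunks above move the ASID erase into xe_vm_close_and_put(), under the same usm.lock section that updates the VM accounting. As a generic illustration of the alloc/erase symmetry (hypothetical names, not the driver's code), an ID handed out with xa_alloc() is released under the same lock used at allocation time, and the erased entry is checked against the object being torn down:

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct my_obj;

static DEFINE_XARRAY_ALLOC(asid_to_obj);
static DEFINE_MUTEX(asid_lock);

static int obj_assign_asid(struct my_obj *obj, u32 *asid)
{
	int ret;

	mutex_lock(&asid_lock);
	ret = xa_alloc(&asid_to_obj, asid, obj, XA_LIMIT(1, 0xffff), GFP_KERNEL);
	mutex_unlock(&asid_lock);
	return ret;
}

static void obj_release_asid(struct my_obj *obj, u32 asid)
{
	void *old;

	mutex_lock(&asid_lock);
	old = xa_erase(&asid_to_obj, asid);
	WARN_ON(old != obj);	/* the mapping must still point at this object */
	mutex_unlock(&asid_lock);
}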


@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
} }
break; break;
case REPORT_TYPE_MOUSE: case REPORT_TYPE_MOUSE:
workitem->reports_supported |= STD_MOUSE | HIDPP; workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
if (djrcv_dev->type == recvr_type_mouse_only)
workitem->reports_supported |= MULTIMEDIA;
break; break;
} }
} }

View File

@ -944,9 +944,11 @@ static void mcp2221_hid_unregister(void *ptr)
/* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */ /* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
static void mcp2221_remove(struct hid_device *hdev) static void mcp2221_remove(struct hid_device *hdev)
{ {
#if IS_REACHABLE(CONFIG_IIO)
struct mcp2221 *mcp = hid_get_drvdata(hdev); struct mcp2221 *mcp = hid_get_drvdata(hdev);
cancel_delayed_work_sync(&mcp->init_work); cancel_delayed_work_sync(&mcp->init_work);
#endif
} }
#if IS_REACHABLE(CONFIG_IIO) #if IS_REACHABLE(CONFIG_IIO)


@ -481,10 +481,10 @@ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_TR, JC_BTN_R, }, { BTN_TR, JC_BTN_R, },
{ BTN_TR2, JC_BTN_LSTICK, }, /* ZR */ { BTN_TR2, JC_BTN_LSTICK, }, /* ZR */
{ BTN_START, JC_BTN_PLUS, }, { BTN_START, JC_BTN_PLUS, },
{ BTN_FORWARD, JC_BTN_Y, }, /* C UP */ { BTN_SELECT, JC_BTN_Y, }, /* C UP */
{ BTN_BACK, JC_BTN_ZR, }, /* C DOWN */ { BTN_X, JC_BTN_ZR, }, /* C DOWN */
{ BTN_LEFT, JC_BTN_X, }, /* C LEFT */ { BTN_Y, JC_BTN_X, }, /* C LEFT */
{ BTN_RIGHT, JC_BTN_MINUS, }, /* C RIGHT */ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */
{ BTN_MODE, JC_BTN_HOME, }, { BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, }, { BTN_Z, JC_BTN_CAP, },
{ /* sentinel */ }, { /* sentinel */ },


@ -64,7 +64,6 @@
/* flags */ /* flags */
#define I2C_HID_STARTED 0 #define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1 #define I2C_HID_RESET_PENDING 1
#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00 #define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01 #define I2C_HID_PWR_SLEEP 0x01
@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len; msgs[n].len = recv_len;
msgs[n].buf = recv_buf; msgs[n].buf = recv_buf;
n++; n++;
set_bit(I2C_HID_READ_PENDING, &ihid->flags);
} }
ret = i2c_transfer(client->adapter, msgs, n); ret = i2c_transfer(client->adapter, msgs, n);
if (recv_len)
clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
if (ret != n) if (ret != n)
return ret < 0 ? ret : -EIO; return ret < 0 ? ret : -EIO;
@ -556,9 +550,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{ {
struct i2c_hid *ihid = dev_id; struct i2c_hid *ihid = dev_id;
if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
return IRQ_HANDLED;
i2c_hid_get_input(ihid); i2c_hid_get_input(ihid);
return IRQ_HANDLED; return IRQ_HANDLED;
@ -735,12 +726,15 @@ static int i2c_hid_parse(struct hid_device *hid)
mutex_lock(&ihid->reset_lock); mutex_lock(&ihid->reset_lock);
do { do {
ret = i2c_hid_start_hwreset(ihid); ret = i2c_hid_start_hwreset(ihid);
if (ret) if (ret == 0)
ret = i2c_hid_finish_hwreset(ihid);
else
msleep(1000); msleep(1000);
} while (tries-- > 0 && ret); } while (tries-- > 0 && ret);
mutex_unlock(&ihid->reset_lock);
if (ret) if (ret)
goto abort_reset; return ret;
use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name, use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
&rsize); &rsize);
@ -750,11 +744,8 @@ static int i2c_hid_parse(struct hid_device *hid)
i2c_hid_dbg(ihid, "Using a HID report descriptor override\n"); i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
} else { } else {
rdesc = kzalloc(rsize, GFP_KERNEL); rdesc = kzalloc(rsize, GFP_KERNEL);
if (!rdesc)
if (!rdesc) { return -ENOMEM;
ret = -ENOMEM;
goto abort_reset;
}
i2c_hid_dbg(ihid, "asking HID report descriptor\n"); i2c_hid_dbg(ihid, "asking HID report descriptor\n");
@ -763,23 +754,10 @@ static int i2c_hid_parse(struct hid_device *hid)
rdesc, rsize); rdesc, rsize);
if (ret) { if (ret) {
hid_err(hid, "reading report descriptor failed\n"); hid_err(hid, "reading report descriptor failed\n");
goto abort_reset; goto out;
} }
} }
/*
* Windows directly reads the report-descriptor after sending reset
* and then waits for resets completion afterwards. Some touchpads
* actually wait for the report-descriptor to be read before signalling
* reset completion.
*/
ret = i2c_hid_finish_hwreset(ihid);
abort_reset:
clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
mutex_unlock(&ihid->reset_lock);
if (ret)
goto out;
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc); i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize); ret = hid_parse_report(hid, rdesc, rsize);


@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev) if (!dev)
return NULL; return NULL;
dev->devc = &pdev->dev;
ishtp_device_init(dev); ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready); init_waitqueue_head(&dev->wait_hw_ready);
@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
} }
dev->ops = &ish_hw_ops; dev->ops = &ish_hw_ops;
dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr); dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev; return dev;
} }

View File

@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
} }
} }
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id) static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
enum ib_cm_state old_state)
{ {
struct cm_id_private *cm_id_priv; struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id); cm_id_priv = container_of(cm_id, struct cm_id_private, id);
pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__, pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount)); cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
} }
static void cm_destroy_id(struct ib_cm_id *cm_id, int err) static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{ {
struct cm_id_private *cm_id_priv; struct cm_id_private *cm_id_priv;
enum ib_cm_state old_state;
struct cm_work *work; struct cm_work *work;
int ret; int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id); cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock); spin_lock_irq(&cm_id_priv->lock);
old_state = cm_id->state;
retest: retest:
switch (cm_id->state) { switch (cm_id->state) {
case IB_CM_LISTEN: case IB_CM_LISTEN:
@ -1151,7 +1154,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
msecs_to_jiffies( msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT)); CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */ if (!ret) /* timeout happened */
cm_destroy_id_wait_timeout(cm_id); cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret); } while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL) while ((work = cm_dequeue_work(cm_id_priv)) != NULL)


@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev; mdev = dev->mdev;
mdev_port_num = 1; mdev_port_num = 1;
} }
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) { if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
!mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */ /* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev; mdev = dev->mdev;
mdev_port_num = 1; mdev_port_num = 1;


@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm) if (rxe->tfm)
crypto_free_shash(rxe->tfm); crypto_free_shash(rxe->tfm);
mutex_destroy(&rxe->usdev_lock);
} }
/* initialize rxe device parameters */ /* initialize rxe device parameters */


@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes; path->num_nodes = num_nodes;
mutex_lock(&icc_bw_lock);
for (i = num_nodes - 1; i >= 0; i--) { for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++; node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list); hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse; node = node->reverse;
} }
mutex_unlock(&icc_bw_lock);
return path; return path;
} }
@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret); pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock); mutex_lock(&icc_lock);
mutex_lock(&icc_bw_lock);
for (i = 0; i < path->num_nodes; i++) { for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node; node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node); hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users)) if (!WARN_ON(!node->provider->users))
node->provider->users--; node->provider->users--;
} }
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock); mutex_unlock(&icc_lock);
kfree_const(path->name); kfree_const(path->name);


@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC }, .links = { X1E80100_SLAVE_A2NOC_SNOC },
}; };
static struct qcom_icc_node ddr_perf_mode_master = {
.name = "ddr_perf_mode_master",
.id = X1E80100_MASTER_DDR_PERF_MODE,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { X1E80100_SLAVE_DDR_PERF_MODE },
};
static struct qcom_icc_node qup0_core_master = { static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master", .name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0, .id = X1E80100_MASTER_QUP_CORE_0,
@ -688,14 +679,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC }, .links = { X1E80100_MASTER_A2NOC_SNOC },
}; };
static struct qcom_icc_node ddr_perf_mode_slave = {
.name = "ddr_perf_mode_slave",
.id = X1E80100_SLAVE_DDR_PERF_MODE,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup0_core_slave = { static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave", .name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0, .id = X1E80100_SLAVE_QUP_CORE_0,
@ -1377,12 +1360,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi }, .nodes = { &ebi },
}; };
static struct qcom_icc_bcm bcm_acv_perf = {
.name = "ACV_PERF",
.num_nodes = 1,
.nodes = { &ddr_perf_mode_slave },
};
static struct qcom_icc_bcm bcm_ce0 = { static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0", .name = "CE0",
.num_nodes = 1, .num_nodes = 1,
@ -1583,18 +1560,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
}; };
static struct qcom_icc_bcm * const clk_virt_bcms[] = { static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_acv_perf,
&bcm_qup0, &bcm_qup0,
&bcm_qup1, &bcm_qup1,
&bcm_qup2, &bcm_qup2,
}; };
static struct qcom_icc_node * const clk_virt_nodes[] = { static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master, [MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master, [MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master, [MASTER_QUP_CORE_2] = &qup2_core_master,
[SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave, [SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave, [SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave, [SLAVE_QUP_CORE_2] = &qup2_core_slave,


@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL depends on DEBUG_KERNEL
depends on FAULT_INJECTION depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU depends on RUNTIME_TESTING_MENU
select IOMMUFD_DRIVER
default n default n
help help
This is dangerous, do not enable unless running This is dangerous, do not enable unless running


@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else { } else {
pcr->card_removed |= SD_EXIST; pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST; pcr->card_inserted &= ~SD_EXIST;
if (PCI_PID(pcr) == PID_5261) { if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS, rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0); RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS; pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;


@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},


@ -400,25 +400,40 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev) static int mei_vsc_suspend(struct device *dev)
{ {
struct mei_device *mei_dev = dev_get_drvdata(dev); struct mei_device *mei_dev = dev_get_drvdata(dev);
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev); mei_stop(mei_dev);
mei_disable_interrupts(mei_dev);
vsc_tp_free_irq(hw->tp);
return 0; return 0;
} }
static int mei_vsc_resume(struct device *dev) static int mei_vsc_resume(struct device *dev)
{ {
struct mei_device *mei_dev = dev_get_drvdata(dev); struct mei_device *mei_dev = dev_get_drvdata(dev);
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret; int ret;
ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
ret = mei_restart(mei_dev); ret = mei_restart(mei_dev);
if (ret) if (ret)
return ret; goto err_free;
/* start timer if stopped in suspend */ /* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ); schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0; return 0;
err_free:
vsc_tp_free_irq(hw->tp);
return ret;
} }
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);


@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{} {}
}; };
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
atomic_inc(&tp->assert_cnt);
wake_up(&tp->xfer_wait);
return IRQ_WAKE_THREAD;
}
static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
if (tp->event_notify)
tp->event_notify(tp->event_notify_context);
return IRQ_HANDLED;
}
/* wakeup firmware and wait for response */ /* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp) static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{ {
@ -383,6 +404,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
} }
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP); EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
* vsc_tp_request_irq - request irq for vsc_tp device
* @tp: vsc_tp device handle
*/
int vsc_tp_request_irq(struct vsc_tp *tp)
{
struct spi_device *spi = tp->spi;
struct device *dev = &spi->dev;
int ret;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), tp);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
/**
* vsc_tp_free_irq - free irq for vsc_tp device
* @tp: vsc_tp device handle
*/
void vsc_tp_free_irq(struct vsc_tp *tp)
{
free_irq(tp->spi->irq, tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
/** /**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle * @tp: vsc_tp device handle
@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
} }
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP); EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
atomic_inc(&tp->assert_cnt);
return IRQ_WAKE_THREAD;
}
static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
wake_up(&tp->xfer_wait);
if (tp->event_notify)
tp->event_notify(tp->event_notify_context);
return IRQ_HANDLED;
}
static int vsc_tp_match_any(struct acpi_device *adev, void *data) static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{ {
struct acpi_device **__adev = data; struct acpi_device **__adev = data;
@ -490,10 +521,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi; tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr, ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
vsc_tp_thread_isr, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT, dev_name(dev), tp);
dev_name(dev), tp);
if (ret) if (ret)
return ret; return ret;
@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock: err_destroy_lock:
mutex_destroy(&tp->mutex); mutex_destroy(&tp->mutex);
free_irq(spi->irq, tp);
return ret; return ret;
} }
@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev); platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex); mutex_destroy(&tp->mutex);
free_irq(spi->irq, tp);
} }
static const struct acpi_device_id vsc_tp_acpi_ids[] = { static const struct acpi_device_id vsc_tp_acpi_ids[] = {


@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context); void *context);
int vsc_tp_request_irq(struct vsc_tp *tp);
void vsc_tp_free_irq(struct vsc_tp *tp);
void vsc_tp_intr_enable(struct vsc_tp *tp); void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp); void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp); void vsc_tp_intr_synchronize(struct vsc_tp *tp);
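The mei/vsc hunks above stop using devm for the interrupt so it can be freed in suspend and re-requested in resume, and they move the wake_up() into the hard handler. A condensed sketch of the hard/threaded handler split behind vsc_tp_request_irq()/vsc_tp_free_irq(), with hypothetical names, assuming request_threaded_irq()/free_irq() semantics as shown in the hunks:

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

struct my_tp {
	int irq;
	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;
	void (*event_notify)(void *context);
	void *event_notify_context;
};

static irqreturn_t my_tp_isr(int irq, void *data)
{
	struct my_tp *tp = data;

	/* Hard handler: record the edge and wake waiters, defer the rest. */
	atomic_inc(&tp->assert_cnt);
	wake_up(&tp->xfer_wait);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_tp_thread_isr(int irq, void *data)
{
	struct my_tp *tp = data;

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);
	return IRQ_HANDLED;
}

static int my_tp_request_irq(struct my_tp *tp)
{
	/* IRQF_ONESHOT keeps the line masked until the threaded handler
	 * finishes; the matching free_irq() keeps suspend/resume symmetric. */
	return request_threaded_irq(tp->irq, my_tp_isr, my_tp_thread_isr,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "my_tp", tp);
}

static void my_tp_free_irq(struct my_tp *tp)
{
	free_irq(tp->irq, tp);
}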


@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported); phy_interface_set_rgmii(supported);
} }
static void
mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err) {
dev_err(chip->dev, "p%d: failed to read port status\n", port);
return;
}
switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
__set_bit(PHY_INTERFACE_MODE_REVMII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
__set_bit(PHY_INTERFACE_MODE_MII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
__set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
__set_bit(PHY_INTERFACE_MODE_RMII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
__set_bit(PHY_INTERFACE_MODE_RGMII, supported);
break;
default:
dev_err(chip->dev,
"p%d: invalid port mode in status register: %04x\n",
port, reg);
}
}
static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config) struct phylink_config *config)
{ {
unsigned long *supported = config->supported_interfaces; if (!mv88e6xxx_phy_is_internal(chip, port))
mv88e6250_setup_supported_interfaces(chip, port, config);
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100; config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
} }


@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900 #define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00 #define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00 #define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00 /* - Modes with PHY suffix use output instead of input clock
#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00 * - Modes without RMII or RGMII use MII
#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00 * - Modes without speed do not have a fixed speed specified in the manual
#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00 * ("DC to x MHz" - variable clock support?)
*/
#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800 #define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400 #define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300 #define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300


@ -436,10 +436,8 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ); umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
} }
static int bcmasp_tx_poll(struct napi_struct *napi, int budget) static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{ {
struct bcmasp_intf *intf =
container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64; struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev; struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0; unsigned long read, released = 0;
@ -482,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT); DESC_RING_COUNT);
} }
/* Ensure all descriptors have been written to DRAM for the hardware return released;
* to see updated contents. }
*/
wmb(); static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
struct bcmasp_intf *intf =
container_of(napi, struct bcmasp_intf, tx_napi);
int released = 0;
released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi); napi_complete(&intf->tx_napi);
@ -797,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_dma_read = intf->tx_spb_dma_addr; intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
intf->tx_spb_index = 0; intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0; intf->tx_spb_clean_index = 0;
memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */ /* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@ -885,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0); } while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL); tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
bcmasp_tx_reclaim(intf);
umac_enable_set(intf, UMC_CMD_TX_EN, 0); umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev); phy_stop(dev->phydev);

Some files were not shown because too many files have changed in this diff.