mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 17:22:07 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

net/sched/act_ct.c
  26488172b0 ("net/sched: Fix UAF when resolving a clash")
  3abbd7ed8b ("act_ct: prepare for stolen verdict coming from conntrack and nat engine")

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

This commit is contained in: commit 7c8267275d

.mailmap | 1
@@ -384,6 +384,7 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
+Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
 Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
CREDITS | 4
@@ -3150,9 +3150,11 @@ S: Triftstra=DFe 55
 S: 13353 Berlin
 S: Germany
 
-N: Gustavo Pimental
+N: Gustavo Pimentel
 E: gustavo.pimentel@synopsys.com
 D: PCI driver for Synopsys DesignWare
+D: Synopsys DesignWare eDMA driver
+D: Synopsys DesignWare xData traffic generator
 
 N: Emanuel Pirker
 E: epirker@edu.uni-klu.ac.at
@@ -62,10 +62,10 @@ cmodx.c::
       printf("Value before cmodx: %d\n", value);
 
       // Call prctl before first fence.i is called inside modify_instruction
-      prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_ON, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
+      prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
       modify_instruction();
       // Call prctl after final fence.i is called in process
-      prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_OFF, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
+      prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF, PR_RISCV_SCOPE_PER_PROCESS);
 
       value = get_value();
       printf("Value after cmodx: %d\n", value);
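The fix swaps the first two prctl() arguments: the operation is always PR_RISCV_SET_ICACHE_FLUSH_CTX, and on/off is selected by the context argument. As a rough self-contained illustration (plain C; assumes a v6.10-era <linux/prctl.h> defining these constants, and hypothetical modify_instruction()/get_value() helpers standing in for the ones cmodx.c defines):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Hypothetical stand-ins for the helpers cmodx.c provides. */
    extern void modify_instruction(void);
    extern int get_value(void);

    int main(void)
    {
        int value = get_value();

        printf("Value before cmodx: %d\n", value);

        /* Opt in to icache flushing before the first fence.i runs. */
        if (prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
                  PR_RISCV_SCOPE_PER_PROCESS))
            perror("prctl");

        modify_instruction();

        /* Opt back out once the final fence.i has run. */
        if (prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF,
                  PR_RISCV_SCOPE_PER_PROCESS))
            perror("prctl");

        value = get_value();
        printf("Value after cmodx: %d\n", value);
        return 0;
    }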
@@ -49,7 +49,7 @@ example usage
     $ devlink region show [ DEV/REGION ]
     $ devlink region del DEV/REGION snapshot SNAPSHOT_ID
     $ devlink region dump DEV/REGION [ snapshot SNAPSHOT_ID ]
-    $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length length
+    $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length LENGTH
 
     # Show all of the exposed regions with region sizes:
     $ devlink region show
@@ -6239,9 +6239,8 @@ S: Maintained
 F:  drivers/usb/dwc3/
 
 DESIGNWARE XDATA IP DRIVER
-M:  Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 L:  linux-pci@vger.kernel.org
-S:  Maintained
+S:  Orphan
 F:  Documentation/misc-devices/dw-xdata-pcie.rst
 F:  drivers/misc/dw-xdata-pcie.c
 
@@ -14475,7 +14474,7 @@ MEMORY MAPPING
 M:  Andrew Morton <akpm@linux-foundation.org>
 R:  Liam R. Howlett <Liam.Howlett@oracle.com>
 R:  Vlastimil Babka <vbabka@suse.cz>
-R:  Lorenzo Stoakes <lstoakes@gmail.com>
+R:  Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 L:  linux-mm@kvack.org
 S:  Maintained
 W:  http://www.linux-mm.org
Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
@@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 {
     struct eeh_dev *edev;
     struct pci_dev *pdev;
+    struct pci_bus *bus = NULL;
 
     if (pe->type & EEH_PE_PHB)
         return pe->phb->bus;
@@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 
     /* Retrieve the parent PCI bus of first (top) PCI device */
     edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
+    pci_lock_rescan_remove();
     pdev = eeh_dev_to_pci_dev(edev);
     if (pdev)
-        return pdev->bus;
+        bus = pdev->bus;
+    pci_unlock_rescan_remove();
 
-    return NULL;
+    return bus;
 }
@@ -647,8 +647,9 @@ __after_prom_start:
  * Note: This process overwrites the OF exception vectors.
  */
     LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
-    mr.    r4,r26      /* In some cases the loader may  */
-    beq    9f          /* have already put us at zero */
+    mr     r4,r26      /* Load the virtual source address into r4 */
+    cmpld  r3,r4       /* Check if source == dest */
+    beq    9f          /* If so skip the copy */
     li     r6,0x100    /* Start offset, the first 0x100 */
                        /* bytes were copied earlier.    */
 
@@ -27,6 +27,7 @@
 #include <asm/paca.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>	/* _end */
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/svm.h>
@@ -317,6 +318,16 @@ void default_machine_kexec(struct kimage *image)
     if (!kdump_in_progress())
         kexec_prepare_cpus();
 
+#ifdef CONFIG_PPC_PSERIES
+    /*
+     * This must be done after other CPUs have shut down, otherwise they
+     * could execute the 'scv' instruction, which is not supported with
+     * reloc disabled (see configure_exceptions()).
+     */
+    if (firmware_has_feature(FW_FEATURE_SET_MODE))
+        pseries_disable_reloc_on_exc();
+#endif
+
     printk("kexec: Starting switchover sequence.\n");
 
     /* switch to a staticly allocated stack. Based on irq stack code.
@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
     } else
         xics_kexec_teardown_cpu(secondary);
 }
-
-void pseries_machine_kexec(struct kimage *image)
-{
-    if (firmware_has_feature(FW_FEATURE_SET_MODE))
-        pseries_disable_reloc_on_exc();
-
-    default_machine_kexec(image);
-}
@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
 #endif
 
 extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
-void pseries_machine_kexec(struct kimage *image);
 
 extern void pSeries_final_fixup(void);
 
@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
 {
     void (*ctor)(void *) = get_dtl_cache_ctor();
 
-    dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
-                                  DISPATCH_LOG_BYTES, 0, ctor);
+    dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
+                                           DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
     if (!dtl_cache) {
         pr_warn("Failed to create dispatch trace log buffer cache\n");
         pr_warn("Stolen time statistics will be unreliable\n");
@@ -1159,7 +1159,6 @@ define_machine(pseries) {
     .machine_check_exception = pSeries_machine_check_exception,
     .machine_check_log_err   = pSeries_machine_check_log_err,
 #ifdef CONFIG_KEXEC_CORE
-    .machine_kexec           = pseries_machine_kexec,
     .kexec_cpu_down          = pseries_kexec_cpu_down,
 #endif
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -121,20 +121,12 @@ static void machine_kexec_mask_interrupts(void)
 
     for_each_irq_desc(i, desc) {
         struct irq_chip *chip;
-        int ret;
 
         chip = irq_desc_get_chip(desc);
         if (!chip)
             continue;
 
-        /*
-         * First try to remove the active state. If this
-         * fails, try to EOI the interrupt.
-         */
-        ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
-
-        if (ret && irqd_irq_inprogress(&desc->irq_data) &&
-            chip->irq_eoi)
+        if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
             chip->irq_eoi(&desc->irq_data);
 
         if (chip->irq_mask)
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                              bool (*fn)(void *, unsigned long), void *arg)
 {
     unsigned long fp, sp, pc;
+    int graph_idx = 0;
     int level = 0;
 
     if (regs) {
@@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
             pc = regs->ra;
         } else {
             fp = frame->fp;
-            pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+            pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
                                        &frame->ra);
             if (pc == (unsigned long)ret_from_exception) {
                 if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
@@ -327,7 +327,7 @@ static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_att
 
     event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc);
     if (IS_ERR(event)) {
-        pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
+        pr_debug("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
         return PTR_ERR(event);
     }
 
@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
     u64 instruction_io_other;
     u64 instruction_lpsw;
     u64 instruction_lpswe;
+    u64 instruction_lpswey;
     u64 instruction_pfmf;
     u64 instruction_ptff;
     u64 instruction_sck;
@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
     STATS_DESC_COUNTER(VCPU, instruction_io_other),
     STATS_DESC_COUNTER(VCPU, instruction_lpsw),
     STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+    STATS_DESC_COUNTER(VCPU, instruction_lpswey),
     STATS_DESC_COUNTER(VCPU, instruction_pfmf),
     STATS_DESC_COUNTER(VCPU, instruction_ptff),
     STATS_DESC_COUNTER(VCPU, instruction_sck),
@@ -138,6 +138,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
     return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
+static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
+{
+    u32 base1 = vcpu->arch.sie_block->ipb >> 28;
+    s64 disp1;
+
+    /* The displacement is a 20bit _SIGNED_ value */
+    disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+                          ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
+
+    if (ar)
+        *ar = base1;
+
+    return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+}
+
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
                                               u64 *address1, u64 *address2,
                                               u8 *ar_b1, u8 *ar_b2)
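The new helper splices the 20-bit signed displacement of an SIY-format instruction out of the IPB word: the 12-bit DL field lands in bits 11..0, the 8-bit DH field is shifted into bits 19..12, and the result is sign-extended from bit 19. A standalone sketch of the same decode (plain C, with a userspace re-implementation of the kernel's sign_extend64()):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace copy of the kernel's sign_extend64() helper. */
    static int64_t sign_extend64(uint64_t value, int index)
    {
        int shift = 63 - index;
        return (int64_t)(value << shift) >> shift;
    }

    /* Decode the 20-bit signed SIY displacement from a raw IPB word,
     * exactly as kvm_s390_get_base_disp_siy() does. */
    static int64_t siy_disp(uint32_t ipb)
    {
        return sign_extend64(((ipb & 0x0fff0000) >> 16) +  /* DL: bits 11..0  */
                             ((ipb & 0xff00) << 4), 19);   /* DH: bits 19..12 */
    }

    int main(void)
    {
        /* DH = 0xff, DL = 0xfff -> all 20 bits set -> -1. */
        printf("%lld\n", (long long)siy_disp(0x0fffff00u));
        /* DH = 0x00, DL = 0x008 -> +8. */
        printf("%lld\n", (long long)siy_disp(0x00080000u));
        return 0;
    }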
@@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
     return 0;
 }
 
+static int handle_lpswey(struct kvm_vcpu *vcpu)
+{
+    psw_t new_psw;
+    u64 addr;
+    int rc;
+    u8 ar;
+
+    vcpu->stat.instruction_lpswey++;
+
+    if (!test_kvm_facility(vcpu->kvm, 193))
+        return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+    if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+        return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+    addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
+    if (addr & 7)
+        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+    rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
+    if (rc)
+        return kvm_s390_inject_prog_cond(vcpu, rc);
+
+    vcpu->arch.sie_block->gpsw = new_psw;
+    if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+    return 0;
+}
+
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
     u64 stidp_data = vcpu->kvm->arch.model.cpuid;
@@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
     case 0x61:
     case 0x62:
         return handle_ri(vcpu);
+    case 0x71:
+        return handle_lpswey(vcpu);
     default:
         return -EOPNOTSUPP;
     }
@@ -55,6 +55,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
+    if (!table)
+        return;
     pagetable_free(virt_to_ptdesc(table));
 }
 
@@ -262,6 +264,8 @@ static unsigned long *base_crst_alloc(unsigned long val)
 
 static void base_crst_free(unsigned long *table)
 {
+    if (!table)
+        return;
     pagetable_free(virt_to_ptdesc(table));
 }
 
@@ -19,7 +19,7 @@
 
 struct task_struct;
 
-static inline struct task_struct *get_current(void)
+static __always_inline struct task_struct *get_current(void)
 {
     return current_thread_info()->task;
 }
@@ -91,7 +91,7 @@ struct thread_info {
 }
 
 /* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
+static __always_inline struct thread_info *current_thread_info(void)
 {
     struct thread_info *ti;
     __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
@@ -16,7 +16,6 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/sched.h>       /* need_resched() */
-#include <linux/sort.h>
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu.h>
@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
         acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 }
 
-static int acpi_cst_latency_cmp(const void *a, const void *b)
+static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
 {
-    const struct acpi_processor_cx *x = a, *y = b;
+    int i, j, k;
 
-    if (!(x->valid && y->valid))
-        return 0;
-    if (x->latency > y->latency)
-        return 1;
-    if (x->latency < y->latency)
-        return -1;
-    return 0;
-}
-static void acpi_cst_latency_swap(void *a, void *b, int n)
-{
-    struct acpi_processor_cx *x = a, *y = b;
+    for (i = 1; i < length; i++) {
+        if (!states[i].valid)
+            continue;
 
-    if (!(x->valid && y->valid))
-        return;
-    swap(x->latency, y->latency);
+        for (j = i - 1, k = i; j >= 0; j--) {
+            if (!states[j].valid)
+                continue;
+
+            if (states[j].latency > states[k].latency)
+                swap(states[j].latency, states[k].latency);
+
+            k = j;
+        }
+    }
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
     if (buggy_latency) {
         pr_notice("FW issue: working around C-state latencies out of order\n");
-        sort(&pr->power.states[1], max_cstate,
-             sizeof(struct acpi_processor_cx),
-             acpi_cst_latency_cmp,
-             acpi_cst_latency_swap);
+        acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
     }
 
     lapic_timer_propagate_broadcast(pr);
@@ -16,8 +16,8 @@ tpm-y += eventlog/common.o
 tpm-y += eventlog/tpm1.o
 tpm-y += eventlog/tpm2.o
 tpm-y += tpm-buf.o
+tpm-y += tpm2-sessions.o
 
-tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o
 tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
 tpm-$(CONFIG_EFI) += eventlog/efi.o
 tpm-$(CONFIG_OF) += eventlog/of.o
@@ -83,9 +83,6 @@
 #define AES_KEY_BYTES	AES_KEYSIZE_128
 #define AES_KEY_BITS	(AES_KEY_BYTES*8)
 
-static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
-                               u32 *handle, u8 *name);
-
 /*
  * This is the structure that carries all the auth information (like
  * session handle, nonces, session key and auth) from use to use it is
@@ -148,6 +145,7 @@ struct tpm2_auth {
     u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE];
 };
 
+#ifdef CONFIG_TCG_TPM2_HMAC
 /*
  * Name Size based on TPM algorithm (assumes no hash bigger than 255)
  */
@@ -163,6 +161,226 @@ static u8 name_size(const u8 *name)
     return size_map[alg] + 2;
 }
 
+static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
+{
+    struct tpm_header *head = (struct tpm_header *)buf->data;
+    off_t offset = TPM_HEADER_SIZE;
+    u32 tot_len = be32_to_cpu(head->length);
+    u32 val;
+
+    /* we're starting after the header so adjust the length */
+    tot_len -= TPM_HEADER_SIZE;
+
+    /* skip public */
+    val = tpm_buf_read_u16(buf, &offset);
+    if (val > tot_len)
+        return -EINVAL;
+    offset += val;
+    /* name */
+    val = tpm_buf_read_u16(buf, &offset);
+    if (val != name_size(&buf->data[offset]))
+        return -EINVAL;
+    memcpy(name, &buf->data[offset], val);
+    /* forget the rest */
+    return 0;
+}
+
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+{
+    struct tpm_buf buf;
+    int rc;
+
+    rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+    if (rc)
+        return rc;
+
+    tpm_buf_append_u32(&buf, handle);
+    rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
+    if (rc == TPM2_RC_SUCCESS)
+        rc = tpm2_parse_read_public(name, &buf);
+
+    tpm_buf_destroy(&buf);
+
+    return rc;
+}
+#endif /* CONFIG_TCG_TPM2_HMAC */
+
+/**
+ * tpm_buf_append_name() - add a handle area to the buffer
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @handle: The handle to be appended
+ * @name: The name of the handle (may be NULL)
+ *
+ * In order to compute session HMACs, we need to know the names of the
+ * objects pointed to by the handles.  For most objects, this is simply
+ * the actual 4 byte handle or an empty buf (in these cases @name
+ * should be NULL) but for volatile objects, permanent objects and NV
+ * areas, the name is defined as the hash (according to the name
+ * algorithm which should be set to sha256) of the public area to
+ * which the two byte algorithm id has been appended.  For these
+ * objects, the @name pointer should point to this.  If a name is
+ * required but @name is NULL, then TPM2_ReadPublic() will be called
+ * on the handle to obtain the name.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+                         u32 handle, u8 *name)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+    enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+    struct tpm2_auth *auth;
+    int slot;
+#endif
+
+    if (!tpm2_chip_auth(chip)) {
+        tpm_buf_append_u32(buf, handle);
+        /* count the number of handles in the upper bits of flags */
+        buf->handles++;
+        return;
+    }
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+    slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
+    if (slot >= AUTH_MAX_NAMES) {
+        dev_err(&chip->dev, "TPM: too many handles\n");
+        return;
+    }
+    auth = chip->auth;
+    WARN(auth->session != tpm_buf_length(buf),
+         "name added in wrong place\n");
+    tpm_buf_append_u32(buf, handle);
+    auth->session += 4;
+
+    if (mso == TPM2_MSO_PERSISTENT ||
+        mso == TPM2_MSO_VOLATILE ||
+        mso == TPM2_MSO_NVRAM) {
+        if (!name)
+            tpm2_read_public(chip, handle, auth->name[slot]);
+    } else {
+        if (name)
+            dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+    }
+
+    auth->name_h[slot] = handle;
+    if (name)
+        memcpy(auth->name[slot], name, name_size(name));
+#endif
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_name);
+
+/**
+ * tpm_buf_append_hmac_session() - Append a TPM session element
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @attributes: The session attributes
+ * @passphrase: The session authority (NULL if none)
+ * @passphrase_len: The length of the session authority (0 if none)
+ *
+ * This fills in a session structure in the TPM command buffer, except
+ * for the HMAC which cannot be computed until the command buffer is
+ * complete.  The type of session is controlled by the @attributes,
+ * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
+ * session won't terminate after tpm_buf_check_hmac_response(),
+ * TPM2_SA_DECRYPT which means this buffers first parameter should be
+ * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
+ * response buffer's first parameter needs to be decrypted (confusing,
+ * but the defines are written from the point of view of the TPM).
+ *
+ * Any session appended by this command must be finalized by calling
+ * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
+ * and the TPM will reject the command.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+                                 u8 attributes, u8 *passphrase,
+                                 int passphrase_len)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+    u8 nonce[SHA256_DIGEST_SIZE];
+    struct tpm2_auth *auth;
+    u32 len;
+#endif
+
+    if (!tpm2_chip_auth(chip)) {
+        /* offset tells us where the sessions area begins */
+        int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+        u32 len = 9 + passphrase_len;
+
+        if (tpm_buf_length(buf) != offset) {
+            /* not the first session so update the existing length */
+            len += get_unaligned_be32(&buf->data[offset]);
+            put_unaligned_be32(len, &buf->data[offset]);
+        } else {
+            tpm_buf_append_u32(buf, len);
+        }
+        /* auth handle */
+        tpm_buf_append_u32(buf, TPM2_RS_PW);
+        /* nonce */
+        tpm_buf_append_u16(buf, 0);
+        /* attributes */
+        tpm_buf_append_u8(buf, 0);
+        /* passphrase */
+        tpm_buf_append_u16(buf, passphrase_len);
+        tpm_buf_append(buf, passphrase, passphrase_len);
+        return;
+    }
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+    /*
+     * The Architecture Guide requires us to strip trailing zeros
+     * before computing the HMAC
+     */
+    while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0')
+        passphrase_len--;
+
+    auth = chip->auth;
+    auth->attrs = attributes;
+    auth->passphrase_len = passphrase_len;
+    if (passphrase_len)
+        memcpy(auth->passphrase, passphrase, passphrase_len);
+
+    if (auth->session != tpm_buf_length(buf)) {
+        /* we're not the first session */
+        len = get_unaligned_be32(&buf->data[auth->session]);
+        if (4 + len + auth->session != tpm_buf_length(buf)) {
+            WARN(1, "session length mismatch, cannot append");
+            return;
+        }
+
+        /* add our new session */
+        len += 9 + 2 * SHA256_DIGEST_SIZE;
+        put_unaligned_be32(len, &buf->data[auth->session]);
+    } else {
+        tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
+    }
+
+    /* random number for our nonce */
+    get_random_bytes(nonce, sizeof(nonce));
+    memcpy(auth->our_nonce, nonce, sizeof(nonce));
+    tpm_buf_append_u32(buf, auth->handle);
+    /* our new nonce */
+    tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+    tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+    tpm_buf_append_u8(buf, auth->attrs);
+    /* and put a placeholder for the hmac */
+    tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+    tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+#endif
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+                               u32 *handle, u8 *name);
+
 /*
  * It turns out the crypto hmac(sha256) is hard for us to consume
  * because it assumes a fixed key and the TPM seems to change the key
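In the !chip->auth fallback above, a plain password session is serialized inline, and its length is 9 + passphrase_len: a 4-byte TPM2_RS_PW handle, a 2-byte empty nonce size, 1 attribute byte, and a 2-byte passphrase length. A runnable sketch of that layout (plain C; the TPM2_RS_PW value is the permanent password-authorization handle from the TPM 2.0 spec, everything else here is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TPM2_RS_PW 0x40000009u

    /* Serialize a password authorization area the way the fallback does:
     * handle (4) + nonce size (2) + attributes (1) + passphrase size (2)
     * + passphrase bytes, i.e. 9 + passphrase_len in total. */
    static size_t pw_session(uint8_t *out, const uint8_t *pw, uint16_t pw_len)
    {
        size_t o = 0;

        out[o++] = TPM2_RS_PW >> 24; out[o++] = TPM2_RS_PW >> 16;
        out[o++] = TPM2_RS_PW >> 8;  out[o++] = TPM2_RS_PW;
        out[o++] = 0; out[o++] = 0;            /* nonce size = 0 */
        out[o++] = 0;                          /* session attributes */
        out[o++] = pw_len >> 8; out[o++] = pw_len;
        memcpy(out + o, pw, pw_len);
        return o + pw_len;
    }

    int main(void)
    {
        uint8_t blob[64];
        size_t n = pw_session(blob, (const uint8_t *)"owner", 5);

        printf("session area: %zu bytes (9 + 5)\n", n);
        return 0;
    }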
@@ -343,82 +561,6 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
     crypto_free_kpp(kpp);
 }
 
-/**
- * tpm_buf_append_hmac_session() - Append a TPM session element
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @attributes: The session attributes
- * @passphrase: The session authority (NULL if none)
- * @passphrase_len: The length of the session authority (0 if none)
- *
- * This fills in a session structure in the TPM command buffer, except
- * for the HMAC which cannot be computed until the command buffer is
- * complete.  The type of session is controlled by the @attributes,
- * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
- * session won't terminate after tpm_buf_check_hmac_response(),
- * TPM2_SA_DECRYPT which means this buffers first parameter should be
- * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
- * response buffer's first parameter needs to be decrypted (confusing,
- * but the defines are written from the point of view of the TPM).
- *
- * Any session appended by this command must be finalized by calling
- * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
- * and the TPM will reject the command.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
-                                 u8 attributes, u8 *passphrase,
-                                 int passphrase_len)
-{
-    u8 nonce[SHA256_DIGEST_SIZE];
-    u32 len;
-    struct tpm2_auth *auth = chip->auth;
-
-    /*
-     * The Architecture Guide requires us to strip trailing zeros
-     * before computing the HMAC
-     */
-    while (passphrase && passphrase_len > 0
-           && passphrase[passphrase_len - 1] == '\0')
-        passphrase_len--;
-
-    auth->attrs = attributes;
-    auth->passphrase_len = passphrase_len;
-    if (passphrase_len)
-        memcpy(auth->passphrase, passphrase, passphrase_len);
-
-    if (auth->session != tpm_buf_length(buf)) {
-        /* we're not the first session */
-        len = get_unaligned_be32(&buf->data[auth->session]);
-        if (4 + len + auth->session != tpm_buf_length(buf)) {
-            WARN(1, "session length mismatch, cannot append");
-            return;
-        }
-
-        /* add our new session */
-        len += 9 + 2 * SHA256_DIGEST_SIZE;
-        put_unaligned_be32(len, &buf->data[auth->session]);
-    } else {
-        tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
-    }
-
-    /* random number for our nonce */
-    get_random_bytes(nonce, sizeof(nonce));
-    memcpy(auth->our_nonce, nonce, sizeof(nonce));
-    tpm_buf_append_u32(buf, auth->handle);
-    /* our new nonce */
-    tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
-    tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
-    tpm_buf_append_u8(buf, auth->attrs);
-    /* and put a placeholder for the hmac */
-    tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
-    tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
-}
-EXPORT_SYMBOL(tpm_buf_append_hmac_session);
-
 /**
  * tpm_buf_fill_hmac_session() - finalize the session HMAC
  * @chip: the TPM chip structure
@@ -449,6 +591,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
     u8 cphash[SHA256_DIGEST_SIZE];
     struct sha256_state sctx;
 
+    if (!auth)
+        return;
+
     /* save the command code in BE format */
     auth->ordinal = head->ordinal;
 
@@ -567,104 +712,6 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
 }
 EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
 
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
-    struct tpm_header *head = (struct tpm_header *)buf->data;
-    off_t offset = TPM_HEADER_SIZE;
-    u32 tot_len = be32_to_cpu(head->length);
-    u32 val;
-
-    /* we're starting after the header so adjust the length */
-    tot_len -= TPM_HEADER_SIZE;
-
-    /* skip public */
-    val = tpm_buf_read_u16(buf, &offset);
-    if (val > tot_len)
-        return -EINVAL;
-    offset += val;
-    /* name */
-    val = tpm_buf_read_u16(buf, &offset);
-    if (val != name_size(&buf->data[offset]))
-        return -EINVAL;
-    memcpy(name, &buf->data[offset], val);
-    /* forget the rest */
-    return 0;
-}
-
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
-{
-    struct tpm_buf buf;
-    int rc;
-
-    rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
-    if (rc)
-        return rc;
-
-    tpm_buf_append_u32(&buf, handle);
-    rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
-    if (rc == TPM2_RC_SUCCESS)
-        rc = tpm2_parse_read_public(name, &buf);
-
-    tpm_buf_destroy(&buf);
-
-    return rc;
-}
-
-/**
- * tpm_buf_append_name() - add a handle area to the buffer
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @handle: The handle to be appended
- * @name: The name of the handle (may be NULL)
- *
- * In order to compute session HMACs, we need to know the names of the
- * objects pointed to by the handles.  For most objects, this is simply
- * the actual 4 byte handle or an empty buf (in these cases @name
- * should be NULL) but for volatile objects, permanent objects and NV
- * areas, the name is defined as the hash (according to the name
- * algorithm which should be set to sha256) of the public area to
- * which the two byte algorithm id has been appended.  For these
- * objects, the @name pointer should point to this.  If a name is
- * required but @name is NULL, then TPM2_ReadPublic() will be called
- * on the handle to obtain the name.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
-                         u32 handle, u8 *name)
-{
-    enum tpm2_mso_type mso = tpm2_handle_mso(handle);
-    struct tpm2_auth *auth = chip->auth;
-    int slot;
-
-    slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4;
-    if (slot >= AUTH_MAX_NAMES) {
-        dev_err(&chip->dev, "TPM: too many handles\n");
-        return;
-    }
-    WARN(auth->session != tpm_buf_length(buf),
-         "name added in wrong place\n");
-    tpm_buf_append_u32(buf, handle);
-    auth->session += 4;
-
-    if (mso == TPM2_MSO_PERSISTENT ||
-        mso == TPM2_MSO_VOLATILE ||
-        mso == TPM2_MSO_NVRAM) {
-        if (!name)
-            tpm2_read_public(chip, handle, auth->name[slot]);
-    } else {
-        if (name)
-            dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
-    }
-
-    auth->name_h[slot] = handle;
-    if (name)
-        memcpy(auth->name[slot], name, name_size(name));
-}
-EXPORT_SYMBOL(tpm_buf_append_name);
-
 /**
  * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness
  * @chip: the TPM chip structure
@@ -705,6 +752,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
     u32 cc = be32_to_cpu(auth->ordinal);
     int parm_len, len, i, handles;
 
+    if (!auth)
+        return rc;
+
     if (auth->session >= TPM_HEADER_SIZE) {
         WARN(1, "tpm session not filled correctly\n");
         goto out;
@@ -824,8 +874,13 @@ EXPORT_SYMBOL(tpm_buf_check_hmac_response);
  */
 void tpm2_end_auth_session(struct tpm_chip *chip)
 {
-    tpm2_flush_context(chip, chip->auth->handle);
-    memzero_explicit(chip->auth, sizeof(*chip->auth));
+    struct tpm2_auth *auth = chip->auth;
+
+    if (!auth)
+        return;
+
+    tpm2_flush_context(chip, auth->handle);
+    memzero_explicit(auth, sizeof(*auth));
 }
 EXPORT_SYMBOL(tpm2_end_auth_session);
 
@@ -907,6 +962,11 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
     int rc;
     u32 null_key;
 
+    if (!auth) {
+        dev_warn_once(&chip->dev, "auth session is not active\n");
+        return 0;
+    }
+
     rc = tpm2_load_null(chip, &null_key);
     if (rc)
         goto out;
@@ -1301,3 +1361,4 @@ int tpm2_sessions_init(struct tpm_chip *chip)
 
     return rc;
 }
+#endif /* CONFIG_TCG_TPM2_HMAC */
@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
 static const struct mtk_clk_desc mfg_desc = {
     .clks = mfg_clks,
     .num_clks = ARRAY_SIZE(mfg_clks),
+    .need_runtime_pm = true,
 };
 
 static const struct of_device_id of_match_clk_mt8183_mfg[] = {
@@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
     }
 
-    devm_pm_runtime_enable(&pdev->dev);
-    /*
-     * Do a pm_runtime_resume_and_get() to workaround a possible
-     * deadlock between clk_register() and the genpd framework.
-     */
-    r = pm_runtime_resume_and_get(&pdev->dev);
-    if (r)
-        return r;
+    if (mcd->need_runtime_pm) {
+        devm_pm_runtime_enable(&pdev->dev);
+        /*
+         * Do a pm_runtime_resume_and_get() to workaround a possible
+         * deadlock between clk_register() and the genpd framework.
+         */
+        r = pm_runtime_resume_and_get(&pdev->dev);
+        if (r)
+            return r;
+    }
 
     /* Calculate how many clk_hw_onecell_data entries to allocate */
     num_clks = mcd->num_clks + mcd->num_composite_clks;
@@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
         goto unregister_clks;
     }
 
-    pm_runtime_put(&pdev->dev);
+    if (mcd->need_runtime_pm)
+        pm_runtime_put(&pdev->dev);
 
     return r;
 
@@ -618,7 +621,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
     if (mcd->shared_io && base)
         iounmap(base);
 
-    pm_runtime_put(&pdev->dev);
+    if (mcd->need_runtime_pm)
+        pm_runtime_put(&pdev->dev);
     return r;
 }
 
@@ -237,6 +237,8 @@ struct mtk_clk_desc {
 
     int (*clk_notifier_func)(struct device *dev, struct clk *clk);
     unsigned int mfg_clk_idx;
+
+    bool need_runtime_pm;
 };
 
 int mtk_clk_pdev_probe(struct platform_device *pdev);
@@ -70,7 +70,6 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
 static const struct alpha_pll_config ipq5018_pll_config = {
     .l = 0x2a,
     .config_ctl_val = 0x4001075b,
-    .config_ctl_hi_val = 0x304,
     .main_output_mask = BIT(0),
     .aux_output_mask = BIT(1),
     .early_output_mask = BIT(3),
@@ -84,7 +83,6 @@ static const struct alpha_pll_config ipq5018_pll_config = {
 static const struct alpha_pll_config ipq5332_pll_config = {
     .l = 0x2d,
     .config_ctl_val = 0x4001075b,
-    .config_ctl_hi_val = 0x304,
     .main_output_mask = BIT(0),
     .aux_output_mask = BIT(1),
     .early_output_mask = BIT(3),
@@ -2574,6 +2574,9 @@ static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
     regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
                  a >> ALPHA_BITWIDTH);
 
+    regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+                       PLL_ALPHA_EN, PLL_ALPHA_EN);
+
     regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
 
     /* Wait five micro seconds or more */
@@ -2140,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
 
 static struct clk_branch gcc_crypto_axi_clk = {
     .halt_reg = 0x16010,
+    .halt_check = BRANCH_HALT_VOTED,
     .clkr = {
-        .enable_reg = 0x16010,
-        .enable_mask = BIT(0),
+        .enable_reg = 0xb004,
+        .enable_mask = BIT(15),
         .hw.init = &(const struct clk_init_data) {
             .name = "gcc_crypto_axi_clk",
             .parent_hws = (const struct clk_hw *[]) {
@@ -2156,9 +2157,10 @@
 
 static struct clk_branch gcc_crypto_ahb_clk = {
     .halt_reg = 0x16014,
+    .halt_check = BRANCH_HALT_VOTED,
     .clkr = {
-        .enable_reg = 0x16014,
-        .enable_mask = BIT(0),
+        .enable_reg = 0xb004,
+        .enable_mask = BIT(16),
         .hw.init = &(const struct clk_init_data) {
             .name = "gcc_crypto_ahb_clk",
             .parent_hws = (const struct clk_hw *[]) {
@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
         .enable_mask = BIT(6),
         .hw.init = &(struct clk_init_data){
             .name = "gpll6",
-            .parent_hws = (const struct clk_hw*[]){
-                &gpll0.clkr.hw,
+            .parent_data = &(const struct clk_parent_data){
+                .fw_name = "bi_tcxo",
             },
             .num_parents = 1,
             .ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
     .clkr.hw.init = &(struct clk_init_data){
         .name = "gpll6_out_even",
         .parent_hws = (const struct clk_hw*[]){
-            &gpll0.clkr.hw,
+            &gpll6.clkr.hw,
         },
         .num_parents = 1,
         .ops = &clk_alpha_pll_postdiv_fabia_ops,
@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
         .enable_mask = BIT(7),
         .hw.init = &(struct clk_init_data){
             .name = "gpll7",
-            .parent_hws = (const struct clk_hw*[]){
-                &gpll0.clkr.hw,
+            .parent_data = &(const struct clk_parent_data){
+                .fw_name = "bi_tcxo",
             },
             .num_parents = 1,
             .ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -132,7 +132,6 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
 
     for (i = 0; i < desc->hw_clks->num ; i++) {
         struct clk_hw *hw = desc->hw_clks->hws[i];
-        struct ccu_common *common = hw_to_ccu_common(hw);
         const char *name;
 
         if (!hw)
@@ -147,14 +146,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
             pr_err("Couldn't register clock %d - %s\n", i, name);
             goto err_clk_unreg;
         }
+    }
 
-        if (common->max_rate)
-            clk_hw_set_rate_range(hw, common->min_rate,
-                                  common->max_rate);
+    for (i = 0; i < desc->num_ccu_clks; i++) {
+        struct ccu_common *cclk = desc->ccu_clks[i];
+
+        if (!cclk)
+            continue;
+
+        if (cclk->max_rate)
+            clk_hw_set_rate_range(&cclk->hw, cclk->min_rate,
+                                  cclk->max_rate);
         else
-            WARN(common->min_rate,
+            WARN(cclk->min_rate,
                  "No max_rate, ignoring min_rate of clock %d - %s\n",
-                 i, name);
+                 i, clk_hw_get_name(&cclk->hw));
     }
 
     ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
@@ -890,8 +890,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
     if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
         pr_warn(FW_WARN "P-state 0 is not max freq\n");
 
-    if (acpi_cpufreq_driver.set_boost)
+    if (acpi_cpufreq_driver.set_boost) {
         set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+        policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
+    }
 
     return result;
 
|
||||
}
|
||||
|
||||
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
|
||||
policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
|
||||
if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
|
||||
policy->boost_enabled = true;
|
||||
|
||||
/*
|
||||
* The initialization has succeeded and the policy is online.
|
||||
|
@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si)
|
||||
if (IS_ERR(pdev)) {
|
||||
return ERR_CAST(pdev);
|
||||
} else if (pdev) {
|
||||
if (!sysfb_pci_dev_is_enabled(pdev))
|
||||
if (!sysfb_pci_dev_is_enabled(pdev)) {
|
||||
pci_dev_put(pdev);
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
return &pdev->dev;
|
||||
}
|
||||
|
||||
@ -137,7 +139,7 @@ static __init int sysfb_init(void)
|
||||
if (compatible) {
|
||||
pd = sysfb_create_simplefb(si, &mode, parent);
|
||||
if (!IS_ERR(pd))
|
||||
goto unlock_mutex;
|
||||
goto put_device;
|
||||
}
|
||||
|
||||
/* if the FB is incompatible, create a legacy framebuffer device */
|
||||
@ -155,7 +157,7 @@ static __init int sysfb_init(void)
|
||||
pd = platform_device_alloc(name, 0);
|
||||
if (!pd) {
|
||||
ret = -ENOMEM;
|
||||
goto unlock_mutex;
|
||||
goto put_device;
|
||||
}
|
||||
|
||||
pd->dev.parent = parent;
|
||||
@ -170,9 +172,11 @@ static __init int sysfb_init(void)
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
goto unlock_mutex;
|
||||
goto put_device;
|
||||
err:
|
||||
platform_device_put(pd);
|
||||
put_device:
|
||||
put_device(parent);
|
||||
unlock_mutex:
|
||||
mutex_unlock(&disable_lock);
|
||||
return ret;
|
||||
|
@@ -619,8 +619,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
     ret = gpiochip_get_ngpios(gc, dev);
     if (ret)
         gc->ngpio = gc->bgpio_bits;
-    else
-        gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8));
 
     ret = bgpio_setup_io(gc, dat, set, clr, flags);
     if (ret)
@@ -202,6 +202,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
          * helper, and be consistent with what other drivers do.
          */
         { "qi,lb60", "rb-gpios", true },
 #endif
+#if IS_ENABLED(CONFIG_PCI_LANTIQ)
+        /*
+         * According to the PCI specification, the RST# pin is an
+         * active-low signal. However, most of the device trees that
+         * have been widely used for a long time incorrectly describe
+         * reset GPIO as active-high, and were also using wrong name
+         * for the property.
+         */
+        { "lantiq,pci-xway", "gpio-reset", false },
+#endif
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
+        /*
+         * DTS for Nokia N900 incorrectly specified "active high"
+         * polarity for the reset line, while the chip actually
+         * treats it as "active low".
+         */
+        { "ti,tsc2005", "reset-gpios", false },
+#endif
     };
     unsigned int i;
@@ -504,9 +522,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
         { "reset",	"reset-n-io",	"marvell,nfc-uart" },
         { "reset",	"reset-n-io",	"mrvl,nfc-uart" },
 #endif
-#if !IS_ENABLED(CONFIG_PCI_LANTIQ)
+#if IS_ENABLED(CONFIG_PCI_LANTIQ)
         /* MIPS Lantiq PCI */
-        { "reset",	"gpios-reset",	"lantiq,pci-xway" },
+        { "reset",	"gpio-reset",	"lantiq,pci-xway" },
 #endif
 
         /*
@@ -10048,6 +10048,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
         }
 
+        /* Update Freesync settings. */
         reset_freesync_config_for_crtc(dm_new_crtc_state);
         get_freesync_config_for_crtc(dm_new_crtc_state,
                                      dm_new_conn_state);
 
@@ -11181,6 +11182,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
     return ret;
 }
 
+static void parse_edid_displayid_vrr(struct drm_connector *connector,
+                                     struct edid *edid)
+{
+    u8 *edid_ext = NULL;
+    int i;
+    int j = 0;
+    u16 min_vfreq;
+    u16 max_vfreq;
+
+    if (edid == NULL || edid->extensions == 0)
+        return;
+
+    /* Find DisplayID extension */
+    for (i = 0; i < edid->extensions; i++) {
+        edid_ext = (void *)(edid + (i + 1));
+        if (edid_ext[0] == DISPLAYID_EXT)
+            break;
+    }
+
+    if (edid_ext == NULL)
+        return;
+
+    while (j < EDID_LENGTH) {
+        /* Get dynamic video timing range from DisplayID if available */
+        if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
+            (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
+            min_vfreq = edid_ext[j+9];
+            if (edid_ext[j+1] & 7)
+                max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
+            else
+                max_vfreq = edid_ext[j+10];
+
+            if (max_vfreq && min_vfreq) {
+                connector->display_info.monitor_range.max_vfreq = max_vfreq;
+                connector->display_info.monitor_range.min_vfreq = min_vfreq;
+
+                return;
+            }
+        }
+        j++;
+    }
+}
+
 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
                           struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
@@ -11302,6 +11346,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
     if (!adev->dm.freesync_module)
         goto update;
 
+    /* Some eDP panels only have the refresh rate range info in DisplayID */
+    if ((connector->display_info.monitor_range.min_vfreq == 0 ||
+         connector->display_info.monitor_range.max_vfreq == 0))
+        parse_edid_displayid_vrr(connector, edid);
+
     if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
                  sink->sink_signal == SIGNAL_TYPE_EDP)) {
         bool edid_check_required = false;
@@ -11309,9 +11358,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
         if (is_dp_capable_without_timing_msa(adev->dm.dc,
                                              amdgpu_dm_connector)) {
             if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
-                freesync_capable = true;
                 amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
                 amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+                if (amdgpu_dm_connector->max_vfreq -
+                    amdgpu_dm_connector->min_vfreq > 10)
+                    freesync_capable = true;
             } else {
                 edid_check_required = edid->version > 1 ||
                                       (edid->version == 1 &&
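parse_edid_displayid_vrr() scans the DisplayID extension block for a data block with tag 0x25 and payload length 9, then reads the refresh range from fixed offsets within it. A standalone sketch of the same scan over a raw 128-byte block (plain C; the offsets mirror the driver's checks, and reading "revision & 7" as "10-bit max rate" is an assumption carried over from the code):

    #include <stdint.h>
    #include <stdio.h>

    #define EDID_BLOCK_LENGTH 128

    /* Scan one 128-byte DisplayID extension block for a dynamic video
     * timing range block (tag 0x25, length 9) and pull out min/max Hz. */
    static int displayid_vrr_range(const uint8_t *ext, int *min_hz, int *max_hz)
    {
        for (int j = 0; j < EDID_BLOCK_LENGTH; j++) {
            if (EDID_BLOCK_LENGTH - j > 13 && ext[j] == 0x25 &&
                (ext[j + 1] & 0xFE) == 0 && ext[j + 2] == 9) {
                *min_hz = ext[j + 9];
                if (ext[j + 1] & 7)  /* assumed: revision with 10-bit max */
                    *max_hz = ext[j + 10] + ((ext[j + 11] & 3) << 8);
                else
                    *max_hz = ext[j + 10];
                return (*min_hz && *max_hz) ? 0 : -1;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint8_t ext[EDID_BLOCK_LENGTH] = { 0 };
        int min_hz, max_hz;

        ext[4] = 0x25; ext[5] = 0x00; ext[6] = 9;  /* tag, revision, length */
        ext[13] = 48;  ext[14] = 120;              /* 48..120 Hz */

        if (!displayid_vrr_range(ext, &min_hz, &max_hz))
            printf("VRR range: %d-%d Hz\n", min_hz, max_hz);
        return 0;
    }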
@@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                     &mode_lib->vba.UrgentBurstFactorLumaPre[k],
                     &mode_lib->vba.UrgentBurstFactorChromaPre[k],
                     &mode_lib->vba.NotUrgentLatencyHidingPre[k]);
+
+            v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
+                    8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
         }
 
         {
@@ -234,6 +234,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
         out->round_trip_ping_latency_dcfclk_cycles = 106;
         out->smn_latency_us = 2;
         out->dispclk_dppclk_vco_speed_mhz = 3600;
+        out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
         break;
 
     }
@@ -294,7 +294,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
     context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
     context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
-    if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported)
+    if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported)
         context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
     else
         context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
@@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1
 {
     struct atom_common_table_header table_header;
     /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
-    struct atom_gpio_pin_assignment gpio_pin[8];
+    struct atom_gpio_pin_assignment gpio_pin[];
 };
 
 
@@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
                 sizes->surface_width, sizes->surface_height,
                 sizes->surface_bpp);
 
-    format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+    format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
+                                         sizes->surface_depth);
     buffer = drm_client_framebuffer_create(client, sizes->surface_width,
                                            sizes->surface_height, format);
     if (IS_ERR(buffer))
@@ -420,13 +420,20 @@ static const struct dmi_system_id orientation_data[] = {
           DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
         },
         .driver_data = (void *)&lcd1280x1920_rightside_up,
-    }, {    /* Valve Steam Deck */
+    }, {    /* Valve Steam Deck (Jupiter) */
         .matches = {
           DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
           DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
           DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
         },
         .driver_data = (void *)&lcd800x1280_rightside_up,
+    }, {    /* Valve Steam Deck (Galileo) */
+        .matches = {
+          DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+          DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
+          DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+        },
+        .driver_data = (void *)&lcd800x1280_rightside_up,
     }, {    /* VIOS LTH17 */
         .matches = {
           DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
@@ -2088,6 +2088,9 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
     u32 ln0, ln1, pin_assignment;
     u8 width;
 
+    if (DISPLAY_VER(dev_priv) >= 14)
+        return;
+
     if (!intel_encoder_is_tc(&dig_port->base) ||
         intel_tc_port_in_tbt_alt_mode(dig_port))
         return;
@@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
         struct drm_display_mode *mode;
 
         mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+        if (!mode)
+            return 0;
+
         drm_mode_probed_add(connector, mode);
         ret = 1;
     }
@@ -86,15 +86,15 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
     int ret = 0;
     void *out_alloc;
 
-    if (!in->count)
-        return NULL;
-
     /* User stride must be at least the minimum object size, otherwise it might
      * lack useful information.
      */
     if (in->stride < min_stride)
         return ERR_PTR(-EINVAL);
 
+    if (!in->count)
+        return NULL;
+
     out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
     if (!out_alloc)
         return ERR_PTR(-ENOMEM);
|
||||
/** @seqno: Sequence number of the last initialized fence. */
|
||||
atomic64_t seqno;
|
||||
|
||||
/**
|
||||
* @last_fence: Fence of the last submitted job.
|
||||
*
|
||||
* We return this fence when we get an empty command stream.
|
||||
* This way, we are guaranteed that all earlier jobs have completed
|
||||
* when drm_sched_job::s_fence::finished without having to feed
|
||||
* the CS ring buffer with a dummy job that only signals the fence.
|
||||
*/
|
||||
struct dma_fence *last_fence;
|
||||
|
||||
/**
|
||||
* @in_flight_jobs: List containing all in-flight jobs.
|
||||
*
|
||||
@ -829,6 +839,9 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
|
||||
panthor_kernel_bo_destroy(queue->ringbuf);
|
||||
panthor_kernel_bo_destroy(queue->iface.mem);
|
||||
|
||||
/* Release the last_fence we were holding, if any. */
|
||||
dma_fence_put(queue->fence_ctx.last_fence);
|
||||
|
||||
kfree(queue);
|
||||
}
|
||||
|
||||
@ -2784,9 +2797,6 @@ static void group_sync_upd_work(struct work_struct *work)
|
||||
|
||||
spin_lock(&queue->fence_ctx.lock);
|
||||
list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
|
||||
if (!job->call_info.size)
|
||||
continue;
|
||||
|
||||
if (syncobj->seqno < job->done_fence->seqno)
|
||||
break;
|
||||
|
||||
@ -2865,11 +2875,14 @@ queue_run_job(struct drm_sched_job *sched_job)
|
||||
static_assert(sizeof(call_instrs) % 64 == 0,
|
||||
"call_instrs is not aligned on a cacheline");
|
||||
|
||||
/* Stream size is zero, nothing to do => return a NULL fence and let
|
||||
* drm_sched signal the parent.
|
||||
/* Stream size is zero, nothing to do except making sure all previously
|
||||
* submitted jobs are done before we signal the
|
||||
* drm_sched_job::s_fence::finished fence.
|
||||
*/
|
||||
if (!job->call_info.size)
|
||||
return NULL;
|
||||
if (!job->call_info.size) {
|
||||
job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
|
||||
return dma_fence_get(job->done_fence);
|
||||
}
|
||||
|
||||
ret = pm_runtime_resume_and_get(ptdev->base.dev);
|
||||
if (drm_WARN_ON(&ptdev->base, ret))
|
||||
@ -2928,6 +2941,10 @@ queue_run_job(struct drm_sched_job *sched_job)
|
||||
}
|
||||
}
|
||||
|
||||
/* Update the last fence. */
|
||||
dma_fence_put(queue->fence_ctx.last_fence);
|
||||
queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
|
||||
|
||||
done_fence = dma_fence_get(job->done_fence);
|
||||
|
||||
out_unlock:
|
||||
@ -3378,10 +3395,15 @@ panthor_job_create(struct panthor_file *pfile,
|
||||
goto err_put_job;
|
||||
}
|
||||
|
||||
job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
|
||||
if (!job->done_fence) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_job;
|
||||
/* Empty command streams don't need a fence, they'll pick the one from
|
||||
* the previously submitted job.
|
||||
*/
|
||||
if (job->call_info.size) {
|
||||
job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
|
||||
if (!job->done_fence) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_job;
|
||||
}
|
||||
}
|
||||
|
||||
ret = drm_sched_job_init(&job->base,
|
||||
|
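The recurring pattern in this series of hunks: cache a reference to the last real job's fence and hand that back for empty command streams, so waiting on the returned fence still means "all earlier work finished". A toy illustration with a refcounted stand-in (plain C; this is not the DRM dma_fence API, just the shape of the idea):

    #include <stdio.h>
    #include <stdlib.h>

    struct fence { int refs; int seqno; };

    static struct fence *fence_get(struct fence *f) { if (f) f->refs++; return f; }
    static void fence_put(struct fence *f)
    {
        if (f && --f->refs == 0)
            free(f);
    }

    struct queue { struct fence *last_fence; };

    /* Empty submission: reuse the previous job's fence instead of
     * queueing a dummy job whose only purpose is to signal one. */
    static struct fence *run_job(struct queue *q, int payload_size, int seqno)
    {
        if (!payload_size)
            return fence_get(q->last_fence);

        struct fence *f = malloc(sizeof(*f));
        f->refs = 1;                        /* the caller's reference */
        f->seqno = seqno;

        fence_put(q->last_fence);           /* drop the old cached fence */
        q->last_fence = fence_get(f);       /* cache the new one */
        return f;
    }

    int main(void)
    {
        struct queue q = { 0 };
        struct fence *a = run_job(&q, 64, 1);  /* real job */
        struct fence *b = run_job(&q, 0, 2);   /* empty stream */

        printf("empty job waits on seqno %d\n", b ? b->seqno : -1);
        fence_put(a); fence_put(b); fence_put(q.last_fence);
        return 0;
    }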
@@ -642,7 +642,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
     if (r)
         goto error_unlock;
 
-    if (bo_va->it.start)
+    if (bo_va->it.start && bo_va->bo)
         r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
 
 error_unlock:
@@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref)
         if (!dma_resv_test_signaled(bo->base.resv,
                                     DMA_RESV_USAGE_BOOKKEEP) ||
+            (want_init_on_free() && (bo->ttm != NULL)) ||
             bo->type == ttm_bo_type_sg ||
             !dma_resv_trylock(bo->base.resv)) {
             /* The BO is not idle, resurrect it for delayed destroy */
             ttm_bo_flush_all_fences(bo);
@@ -342,7 +342,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
     else
         gt->steering[OADDRM].group_target = 1;
 
-    gt->steering[DSS].instance_target = 0;       /* unused */
+    gt->steering[OADDRM].instance_target = 0;    /* unused */
 }
 
 static void init_steering_sqidi_psmi(struct xe_gt *gt)
@@ -357,8 +357,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
 
 static void init_steering_inst0(struct xe_gt *gt)
 {
-    gt->steering[DSS].group_target = 0;          /* unused */
-    gt->steering[DSS].instance_target = 0;       /* unused */
+    gt->steering[INSTANCE0].group_target = 0;    /* unused */
+    gt->steering[INSTANCE0].instance_target = 0; /* unused */
 }
 
 static const struct {
@@ -1334,7 +1334,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
                            GFP_KERNEL, true, 0);
     if (IS_ERR(sa_bo)) {
         err = PTR_ERR(sa_bo);
-        goto err;
+        goto err_bb;
     }
 
     ppgtt_ofs = NUM_KERNEL_PDE +
@@ -1385,7 +1385,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
                            update_idx);
     if (IS_ERR(job)) {
         err = PTR_ERR(job);
-        goto err_bb;
+        goto err_sa;
     }
 
     /* Wait on BO move */
@@ -1434,12 +1434,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
 err_job:
     xe_sched_job_put(job);
+err_sa:
+    drm_suballoc_free(sa_bo, NULL);
 err_bb:
     if (!q)
         mutex_unlock(&m->job_mutex);
     xe_bb_free(bb, NULL);
 err:
-    drm_suballoc_free(sa_bo, NULL);
     return ERR_PTR(err);
 }
 
@ -15,7 +15,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@ -32,7 +31,6 @@ struct i2c_pnx_mif {
int ret; /* Return value */
int mode; /* Interface mode */
struct completion complete; /* I/O completion */
struct timer_list timer; /* Timeout */
u8 * buf; /* Data buffer */
int len; /* Length of data buffer */
int order; /* RX Bytes to order via TX */
@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
return (timeout <= 0);
}

static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
{
struct timer_list *timer = &alg_data->mif.timer;
unsigned long expires = msecs_to_jiffies(alg_data->timeout);

if (expires <= 1)
expires = 2;

del_timer_sync(timer);

dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
jiffies, expires);

timer->expires = jiffies + expires;

add_timer(timer);
}

/**
* i2c_pnx_start - start a device
* @slave_addr: slave address
@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
I2C_REG_CTL(alg_data));

del_timer_sync(&alg_data->mif.timer);

dev_dbg(&alg_data->adapter.dev,
"%s(): Waking up xfer routine.\n",
__func__);
@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
I2C_REG_CTL(alg_data));

/* Stop timer. */
del_timer_sync(&alg_data->mif.timer);
dev_dbg(&alg_data->adapter.dev,
"%s(): Waking up xfer routine after zero-xfer.\n",
__func__);
@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
mcntrl_drmie | mcntrl_daie);
iowrite32(ctl, I2C_REG_CTL(alg_data));

/* Kill timer. */
del_timer_sync(&alg_data->mif.timer);
complete(&alg_data->mif.complete);
}
}
@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
mcntrl_drmie);
iowrite32(ctl, I2C_REG_CTL(alg_data));

/* Stop timer, to prevent timeout. */
del_timer_sync(&alg_data->mif.timer);
complete(&alg_data->mif.complete);
} else if (stat & mstatus_nai) {
/* Slave did not acknowledge, generate a STOP */
@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
/* Our return value. */
alg_data->mif.ret = -EIO;

/* Stop timer, to prevent timeout. */
del_timer_sync(&alg_data->mif.timer);
complete(&alg_data->mif.complete);
} else {
/*
@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}

static void i2c_pnx_timeout(struct timer_list *t)
static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
{
struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
u32 ctl;

dev_err(&alg_data->adapter.dev,
@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
iowrite32(ctl, I2C_REG_CTL(alg_data));
wait_reset(alg_data);
alg_data->mif.ret = -EIO;
complete(&alg_data->mif.complete);
}

static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct i2c_msg *pmsg;
int rc = 0, completed = 0, i;
struct i2c_pnx_algo_data *alg_data = adap->algo_data;
unsigned long time_left;
u32 stat;

dev_dbg(&alg_data->adapter.dev,
@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
__func__, alg_data->mif.mode, alg_data->mif.len);

i2c_pnx_arm_timer(alg_data);

/* initialize the completion var */
init_completion(&alg_data->mif.complete);
@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
break;

/* Wait for completion */
wait_for_completion(&alg_data->mif.complete);
time_left = wait_for_completion_timeout(&alg_data->mif.complete,
alg_data->timeout);
if (time_left == 0)
i2c_pnx_timeout(alg_data);

if (!(rc = alg_data->mif.ret))
completed++;
@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
alg_data->adapter.algo_data = alg_data;
alg_data->adapter.nr = pdev->id;

alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
if (alg_data->timeout <= 1)
alg_data->timeout = 2;

#ifdef CONFIG_OF
alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
if (pdev->dev.of_node) {
@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
if (IS_ERR(alg_data->clk))
return PTR_ERR(alg_data->clk);

timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);

snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
"%s", pdev->name);
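
The i2c-pnx hunks above drop the self-armed watchdog timer in favour of a bounded wait. A hedged sketch of the resulting control flow; the helper name `xfer_wait` is invented for illustration and is not part of the driver:

    #include <linux/completion.h>
    #include <linux/types.h>

    /* Wait for the ISR to signal completion; if the wait expires, run the
     * old timeout handler synchronously (it resets the bus, records -EIO). */
    static bool xfer_wait(struct completion *done, unsigned long timeout_jiffies,
                          void (*on_timeout)(void *ctx), void *ctx)
    {
        if (!wait_for_completion_timeout(done, timeout_jiffies)) {
            on_timeout(ctx);
            return false;   /* timed out */
        }
        return true;        /* completed in time */
    }
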
@ -1047,31 +1047,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(lan9303_mib);
}

static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;

if (phy == phy_base)
if (port == 0)
return lan9303_virt_phy_reg_read(chip, regnum);
if (phy > phy_base + 2)
if (port > 2)
return -ENODEV;

return chip->ops->phy_read(chip, phy, regnum);
return chip->ops->phy_read(chip, phy_base + port, regnum);
}

static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;

if (phy == phy_base)
if (port == 0)
return lan9303_virt_phy_reg_write(chip, regnum, val);
if (phy > phy_base + 2)
if (port > 2)
return -ENODEV;

return chip->ops->phy_write(chip, phy, regnum, val);
return chip->ops->phy_write(chip, phy_base + port, regnum, val);
}

static int lan9303_port_enable(struct dsa_switch *ds, int port,
@ -1099,7 +1099,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);

lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
}

static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@ -1374,8 +1374,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {

static int lan9303_register_switch(struct lan9303 *chip)
{
int base;

chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
@ -1385,8 +1383,7 @@ static int lan9303_register_switch(struct lan9303 *chip)
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops;
base = chip->phy_addr_base;
chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);

return dsa_register_switch(chip->ds);
}

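
The lan9303 conversion above changes the DSA callbacks from raw PHY addresses to port numbers. An illustrative helper (not in the driver) showing the translation the call sites now perform:

    /* Port 0 reaches the switch's virtual PHY; ports 1-2 reach the external
     * PHYs at phy_addr_base + port. Any other port has no PHY behind it. */
    static int lan9303_port_to_phy_addr(int port, int phy_addr_base)
    {
        if (port < 0 || port > 2)
            return -1;
        return phy_addr_base + port;
    }
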
@ -1380,6 +1380,7 @@ static int bcmasp_probe(struct platform_device *pdev)
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
of_node_put(intf_node);
ret = -ENOMEM;
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);

@ -6282,6 +6282,21 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
return max_ring;
}

u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp)
{
u16 i, tbl_size, max_ring = 0;
struct bnxt_rss_ctx *rss_ctx;

tbl_size = bnxt_get_rxfh_indir_size(bp->dev);

list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) {
for (i = 0; i < tbl_size; i++)
max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]);
}

return max_ring;
}

int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {

@ -2814,6 +2814,7 @@ int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
void bnxt_fill_ipv6_mask(__be32 mask[4]);
int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,

@ -961,6 +961,12 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}

if (req_rx_rings < bp->rx_nr_rings &&
req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) {
netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n");
return -EINVAL;
}

if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
netif_is_rxfh_configured(dev)) {

@ -1108,6 +1108,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
return 0;
}

/**
* e1000e_force_smbus - Force interfaces to transition to SMBUS mode.
* @hw: pointer to the HW structure
*
* Force the MAC and the PHY to SMBUS mode. Assumes semaphore already
* acquired.
*
* Return: 0 on success, negative errno on failure.
**/
static s32 e1000e_force_smbus(struct e1000_hw *hw)
{
u16 smb_ctrl = 0;
u32 ctrl_ext;
s32 ret_val;

/* Switching PHY interface always returns MDI error
* so disable retry mechanism to avoid wasting time
*/
e1000e_disable_phy_retry(hw);

/* Force SMBus mode in the PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl);
if (ret_val) {
e1000e_enable_phy_retry(hw);
return ret_val;
}

smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl);

e1000e_enable_phy_retry(hw);

/* Force SMBus mode in the MAC */
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, ctrl_ext);

return 0;
}

/**
* e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
* @hw: pointer to the HW structure
@ -1165,6 +1205,14 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;

if (hw->mac.type != e1000_pch_mtp) {
ret_val = e1000e_force_smbus(hw);
if (ret_val) {
e_dbg("Failed to force SMBUS: %d\n", ret_val);
goto release;
}
}

/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@ -1225,27 +1273,12 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
}

release:
/* Switching PHY interface always returns MDI error
* so disable retry mechanism to avoid wasting time
*/
e1000e_disable_phy_retry(hw);

/* Force SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val) {
e1000e_enable_phy_retry(hw);
hw->phy.ops.release(hw);
goto out;
if (hw->mac.type == e1000_pch_mtp) {
ret_val = e1000e_force_smbus(hw);
if (ret_val)
e_dbg("Failed to force SMBUS over MTL system: %d\n",
ret_val);
}
phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

e1000e_enable_phy_retry(hw);

/* Force SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);

hw->phy.ops.release(hw);
out:
@ -13293,6 +13293,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
bool need_reset;
int i;

/* VSI shall be deleted in a moment, block loading new programs */
if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
return -EINVAL;

/* Don't allow frames that span over multiple buffers */
if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
@ -13301,14 +13305,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,

/* When turning XDP on->off/off->on we reset and rebuild the rings. */
need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

if (need_reset)
i40e_prep_for_reset(pf);

/* VSI shall be deleted in a moment, just return EINVAL */
if (test_bit(__I40E_IN_REMOVE, pf->state))
return -EINVAL;

old_prog = xchg(&vsi->xdp_prog, prog);

if (need_reset) {

@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
if (ch->dma.irq)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
int desc;
struct ltq_dma_channel *dma = &ch->dma;

for (desc = 0; desc < LTQ_DESC_NUM; desc++)
for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
}
}

@ -1643,7 +1643,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
if (req->ssow > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid SSOW req, %d > max %d\n",
pcifunc, req->sso, block->lf.max);
pcifunc, req->ssow, block->lf.max);
return -EINVAL;
}
mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);

@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
struct mtk_star_priv *priv;
struct phy_device *phydev;
struct net_device *ndev;
struct device *dev;
void __iomem *base;
@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);

phydev = of_phy_find_device(priv->phy_node);
if (phydev) {
phydev->mac_managed_pm = true;
put_device(&phydev->mdio.dev);
}

return devm_register_netdev(dev, ndev);
}

@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
netif_dbg(ks, intr, ks->netdev,
"%s: txspace %d\n", __func__, tx_space);

spin_lock(&ks->statelock);
spin_lock_bh(&ks->statelock);
ks->tx_space = tx_space;
if (netif_queue_stopped(ks->netdev))
netif_wake_queue(ks->netdev);
spin_unlock(&ks->statelock);
spin_unlock_bh(&ks->statelock);
}

if (status & IRQ_SPIBEI) {
@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);

ks->queued_len = 0;
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_start_queue(ks->netdev);

netif_dbg(ks, ifup, ks->netdev, "network device up\n");
@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)

/* schedule work to do the actual set of the data if needed */

spin_lock(&ks->statelock);
spin_lock_bh(&ks->statelock);

if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
schedule_work(&ks->rxctrl_work);
}

spin_unlock(&ks->statelock);
spin_unlock_bh(&ks->statelock);
}

static int ks8851_set_mac_address(struct net_device *dev, void *addr)
@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int ret;

ks->netdev = netdev;
ks->tx_space = 6144;

ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(ks->gpio);

@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work)

tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);

spin_lock(&ks->statelock);
spin_lock_bh(&ks->statelock);
ks->queued_len -= dequeued_len;
ks->tx_space = tx_space;
spin_unlock(&ks->statelock);
spin_unlock_bh(&ks->statelock);

ks8851_unlock_spi(ks, &flags);
}

@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
lan87xx_cable_test_report_trans(detect));

return 0;
return phy_init_hw(phydev);
}

static int lan87xx_cable_test_get_status(struct phy_device *phydev,

@ -70,6 +70,7 @@
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */

#define PPP_PROTO_LEN 2
#define PPP_LCP_HDRLEN 4

/*
* An instance of /dev/ppp can be associated with either a ppp
@ -493,6 +494,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
return ret;
}

static bool ppp_check_packet(struct sk_buff *skb, size_t count)
{
/* LCP packets must include LCP header which 4 bytes long:
* 1-byte code, 1-byte identifier, and 2-byte length.
*/
return get_unaligned_be16(skb->data) != PPP_LCP ||
count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@ -515,6 +525,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
kfree_skb(skb);
goto out;
}
ret = -EINVAL;
if (unlikely(!ppp_check_packet(skb, count))) {
kfree_skb(skb);
goto out;
}

switch (pf->kind) {
case INTERFACE:
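
A self-contained userspace model of the new `ppp_check_packet()` logic, assuming (as the kernel path does) that the caller has already guaranteed at least the 2-byte protocol field is present:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PPP_PROTO_LEN  2
    #define PPP_LCP_HDRLEN 4
    #define PPP_LCP        0xc021

    /* A write claiming to be an LCP frame must cover the protocol field
     * plus the 4-byte code/identifier/length header. */
    static bool lcp_write_is_valid(const uint8_t *buf, size_t count)
    {
        uint16_t proto = ((uint16_t)buf[0] << 8) | buf[1];

        return proto != PPP_LCP || count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
    }
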
@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
if (bits == 32) {
*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
} else if (bits == 128) {
((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
((u64 *)dst)[0] = get_unaligned_be64(src);
((u64 *)dst)[1] = get_unaligned_be64(src + 8);
}
}

@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
*/
static inline int wg_cpumask_next_online(int *last_cpu)
{
int cpu = cpumask_next(*last_cpu, cpu_online_mask);
int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(cpu_online_mask);
*last_cpu = cpu;
WRITE_ONCE(*last_cpu, cpu);
return cpu;
}
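
A userspace analogue of the `wg_cpumask_next_online()` change, using C11 relaxed atomics where the kernel uses READ_ONCE/WRITE_ONCE: concurrent callers may race on the cursor, but no load or store can tear.

    #include <stdatomic.h>

    static int next_online(atomic_int *last, int nr_cpus)
    {
        int cpu = atomic_load_explicit(last, memory_order_relaxed) + 1;

        if (cpu >= nr_cpus)
            cpu = 0;
        atomic_store_explicit(last, cpu, memory_order_relaxed);
        return cpu;     /* a benign race may repeat a CPU; that is fine */
    }
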
@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
{
struct sk_buff *skb;

if (skb_queue_empty(&peer->staged_packet_queue)) {
if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
GFP_ATOMIC);
if (unlikely(!skb))

@ -81,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
/*
* These interrupt controllers abuse interrupt-map for unspeakable
* reasons and rely on the core code to *ignore* it (the drivers do
* their own parsing of the property).
* their own parsing of the property). The PAsemi entry covers a
* non-sensical interrupt-map that is better left ignored.
*
* If you think of adding to the list for something *new*, think
* again. There is a high chance that you will be sent back to the
@ -95,6 +96,7 @@ static const char * const of_irq_imap_abusers[] = {
"fsl,ls1043a-extirq",
"fsl,ls1088a-extirq",
"renesas,rza1-irqc",
"pasemi,rootbus",
NULL,
};

@ -293,20 +295,8 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
imaplen -= imap - oldimap;
pr_debug(" -> imaplen=%d\n", imaplen);
}
if (!match) {
if (intc) {
/*
* The PASEMI Nemo is a known offender, so
* let's only warn for anyone else.
*/
WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
"%pOF interrupt-map failed, using interrupt-controller\n",
ipar);
return 0;
}

if (!match)
goto fail;
}

/*
* Successfully parsed an interrupt-map translation; copy new

@ -167,7 +167,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
unsigned long cmask;
u64 oldval, delta;

if (!rvpmu->ctr_read)
if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE))
return 0;

cmask = riscv_pmu_ctr_get_width_mask(event);

@ -20,6 +20,7 @@
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
#include <linux/soc/andes/irq.h>
#include <linux/workqueue.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
@ -114,7 +115,7 @@ struct sbi_pmu_event_data {
};
};

static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
static struct sbi_pmu_event_data pmu_hw_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
SBI_PMU_HW_CPU_CYCLES,
SBI_PMU_EVENT_TYPE_HW, 0}},
@ -148,7 +149,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
@ -293,6 +294,34 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
},
};

static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
{
struct sbiret ret;

ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
0, cmask, 0, edata->event_idx, 0, 0);
if (!ret.error) {
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
} else if (ret.error == SBI_ERR_NOT_SUPPORTED) {
/* This event cannot be monitored by any counter */
edata->event_idx = -EINVAL;
}
}

static void pmu_sbi_check_std_events(struct work_struct *work)
{
for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
pmu_sbi_check_event(&pmu_hw_event_map[i]);

for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++)
for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++)
for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]);
}

static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events);

static int pmu_sbi_ctr_get_width(int idx)
{
return pmu_ctr_list[idx].width;
@ -478,6 +507,12 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
u64 raw_config_val;
int ret;

/*
* Ensure we are finished checking standard hardware events for
* validity before allowing userspace to configure any events.
*/
flush_work(&check_std_events_work);

switch (type) {
case PERF_TYPE_HARDWARE:
if (config >= PERF_COUNT_HW_MAX)
@ -762,7 +797,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
* which may include counters that are not enabled yet.
*/
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
0, pmu->cmask, 0, 0, 0, 0);
0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
}

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@ -1359,6 +1394,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;

/* Asynchronously check which standard events are available */
schedule_work(&check_std_events_work);

return 0;

out_unregister:
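
The riscv-pmu hunks above follow a common deferred-probe idiom; in miniature (the function and work names here are illustrative, not from the driver):

    #include <linux/workqueue.h>

    static void probe_fn(struct work_struct *work)
    {
        /* ... interrogate firmware, populate event tables ... */
    }
    static DECLARE_WORK(probe_work, probe_fn);

    /* Kick the scan off the boot path ... */
    static void driver_probe_tail(void)
    {
        schedule_work(&probe_work);
    }

    /* ... and flush it from any path that consumes the results. */
    static void first_consumer(void)
    {
        flush_work(&probe_work);
    }
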
@ -3299,6 +3299,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
},
.driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
},
{ }
};

static int toshiba_acpi_add(struct acpi_device *acpi_dev)

@ -4119,8 +4119,6 @@ static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);

sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");

if (opal_unlock_from_suspend(sdkp->opal_dev)) {
sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
return -EIO;
@ -4137,12 +4135,13 @@ static int sd_resume_common(struct device *dev, bool runtime)
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;

sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");

if (!sd_do_start_stop(sdkp->device, runtime)) {
sdkp->suspended = false;
return 0;
}

sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
sd_resume(dev);

@ -759,6 +759,9 @@ static void power_allocator_manage(struct thermal_zone_device *tz)
return;
}

if (!params->trip_max)
return;

allocate_power(tz, params->trip_max->temperature);
params->update_cdevs = true;
}

@ -300,6 +300,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
else if (tz->temperature == THERMAL_TEMP_INVALID)
thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS));
}

static struct thermal_governor *thermal_get_tz_governor(struct thermal_zone_device *tz)
@ -482,16 +484,14 @@ static void thermal_trip_crossed(struct thermal_zone_device *tz,
thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
}

static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a,
static int thermal_trip_notify_cmp(void *not_used, const struct list_head *a,
const struct list_head *b)
{
struct thermal_trip_desc *tda = container_of(a, struct thermal_trip_desc,
notify_list_node);
struct thermal_trip_desc *tdb = container_of(b, struct thermal_trip_desc,
notify_list_node);
int ret = tdb->notify_temp - tda->notify_temp;

return ascending ? ret : -ret;
return tda->notify_temp - tdb->notify_temp;
}

void __thermal_zone_device_update(struct thermal_zone_device *tz,
@ -511,7 +511,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
update_temperature(tz);

if (tz->temperature == THERMAL_TEMP_INVALID)
return;
goto monitor;

__thermal_zone_set_trips(tz);

@ -520,12 +520,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
for_each_trip_desc(tz, td)
handle_thermal_trip(tz, td, &way_up_list, &way_down_list);

list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp);
list_sort(NULL, &way_up_list, thermal_trip_notify_cmp);
list_for_each_entry(td, &way_up_list, notify_list_node)
thermal_trip_crossed(tz, &td->trip, governor, true);

list_sort(NULL, &way_down_list, thermal_trip_notify_cmp);
list_for_each_entry(td, &way_down_list, notify_list_node)
list_for_each_entry_reverse(td, &way_down_list, notify_list_node)
thermal_trip_crossed(tz, &td->trip, governor, false);

if (governor->manage)
@ -533,6 +533,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,

thermal_debug_update_trip_stats(tz);

monitor:
monitor_thermal_zone(tz);
}

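
The thermal change above keeps one ascending comparator and walks the way-down list in reverse rather than sorting twice. The same idea in plain C, with illustrative data in place of trip descriptors:

    #include <stdlib.h>

    static int cmp_int(const void *a, const void *b)
    {
        return *(const int *)a - *(const int *)b;   /* ascending */
    }

    static void notify_all(int *temps, size_t n)
    {
        qsort(temps, n, sizeof(*temps), cmp_int);
        for (size_t i = 0; i < n; i++)
            (void)temps[i];  /* way-up notifications: lowest temperature first */
        for (size_t i = n; i-- > 0; )
            (void)temps[i];  /* way-down notifications: highest temperature first */
    }
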
@ -133,6 +133,12 @@ struct thermal_zone_device {
struct thermal_trip_desc trips[] __counted_by(num_trips);
};

/*
* Default delay after a failing thermal zone temperature check before
* attempting to check it again.
*/
#define THERMAL_RECHECK_DELAY_MS 250

/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"

@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
* Return: the hardware queue instance on which the request would
* be queued.
* Return: the hardware queue instance on which the request will be or has
* been queued. %NULL if the request has already been freed.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req)
{
u32 utag = blk_mq_unique_tag(req);
u32 hwq = blk_mq_unique_tag_to_hwq(utag);
struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

return &hba->uhq[hwq];
return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}

/**

@ -515,6 +514,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (!cmd)
return -EINVAL;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
if (!hwq)
return 0;
} else {
hwq = hba->dev_cmd_queue;
}

@ -6456,6 +6456,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
/* Release cmd in MCQ mode if abort succeeds */
if (is_mcq_enabled(hba) && (*ret == 0)) {
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
if (!hwq)
return 0;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);

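
Both UFS hunks above share one shape: a queue lookup that can race with request completion now returns NULL, and each caller treats NULL as "nothing left to clean up". The guard in isolation, taken from the call sites above:

    hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
    if (!hwq)
        return 0;   /* request already completed and freed under us */
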
@ -1260,7 +1260,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
struct vfio_pci_hot_reset_info hdr;
struct vfio_pci_fill_info fill = {};
bool slot = false;
int ret, count;
int ret, count = 0;

if (copy_from_user(&hdr, arg, minsz))
return -EFAULT;

@ -3,6 +3,7 @@
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
@ -1553,13 +1554,13 @@ int bch2_check_alloc_info(struct bch_fs *c)
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
struct btree_iter *alloc_iter)
struct btree_iter *alloc_iter,
struct bkey_buf *last_flushed)
{
struct bch_fs *c = trans->c;
struct btree_iter lru_iter;
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a;
struct bkey_s_c alloc_k, lru_k;
struct bkey_s_c alloc_k;
struct printbuf buf = PRINTBUF;
int ret;

@ -1573,6 +1574,14 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,

a = bch2_alloc_to_v4(alloc_k, &a_convert);

if (a->fragmentation_lru) {
ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
a->fragmentation_lru,
alloc_k, last_flushed);
if (ret)
return ret;
}

if (a->data_type != BCH_DATA_cached)
return 0;

@ -1597,41 +1606,30 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
a = &a_mut->v;
}

lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
lru_pos(alloc_k.k->p.inode,
bucket_to_u64(alloc_k.k->p),
a->io_time[READ]), 0);
ret = bkey_err(lru_k);
ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],
alloc_k, last_flushed);
if (ret)
return ret;

if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
alloc_key_to_missing_lru_entry,
"missing lru entry\n"
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
ret = bch2_lru_set(trans,
alloc_k.k->p.inode,
bucket_to_u64(alloc_k.k->p),
a->io_time[READ]);
if (ret)
goto err;
}
goto err;
err:
fsck_err:
bch2_trans_iter_exit(trans, &lru_iter);
printbuf_exit(&buf);
return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
struct bkey_buf last_flushed;

bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);

int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_check_alloc_to_lru_ref(trans, &iter)));
bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)));

bch2_bkey_buf_exit(&last_flushed, c);
bch_err_fn(c, ret);
return ret;
}

@ -1703,6 +1703,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].data_type]++;

printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 24);

percpu_down_read(&c->mark_lock);
@ -1736,6 +1737,7 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].data_type]++;

printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 12);
printbuf_tabstop_push(out, 16);
printbuf_tabstop_push(out, 16);

@ -434,13 +434,6 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
return ret;
}

static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
{
return bpos_eq(l.k->p, r.k->p) &&
bkey_bytes(l.k) == bkey_bytes(r.k) &&
!memcmp(l.v, r.v, bkey_val_bytes(l.k));
}

struct extents_to_bp_state {
struct bpos bucket_start;
struct bpos bucket_end;
@ -536,11 +529,8 @@ static int check_bp_exists(struct btree_trans *trans,
struct btree_iter other_extent_iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c bp_k;
struct bkey_buf tmp;
int ret = 0;

bch2_bkey_buf_init(&tmp);

struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket);
if (!ca) {
prt_str(&buf, "extent for nonexistent device:bucket ");
@ -565,22 +555,9 @@ static int check_bp_exists(struct btree_trans *trans,

if (bp_k.k->type != KEY_TYPE_backpointer ||
memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
bch2_bkey_buf_reassemble(&tmp, c, orig_k);

if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) {
if (bp.level) {
bch2_trans_unlock(trans);
bch2_btree_interior_updates_flush(c);
}

ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
goto err;

bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k);
ret = -BCH_ERR_transaction_restart_write_buffer_flush;
goto out;
}
ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed);
if (ret)
goto err;

goto check_existing_bp;
}
@ -589,7 +566,6 @@ static int check_bp_exists(struct btree_trans *trans,
fsck_err:
bch2_trans_iter_exit(trans, &other_extent_iter);
bch2_trans_iter_exit(trans, &bp_iter);
bch2_bkey_buf_exit(&tmp, c);
bch2_dev_put(ca);
printbuf_exit(&buf);
return ret;
@ -794,6 +770,8 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
!((1U << btree) & btree_interior_mask))
continue;

bch2_trans_begin(trans);

__for_each_btree_node(trans, iter, btree,
btree == start.btree ? start.pos : POS_MIN,
0, depth, BTREE_ITER_prefetch, b, ret) {
@ -905,7 +883,7 @@ static int check_one_backpointer(struct btree_trans *trans,
struct bbpos start,
struct bbpos end,
struct bkey_s_c_backpointer bp,
struct bpos *last_flushed_pos)
struct bkey_buf *last_flushed)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
@ -925,20 +903,18 @@ static int check_one_backpointer(struct btree_trans *trans,
if (ret)
return ret;

if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
*last_flushed_pos = bp.k->p;
ret = bch2_btree_write_buffer_flush_sync(trans) ?:
-BCH_ERR_transaction_restart_write_buffer_flush;
goto out;
}
if (!k.k) {
ret = bch2_btree_write_buffer_maybe_flush(trans, bp.s_c, last_flushed);
if (ret)
goto out;

if (fsck_err_on(!k.k, c,
backpointer_to_missing_ptr,
"backpointer for missing %s\n %s",
bp.v->level ? "btree node" : "extent",
(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
goto out;
if (fsck_err(c, backpointer_to_missing_ptr,
"backpointer for missing %s\n %s",
bp.v->level ? "btree node" : "extent",
(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
goto out;
}
}
out:
fsck_err:
@ -951,14 +927,20 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
struct bbpos start,
struct bbpos end)
{
struct bpos last_flushed_pos = SPOS_MAX;
struct bkey_buf last_flushed;

return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);

int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_one_backpointer(trans, start, end,
bkey_s_c_to_backpointer(k),
&last_flushed_pos));
&last_flushed));

bch2_bkey_buf_exit(&last_flushed, trans->c);
return ret;
}

int bch2_check_backpointers_to_extents(struct bch_fs *c)

@ -660,8 +660,9 @@ int bch2_bkey_format_invalid(struct bch_fs *c,
bch2_bkey_format_field_overflows(f, i)) {
unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
u64 packed_max = f->bits_per_field[i]
? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
unsigned packed_bits = min(64, f->bits_per_field[i]);
u64 packed_max = packed_bits
? ~((~0ULL << 1) << (packed_bits - 1))
: 0;

prt_printf(err, "field %u too large: %llu + %llu > %llu",
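
The bkey-format fix above clamps the field width before building the mask; with an oversized on-disk width the old expression could shift a 64-bit value by 64 or more, which is undefined. A self-contained version of the now-safe computation:

    #include <stdint.h>

    static uint64_t packed_max(unsigned bits)
    {
        if (bits > 64)
            bits = 64;  /* untrusted on-disk value: clamp before shifting */
        /* (~0 << 1) << (bits - 1) shifts by at most 63, so the expression
         * stays defined for every width 0..64. */
        return bits ? ~((~0ULL << 1) << (bits - 1)) : 0;
    }
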
@ -194,6 +194,13 @@ static inline struct bpos bkey_max(struct bpos l, struct bpos r)
return bkey_gt(l, r) ? l : r;
}

static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
{
return bpos_eq(l.k->p, r.k->p) &&
bkey_bytes(l.k) == bkey_bytes(r.k) &&
!memcmp(l.v, r.v, bkey_val_bytes(l.k));
}

void bch2_bpos_swab(struct bpos *);
void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);

@ -903,6 +903,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true);
percpu_up_read(&c->mark_lock);

gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca);

if (fsck_err_on(new.data_type != gc.data_type, c,
alloc_key_data_type_wrong,
"bucket %llu:%llu gen %u has wrong data_type"
@ -916,23 +918,19 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
#define copy_bucket_field(_errtype, _f) \
if (fsck_err_on(new._f != gc._f, c, _errtype, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %u, should be %u", \
": got %llu, should be %llu", \
iter->pos.inode, iter->pos.offset, \
gc.gen, \
bch2_data_type_str(gc.data_type), \
new._f, gc._f)) \
(u64) new._f, (u64) gc._f)) \
new._f = gc._f; \

copy_bucket_field(alloc_key_gen_wrong,
gen);
copy_bucket_field(alloc_key_dirty_sectors_wrong,
dirty_sectors);
copy_bucket_field(alloc_key_cached_sectors_wrong,
cached_sectors);
copy_bucket_field(alloc_key_stripe_wrong,
stripe);
copy_bucket_field(alloc_key_stripe_redundancy_wrong,
stripe_redundancy);
copy_bucket_field(alloc_key_gen_wrong, gen);
copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors);
copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
copy_bucket_field(alloc_key_stripe_wrong, stripe);
copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
copy_bucket_field(alloc_key_fragmentation_lru_wrong, fragmentation_lru);
#undef copy_bucket_field

if (!bch2_alloc_v4_cmp(*old, new))
@ -946,7 +944,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
a->v = new;

/*
* The trigger normally makes sure this is set, but we're not running
* The trigger normally makes sure these are set, but we're not running
* triggers:
*/
if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])

@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
@ -492,6 +494,41 @@ int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
return ret;
}

/**
* In check and repair code, when checking references to write buffer btrees we
* need to issue a flush before we have a definitive error: this issues a flush
* if this is a key we haven't yet checked.
*/
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
struct bkey_s_c referring_k,
struct bkey_buf *last_flushed)
{
struct bch_fs *c = trans->c;
struct bkey_buf tmp;
int ret = 0;

bch2_bkey_buf_init(&tmp);

if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) {
bch2_bkey_buf_reassemble(&tmp, c, referring_k);

if (bkey_is_btree_ptr(referring_k.k)) {
bch2_trans_unlock(trans);
bch2_btree_interior_updates_flush(c);
}

ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
goto err;

bch2_bkey_buf_copy(last_flushed, c, tmp.k);
ret = -BCH_ERR_transaction_restart_write_buffer_flush;
}
err:
bch2_bkey_buf_exit(&tmp, c);
return ret;
}

static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
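
Typical use of the new helper, mirroring the fsck callers earlier in this diff: keep one `last_flushed` key per pass, and let the helper turn a suspected inconsistency into a single flush-and-restart the first time each key is seen. A fragment, not a complete function:

    struct bkey_buf last_flushed;

    bch2_bkey_buf_init(&last_flushed);
    bkey_init(&last_flushed.k->k);

    /* inside the check loop: */
    ret = bch2_btree_write_buffer_maybe_flush(trans, k, &last_flushed);
    /* -BCH_ERR_transaction_restart_write_buffer_flush => redo the check */

    bch2_bkey_buf_exit(&last_flushed, c);
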
@ -23,6 +23,9 @@ int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
int bch2_btree_write_buffer_tryflush(struct btree_trans *);

struct bkey_buf;
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *, struct bkey_s_c, struct bkey_buf *);

struct journal_keys_to_wb {
struct btree_write_buffer_keys *wb;
size_t room;

@ -132,14 +132,9 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
{
struct io_timer *ret = NULL;

spin_lock(&clock->timer_lock);

if (clock->timers.used &&
time_after_eq(now, clock->timers.data[0]->expire))
heap_pop(&clock->timers, ret, io_timer_cmp, NULL);

spin_unlock(&clock->timer_lock);

return ret;
}

@ -148,8 +143,10 @@ void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
struct io_timer *timer;
unsigned long now = atomic64_add_return(sectors, &clock->now);

spin_lock(&clock->timer_lock);
while ((timer = get_expired_timer(clock, now)))
timer->fn(timer);
spin_unlock(&clock->timer_lock);
}

void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)

@ -5,7 +5,9 @@
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
@ -454,6 +456,38 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
}
}

void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
struct bch_io_opts *io_opts,
struct data_update_opts *data_opts)
{
printbuf_tabstop_push(out, 20);
prt_str(out, "rewrite ptrs:\t");
bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
prt_newline(out);

prt_str(out, "kill ptrs:\t");
bch2_prt_u64_base2(out, data_opts->kill_ptrs);
prt_newline(out);

prt_str(out, "target:\t");
bch2_target_to_text(out, c, data_opts->target);
prt_newline(out);

prt_str(out, "compression:\t");
bch2_compression_opt_to_text(out, background_compression(*io_opts));
prt_newline(out);

prt_str(out, "extra replicas:\t");
prt_u64(out, data_opts->extra_replicas);
}

void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
{
bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
prt_newline(out);
bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
}

int bch2_extent_drop_ptrs(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
@ -643,6 +677,16 @@ int bch2_data_update_init(struct btree_trans *trans,
if (!(durability_have + durability_removing))
m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);

if (!m->op.nr_replicas) {
struct printbuf buf = PRINTBUF;

bch2_data_update_to_text(&buf, m);
WARN(1, "trying to move an extent, but nr_replicas=0\n%s", buf.buf);
printbuf_exit(&buf);
ret = -BCH_ERR_data_update_done;
goto done;
}

m->op.nr_replicas_required = m->op.nr_replicas;

if (reserve_sectors) {

@ -17,6 +17,9 @@ struct data_update_opts {
unsigned write_flags;
};

void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *,
struct bch_io_opts *, struct data_update_opts *);

struct data_update {
/* extent being updated: */
enum btree_id btree_id;
@ -27,6 +30,8 @@ struct data_update {
struct bch_write_op op;
};

void bch2_data_update_to_text(struct printbuf *, struct data_update *);

int bch2_data_update_index_update(struct bch_write_op *);

void bch2_data_update_read_done(struct data_update *,

@ -610,7 +610,7 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
list_sort(&c->btree_trans_list, list_ptr_order_cmp);

list_for_each_entry(trans, &c->btree_trans_list, list) {
if ((ulong) trans < i->iter)
if ((ulong) trans <= i->iter)
continue;

i->iter = (ulong) trans;
@ -832,16 +832,16 @@ static const struct file_operations btree_transaction_stats_op = {
static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c)
{
struct btree_trans *trans;
pid_t iter = 0;
ulong iter = 0;
restart:
seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
struct task_struct *task = READ_ONCE(trans->locking_wait.task);
list_sort(&c->btree_trans_list, list_ptr_order_cmp);

if (!task || task->pid <= iter)
list_for_each_entry(trans, &c->btree_trans_list, list) {
if ((ulong) trans <= iter)
continue;

iter = task->pid;
iter = (ulong) trans;

if (!closure_get_not_zero(&trans->ref))
continue;

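
Both debug-file hunks above switch to using the element address as a resumable cursor: sort the list by pointer, then skip everything at or below the last pointer visited. Sketched in kernel style, mirroring the two-argument `list_sort` form used above, with the emit step elided:

    ulong cursor = 0;
    restart:
    seqmutex_lock(&lock);
    list_sort(&head, list_ptr_order_cmp);    /* pointer order = stable cursor */
    list_for_each_entry(trans, &head, list) {
        if ((ulong) trans <= cursor)
            continue;                        /* already emitted */
        cursor = (ulong) trans;
        /* ... unlock, emit the entry, then goto restart ... */
    }
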
@ -48,7 +48,7 @@ static inline unsigned eytzinger1_right_child(unsigned i)

static inline unsigned eytzinger1_first(unsigned size)
{
return rounddown_pow_of_two(size);
return size ? rounddown_pow_of_two(size) : 0;
}

static inline unsigned eytzinger1_last(unsigned size)
@ -101,7 +101,9 @@ static inline unsigned eytzinger1_prev(unsigned i, unsigned size)

static inline unsigned eytzinger1_extra(unsigned size)
{
return (size + 1 - rounddown_pow_of_two(size)) << 1;
return size
? (size + 1 - rounddown_pow_of_two(size)) << 1
: 0;
}

static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,

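
A quick userspace check of the zero-size guards added above; `rounddown_pow2()` stands in for the kernel's `rounddown_pow_of_two()`, whose result is undefined for 0:

    #include <assert.h>

    static unsigned rounddown_pow2(unsigned v)
    {
        unsigned r = 1;

        while (r * 2 <= v && r * 2 != 0)
            r *= 2;
        return r;   /* only meaningful for v >= 1, hence the guard below */
    }

    static unsigned eytzinger1_first(unsigned size)
    {
        return size ? rounddown_pow2(size) : 0;
    }

    int main(void)
    {
        assert(eytzinger1_first(0) == 0);   /* previously undefined */
        assert(eytzinger1_first(5) == 4);
        assert(eytzinger1_first(8) == 8);
        return 0;
    }
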
@ -194,6 +194,12 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
* discard_new_inode() expects it to be set...
*/
inode->v.i_flags |= I_NEW;
/*
* We don't want bch2_evict_inode() to delete the inode on disk,
* we just raced and had another inode in cache. Normally new
* inodes don't have nlink == 0 - except tmpfiles do...
*/
set_nlink(&inode->v, 1);
discard_new_inode(&inode->v);
inode = old;
} else {
@ -2026,6 +2032,8 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
__bch2_fs_stop(c);
deactivate_locked_super(sb);
err:
if (ret)
pr_err("error: %s", bch2_err_str(ret));
/*
* On an inconsistency error in recovery we might see an -EROFS derived
* errorcode (from the journal), but we don't want to return that to
@ -2065,7 +2073,8 @@ int __init bch2_vfs_init(void)
{
int ret = -ENOMEM;

bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT |
SLAB_ACCOUNT);
if (!bch2_inode_cache)
goto err;

@ -389,7 +389,6 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio

bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
bch2_trans_unlock(trans);

if (!bch2_bkey_matches_ptr(c, k,
rbio->pick.ptr,
@ -1004,6 +1003,9 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
rbio->promote = promote;
INIT_WORK(&rbio->work, NULL);

if (flags & BCH_READ_NODECODE)
orig->pick = pick;

rbio->bio.bi_opf = orig->bio.bi_opf;
rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
rbio->bio.bi_end_io = bch2_read_endio;

@ -1095,7 +1095,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
unsigned nr;
int ret;
@ -1117,7 +1117,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));

ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
err:
bch_err_fn(ca, ret);
return ret;
@ -1129,7 +1129,7 @@ int bch2_fs_journal_alloc(struct bch_fs *c)
if (ca->journal.nr)
continue;

int ret = bch2_dev_journal_alloc(ca);
int ret = bch2_dev_journal_alloc(ca, true);
if (ret) {
percpu_ref_put(&ca->io_ref);
return ret;
@ -1184,9 +1184,11 @@ void bch2_fs_journal_stop(struct journal *j)
journal_quiesce(j);
cancel_delayed_work_sync(&j->write_work);

BUG_ON(!bch2_journal_error(j) &&
test_bit(JOURNAL_replay_done, &j->flags) &&
j->last_empty_seq != journal_cur_seq(j));
WARN(!bch2_journal_error(j) &&
test_bit(JOURNAL_replay_done, &j->flags) &&
j->last_empty_seq != journal_cur_seq(j),
"journal shutdown error: cur seq %llu but last empty seq %llu",
journal_cur_seq(j), j->last_empty_seq);

if (!bch2_journal_error(j))
clear_bit(JOURNAL_running, &j->flags);
@ -1418,8 +1420,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
unsigned long now = jiffies;
u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

if (!out->nr_tabstops)
printbuf_tabstop_push(out, 28);
printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 28);
out->atomic++;

rcu_read_lock();

@ -433,7 +433,7 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);

int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);
int bch2_dev_journal_alloc(struct bch_dev *);
int bch2_dev_journal_alloc(struct bch_dev *, bool);
int bch2_fs_journal_alloc(struct bch_fs *);

void bch2_dev_journal_stop(struct journal *, struct bch_dev *);

@ -415,6 +415,8 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c,
flags|BCH_VALIDATE_journal);
if (ret == FSCK_DELETED_KEY)
continue;
else if (ret)
return ret;

k = bkey_next(k);
}
@ -1762,11 +1764,13 @@ static CLOSURE_CALLBACK(journal_write_preflush)

if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
spin_lock(&j->lock);
closure_wait(&j->async_wait, cl);
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
closure_wait(&j->async_wait, cl);
spin_unlock(&j->lock);
continue_at(cl, journal_write_preflush, j->wq);
return;
}
spin_unlock(&j->lock);

continue_at(cl, journal_write_preflush, j->wq);
return;
}

if (w->separate_flush) {
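
The journal_write_preflush rework is the classic double-checked sleep: re-test the condition under the lock before parking on the waitlist, so a wakeup landing between the unlocked test and `closure_wait()` cannot be lost. The shape in isolation; `still_behind()` is an illustrative stand-in for the sequence comparison:

    if (still_behind(j)) {
        spin_lock(&j->lock);
        if (still_behind(j)) {              /* re-check under the lock */
            closure_wait(&j->async_wait, cl);
            spin_unlock(&j->lock);
            continue_at(cl, journal_write_preflush, j->wq);
            return;
        }
        spin_unlock(&j->lock);              /* condition cleared; fall through */
    }
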