Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-11 15:40:50 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 0d36938bb8
1 .get_maintainer.ignore (new, normal file)
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
1 MAINTAINERS
@@ -5850,6 +5850,7 @@ S:	Odd Fixes
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:	"J. Bruce Fields" <bfields@fieldses.org>
+M:	Jeff Layton <jlayton@poochiereds.net>
 L:	linux-nfs@vger.kernel.org
 W:	http://nfs.sourceforge.net/
 S:	Supported
2 Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	}
 
 	/* the mmap semaphore is taken only if not in an atomic context */
-	atomic = in_atomic();
+	atomic = faulthandler_disabled();
 
 	if (!atomic)
 		down_read(&current->mm->mmap_sem);
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask		= wakeupgen_mask,
 	.irq_unmask		= wakeupgen_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_abt32(vcpu, false, addr);
-
-	inject_abt64(vcpu, false, addr);
+	else
+		inject_abt64(vcpu, false, addr);
 }
 
 /**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_abt32(vcpu, true, addr);
-
-	inject_abt64(vcpu, true, addr);
+	else
+		inject_abt64(vcpu, true, addr);
 }
 
 /**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_undef32(vcpu);
-
-	inject_undef64(vcpu);
+	else
+		inject_undef64(vcpu);
 }
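
Note on the three arm64 KVM hunks above: without the else, a fault destined for a 32-bit guest was injected through the 32-bit path and then fell through into the 64-bit injection as well. A minimal standalone sketch of the control-flow fix (hypothetical stub functions, not the kernel code):

    #include <stdbool.h>

    static void inject32(void) { /* 32-bit injection path */ }
    static void inject64(void) { /* 64-bit injection path */ }

    static void inject_fault(bool guest_is_32bit)
    {
        /* The bug: without the else, a 32-bit guest ran both paths.
         * The fix makes the two paths mutually exclusive. */
        if (guest_is_32bit)
            inject32();
        else
            inject64();
    }
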
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	noat
 	SAVE_ALL
 	FEXPORT(handle_\exception\ext)
-	__BUILD_clear_\clear
+	__build_clear_\clear
 	.set	at
 	__BUILD_\verbose \exception
 	move	a0, sp
@@ -191,6 +191,9 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 
 	pci_device_add(dev, bus);
 
+	/* Setup MSI caps & disable MSI/MSI-X interrupts */
+	pci_msi_setup_pci_dev(dev);
+
 	return dev;
 }
 EXPORT_SYMBOL(of_create_pci_dev);
@@ -79,12 +79,12 @@ do {									\
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER  \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
+	  "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary							\
@@ -100,11 +100,7 @@ do {									\
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last)					\
 	asm volatile(SAVE_CONTEXT					\
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
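
Context for the switch_to hunks: EFLAGS is not otherwise preserved across a context switch, so a bit like NT set in one task could leak into the next; the fix brackets the switch with pushf/popf and drops "flags" from the clobber list. The save/restore pair in miniature (GCC x86-64 inline assembly, illustrative only, not the kernel macro):

    static inline void flags_save_restore_demo(void)
    {
        unsigned long flags;

        /* capture EFLAGS ... */
        asm volatile("pushf ; pop %0" : "=rm" (flags));
        /* ... and put them back, so bits like NT cannot leak */
        asm volatile("push %0 ; popf" : : "g" (flags) : "memory", "cc");
    }
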
@@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void)
 {
 	u64 msr;
 
-	if (cpu_has_apic)
+	if (!cpu_has_apic)
 		return;
 
 	rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1483,13 @@ void x2apic_setup(void)
 
 static __init void x2apic_disable(void)
 {
-	u32 x2apic_id;
+	u32 x2apic_id, state = x2apic_state;
 
-	if (x2apic_state != X2APIC_ON)
-		goto out;
+	x2apic_mode = 0;
+	x2apic_state = X2APIC_DISABLED;
+
+	if (state != X2APIC_ON)
+		return;
 
 	x2apic_id = read_apic_id();
 	if (x2apic_id >= 255)
@@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void)
 
 	__x2apic_disable();
 	register_lapic_address(mp_lapic_addr);
-out:
-	x2apic_state = X2APIC_DISABLED;
-	x2apic_mode = 0;
 }
 
 static __init void x2apic_enable(void)
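
The x2apic_disable() rework above snapshots the state into a local, unconditionally records the disabled state, and only then decides whether hardware teardown is needed, removing the out: label. The shape of that pattern (illustrative sketch, hypothetical names):

    static int state;  /* 1 == enabled */
    static int mode;

    static void disable_thing(void)
    {
        int old = state;        /* snapshot before mutating */

        mode = 0;               /* always leave it marked disabled */
        state = 0;

        if (old != 1)
            return;             /* was never on: nothing to tear down */

        /* ... hardware teardown only if it was actually enabled ... */
    }
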
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq, irq_data->node, data,
+		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
 					       info);
 		if (err)
 			goto error;
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (src_fpu->fpstate_active)
+	if (src_fpu->fpstate_active && cpu_has_fpu)
 		fpu_copy(dst_fpu, src_fpu);
 
 	return 0;
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
 		write_cr0(cr0);
 
 	/* Flush out any pending x87 state: */
-	asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+	if (!cpu_has_fpu)
+		fpstate_init_soft(&current->thread.fpu.state.soft);
+	else
+#endif
+		asm volatile ("fninit");
 }
 
 /*
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			smp_mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
@@ -702,11 +702,11 @@ static ssize_t flags_show(struct device *dev,
 	u16 flags = to_nfit_memdev(dev)->flags;
 
 	return sprintf(buf, "%s%s%s%s%s\n",
-		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-		flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
-		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
+		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+		flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
+		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
 }
 static DEVICE_ATTR_RO(flags);
 
@@ -849,12 +849,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
 			continue;
 
-		dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
+		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
 			nvdimm_name(nvdimm),
-		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-		  mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
+		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+		  mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
 
 	}
 
@@ -1024,7 +1024,7 @@ static void wmb_blk(struct nfit_blk *nfit_blk)
 		wmb_pmem();
 }
 
-static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
 	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -1032,7 +1032,7 @@ static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readq(mmio->base + offset);
+	return readl(mmio->base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
@@ -3756,6 +3756,14 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 
+	/*
+	 * For flush requests, request_idx starts at the end of the
+	 * tag space. Since we don't support FLUSH/FUA, simply return
+	 * 0 as there's nothing to be done.
+	 */
+	if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
+		return 0;
+
 	cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 			&cmd->command_dma, GFP_KERNEL);
 	if (!cmd->command)
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
 	BUG_ON(!imxtm->base);
 
 	imxtm->type = type;
+	imxtm->irq = irq;
 
 	_mxc_timer_init(imxtm);
 }
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
 }
 EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -1075,34 +1075,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u8 expected_size;
-	u16 block_size;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (bdb->version < 195) {
-		expected_size = 33;
-	} else if (bdb->version == 195) {
-		expected_size = 37;
-	} else if (bdb->version <= 197) {
-		expected_size = 38;
-	} else {
-		expected_size = 38;
-		DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
-				 expected_size, bdb->version);
-	}
-
-	if (expected_size > sizeof(*p_child)) {
-		DRM_ERROR("child_device_config cannot fit in p_child\n");
-		return;
-	}
-
-	if (p_defs->child_dev_size != expected_size) {
-		DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
-			  p_defs->child_dev_size, expected_size, bdb->version);
+	if (p_defs->child_dev_size < sizeof(*p_child)) {
+		DRM_ERROR("General definiton block child device size is too small.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -1149,7 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
+		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
 	}
 	return;
 }
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
 
 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
-				 243000, 270000, 324000, 405000,
-				 420000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
 
 /**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+	/* WaDisableHBR2:skl */
+	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+		return false;
+
+	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+	    (INTEL_INFO(dev)->gen >= 9))
+		return true;
+	else
+		return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
 	if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
 		return ARRAY_SIZE(skl_rates);
-	} else if (IS_CHERRYVIEW(dev)) {
-		*source_rates = chv_rates;
-		return ARRAY_SIZE(chv_rates);
 	}
 
 	*source_rates = default_rates;
 
-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-		/* WaDisableHBR2:skl */
-		return (DP_LINK_BW_2_7 >> 3) + 1;
-	else if (INTEL_INFO(dev)->gen >= 8 ||
-	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+	/* This depends on the fact that 5.4 is last value in the array */
+	if (intel_dp_source_supports_hbr2(dev))
 		return (DP_LINK_BW_5_4 >> 3) + 1;
 	else
 		return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		}
 	}
 
-	/* Training Pattern 3 support, both source and sink */
+	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
+	 * have support for TP3 hence that check is used along with dpcd check
+	 * to ensure TP3 can be enabled.
+	 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
+	 * supported but still not enabled.
+	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+	    intel_dp_source_supports_hbr2(dev)) {
 		intel_dp->use_tps3 = true;
 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
 	} else
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 		if (ret)
 			goto unpin_ctx_obj;
+
+		ctx_obj->dirty = true;
 	}
 
 	return ret;
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
 
+	/* we can race here at startup, some boards seem to trigger
+	 * hotplug irqs when they shouldn't. */
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
 	mutex_lock(&mode_config->mutex);
 	if (mode_config->num_connector) {
 		list_for_each_entry(connector, &mode_config->connector_list, head)
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 			 * convert it to descriptor.
 			 */
 			if (!button->gpiod && gpio_is_valid(button->gpio)) {
-				unsigned flags = 0;
+				unsigned flags = GPIOF_IN;
 
 				if (button->active_low)
 					flags |= GPIOF_ACTIVE_LOW;
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
 	.irq_mask		= irq_chip_mask_parent,
 	.irq_unmask		= irq_chip_unmask_parent,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
-	.irq_set_wake		= irq_chip_set_wake_parent,
+	.irq_set_type		= irq_chip_set_type_parent,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_SKIP_SET_WAKE,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
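
Both irqchip hunks in this commit (the OMAP wakeupgen one earlier and the crossbar one here) route more operations to the generic irq_chip_*_parent() helpers, which in a hierarchical irqchip simply forward the call one domain up. Roughly (simplified sketch, not the kernel's real structures):

    struct chip;

    struct irqd {
        struct chip *chip;
        struct irqd *parent;    /* hierarchical irq domains */
    };

    struct chip {
        int (*set_type)(struct irqd *d, unsigned int type);
    };

    /* the *_parent helpers just delegate one level up */
    static int set_type_parent(struct irqd *d, unsigned int type)
    {
        return d->parent->chip->set_type(d->parent, type);
    }
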
@@ -240,7 +240,7 @@ config DVB_SI21XX
 
 config DVB_TS2020
 	tristate "Montage Tehnology TS2020 based tuners"
-	depends on DVB_CORE
+	depends on DVB_CORE && I2C
 	select REGMAP_I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
@@ -2,6 +2,7 @@ config VIDEO_COBALT
 	tristate "Cisco Cobalt support"
 	depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
 	depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+	depends on SND
 	select I2C_ALGOBIT
 	select VIDEO_ADV7604
 	select VIDEO_ADV7511
@@ -139,7 +139,7 @@ done:
 	   also know about dropped frames. */
 	cb->vb.v4l2_buf.sequence = s->sequence++;
 	vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
-			VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+			VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
@@ -130,10 +130,11 @@ err:
 
 int mantis_dma_init(struct mantis_pci *mantis)
 {
-	int err = 0;
+	int err;
 
 	dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
-	if (mantis_alloc_buffers(mantis) < 0) {
+	err = mantis_alloc_buffers(mantis);
+	if (err < 0) {
 		dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
 
 		/* Stop RISC Engine */
@@ -184,125 +184,9 @@ out:
 	return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc5_timings = {
-	.leader			= RC5_UNIT,
-	.pulse_space_start	= 0,
-	.clock			= RC5_UNIT,
-	.trailer_space		= RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
-	{
-		.leader			= RC5_UNIT,
-		.pulse_space_start	= 0,
-		.clock			= RC5_UNIT,
-		.trailer_space		= RC5X_SPACE,
-	},
-	{
-		.clock			= RC5_UNIT,
-		.trailer_space		= RC5_UNIT * 10,
-	},
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
-	.leader			= RC5_UNIT,
-	.pulse_space_start	= 0,
-	.clock			= RC5_UNIT,
-	.trailer_space		= RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
-				  unsigned int important_bits)
-{
-	/* all important bits of scancode should be set in mask */
-	if (~scancode->mask & important_bits)
-		return -EINVAL;
-	/* extra bits in mask should be zero in data */
-	if (scancode->mask & scancode->data & ~important_bits)
-		return -EINVAL;
-	return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols:	allowed protocols
- * @scancode:	scancode filter describing scancode (helps distinguish between
- *		protocol subtypes when scancode is ambiguous)
- * @events:	array of raw ir events to write into
- * @max:	maximum size of @events
- *
- * Returns:	The number of events written.
- *		-ENOBUFS if there isn't enough space in the array to fit the
- *		encoding. In this case all @max events will have been written.
- *		-EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
-			 const struct rc_scancode_filter *scancode,
-			 struct ir_raw_event *events, unsigned int max)
-{
-	int ret;
-	struct ir_raw_event *e = events;
-	unsigned int data, xdata, command, commandx, system;
-
-	/* Detect protocol and convert scancode to raw data */
-	if (protocols & RC_BIT_RC5 &&
-	    !ir_rc5_validate_filter(scancode, 0x1f7f)) {
-		/* decode scancode */
-		command = (scancode->data & 0x003f) >> 0;
-		commandx = (scancode->data & 0x0040) >> 6;
-		system = (scancode->data & 0x1f00) >> 8;
-		/* encode data */
-		data = !commandx << 12 | system << 6 | command;
-
-		/* Modulate the data */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
-					    data);
-		if (ret < 0)
-			return ret;
-	} else if (protocols & RC_BIT_RC5X &&
-		   !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
-		/* decode scancode */
-		xdata = (scancode->data & 0x00003f) >> 0;
-		command = (scancode->data & 0x003f00) >> 8;
-		commandx = (scancode->data & 0x004000) >> 14;
-		system = (scancode->data & 0x1f0000) >> 16;
-		/* commandx and system overlap, bits must match when encoded */
-		if (commandx == (system & 0x1))
-			return -EINVAL;
-		/* encode data */
-		data = 1 << 18 | system << 12 | command << 6 | xdata;
-
-		/* Modulate the data */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
-					    CHECK_RC5X_NBITS,
-					    data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
-		if (ret < 0)
-			return ret;
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc5x_timings[1],
-					    RC5X_NBITS - CHECK_RC5X_NBITS,
-					    data);
-		if (ret < 0)
-			return ret;
-	} else if (protocols & RC_BIT_RC5_SZ &&
-		   !ir_rc5_validate_filter(scancode, 0x2fff)) {
-		/* RC5-SZ scancode is raw enough for Manchester as it is */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
-					    RC5_SZ_NBITS, scancode->data & 0x2fff);
-		if (ret < 0)
-			return ret;
-	} else {
-		return -EINVAL;
-	}
-
-	return e - events;
-}
-
 static struct ir_raw_handler rc5_handler = {
 	.protocols	= RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
 	.decode		= ir_rc5_decode,
-	.encode		= ir_rc5_encode,
 };
 
 static int __init ir_rc5_decode_init(void)
@@ -291,133 +291,11 @@ out:
 	return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
-	{
-		.leader			= RC6_PREFIX_PULSE,
-		.pulse_space_start	= 0,
-		.clock			= RC6_UNIT,
-		.invert			= 1,
-		.trailer_space		= RC6_PREFIX_SPACE,
-	},
-	{
-		.clock			= RC6_UNIT,
-		.invert			= 1,
-	},
-	{
-		.clock			= RC6_UNIT * 2,
-		.invert			= 1,
-	},
-	{
-		.clock			= RC6_UNIT,
-		.invert			= 1,
-		.trailer_space		= RC6_SUFFIX_SPACE,
-	},
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
-				  unsigned int important_bits)
-{
-	/* all important bits of scancode should be set in mask */
-	if (~scancode->mask & important_bits)
-		return -EINVAL;
-	/* extra bits in mask should be zero in data */
-	if (scancode->mask & scancode->data & ~important_bits)
-		return -EINVAL;
-	return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols:	allowed protocols
- * @scancode:	scancode filter describing scancode (helps distinguish between
- *		protocol subtypes when scancode is ambiguous)
- * @events:	array of raw ir events to write into
- * @max:	maximum size of @events
- *
- * Returns:	The number of events written.
- *		-ENOBUFS if there isn't enough space in the array to fit the
- *		encoding. In this case all @max events will have been written.
- *		-EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
-			 const struct rc_scancode_filter *scancode,
-			 struct ir_raw_event *events, unsigned int max)
-{
-	int ret;
-	struct ir_raw_event *e = events;
-
-	if (protocols & RC_BIT_RC6_0 &&
-	    !ir_rc6_validate_filter(scancode, 0xffff)) {
-
-		/* Modulate the preamble */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate the header (Start Bit & Mode-0) */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[1],
-					    RC6_HEADER_NBITS, (1 << 3));
-		if (ret < 0)
-			return ret;
-
-		/* Modulate Trailer Bit */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[2], 1, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate rest of the data */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[3], RC6_0_NBITS,
-					    scancode->data);
-		if (ret < 0)
-			return ret;
-
-	} else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
-				RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
-		   !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
-		/* Modulate the preamble */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate the header (Start Bit & Header-version 6 */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[1],
-					    RC6_HEADER_NBITS, (1 << 3 | 6));
-		if (ret < 0)
-			return ret;
-
-		/* Modulate Trailer Bit */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[2], 1, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate rest of the data */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[3],
-					    fls(scancode->mask),
-					    scancode->data);
-		if (ret < 0)
-			return ret;
-
-	} else {
-		return -EINVAL;
-	}
-
-	return e - events;
-}
-
 static struct ir_raw_handler rc6_handler = {
 	.protocols	= RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
 			  RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
 			  RC_BIT_RC6_MCE,
 	.decode		= ir_rc6_decode,
-	.encode		= ir_rc6_encode,
 };
 
 static int __init ir_rc6_decode_init(void)
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 	return 0;
 }
 
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
-				  const u8 *wakeup_sample_buf, int count)
-{
-	int i = 0;
-	u8 reg, reg_learn_mode;
-	unsigned long flags;
-	struct nvt_dev *nvt = dev->priv;
-
-	nvt_dbg_wake("writing wakeup samples");
-
-	reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
-	reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
-	reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
-	/* Lock the learn area to prevent racing with wake-isr */
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
-
-	/* Enable fifo writes */
-	nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
-	/* Clear cir wake rx fifo */
-	nvt_clear_cir_wake_fifo(nvt);
-
-	if (count > WAKE_FIFO_LEN) {
-		nvt_dbg_wake("HW FIFO too small for all wake samples");
-		count = WAKE_FIFO_LEN;
-	}
-
-	if (count)
-		pr_info("Wake samples (%d) =", count);
-	else
-		pr_info("Wake sample fifo cleared");
-
-	/* Write wake samples to fifo */
-	for (i = 0; i < count; i++) {
-		pr_cont(" %02x", wakeup_sample_buf[i]);
-		nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
-				       CIR_WAKE_WR_FIFO_DATA);
-	}
-	pr_cont("\n");
-
-	/* Switch cir to wakeup mode and disable fifo writing */
-	nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
-	/* Set number of bytes needed for wake */
-	nvt_cir_wake_reg_write(nvt, count ? count :
-			       CIR_WAKE_FIFO_CMP_BYTES,
-			       CIR_WAKE_FIFO_CMP_DEEP);
-
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
-	return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
-					struct rc_scancode_filter *sc_filter)
-{
-	u8 *reg_buf;
-	u8 buf_val;
-	int i, ret, count;
-	unsigned int val;
-	struct ir_raw_event *raw;
-	bool complete;
-
-	/* Require both mask and data to be set before actually committing */
-	if (!sc_filter->mask || !sc_filter->data)
-		return 0;
-
-	raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
-	if (!raw)
-		return -ENOMEM;
-
-	ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-				     raw, WAKE_FIFO_LEN);
-	complete = (ret != -ENOBUFS);
-	if (!complete)
-		ret = WAKE_FIFO_LEN;
-	else if (ret < 0)
-		goto out_raw;
-
-	reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
-	if (!reg_buf) {
-		ret = -ENOMEM;
-		goto out_raw;
-	}
-
-	/* Inspect the ir samples */
-	for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
-		val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
-		/* Split too large values into several smaller ones */
-		while (val > 0 && count < WAKE_FIFO_LEN) {
-
-			/* Skip last value for better comparison tolerance */
-			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
-				break;
-
-			/* Clamp values to BUF_LEN_MASK at most */
-			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
-			reg_buf[count] = buf_val;
-			val -= buf_val;
-			if ((raw[i]).pulse)
-				reg_buf[count] |= BUF_PULSE_BIT;
-			count++;
-		}
-	}
-
-	ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
-	kfree(reg_buf);
-out_raw:
-	kfree(raw);
-
-	return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
-					     u64 *rc_type)
-{
-	return 0;
-}
-
 /*
  * nvt_tx_ir
  *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	/* Set up the rc device */
 	rdev->priv = nvt;
 	rdev->driver_type = RC_DRIVER_IR_RAW;
-	rdev->encode_wakeup = true;
 	rdev->allowed_protocols = RC_BIT_ALL;
 	rdev->open = nvt_open;
 	rdev->close = nvt_close;
 	rdev->tx_ir = nvt_tx_ir;
 	rdev->s_tx_carrier = nvt_set_tx_carrier;
-	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
-	rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
 	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
 	rdev->input_phys = "nuvoton/cir0";
 	rdev->input_id.bustype = BUS_HOST;
@@ -63,7 +63,6 @@ static int debug;
  */
 #define TX_BUF_LEN 256
 #define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
 
 struct nvt_dev {
 	struct pnp_dev *pdev;
@@ -25,8 +25,6 @@ struct ir_raw_handler {
 
 	u64 protocols; /* which are handled by this handler */
 	int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
-	int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
-		      struct ir_raw_event *events, unsigned int max);
 
 	/* These two should only be used by the lirc decoder */
 	int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
 #define TO_US(duration)		DIV_ROUND_CLOSEST((duration), 1000)
 #define TO_STR(is_pulse)	((is_pulse) ? "pulse" : "space")
 
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
-					      unsigned int pulse,
-					      u32 duration)
-{
-	init_ir_raw_event(ev);
-	ev->duration = duration;
-	ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader:		duration of leader pulse (if any) 0 if continuing
-			existing signal (see @pulse_space_start)
- * @pulse_space_start:	1 for starting with pulse (0 for starting with space)
- * @clock:		duration of each pulse/space in ns
- * @invert:		if set clock logic is inverted
-			(0 = space + pulse, 1 = pulse + space)
- * @trailer_space:	duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
-	unsigned int leader;
-	unsigned int pulse_space_start:1;
-	unsigned int clock;
-	unsigned int invert:1;
-	unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-			  const struct ir_raw_timings_manchester *timings,
-			  unsigned int n, unsigned int data);
-
 /*
  * Routines from rc-raw.c to be used internally and by decoders
  */
 u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
 int ir_raw_event_register(struct rc_dev *dev);
 void ir_raw_event_unregister(struct rc_dev *dev);
 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
 static DEFINE_MUTEX(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
 static u64 available_protocols;
-static u64 encode_protocols;
 
 static int ir_raw_event_thread(void *data)
 {
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
 	return protocols;
 }
 
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
-	u64 protocols;
-
-	mutex_lock(&ir_raw_handler_lock);
-	protocols = encode_protocols;
-	mutex_unlock(&ir_raw_handler_lock);
-	return protocols;
-}
-
 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
 {
 	/* the caller will update dev->enabled_protocols */
 	return 0;
 }
 
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev:		Pointer to pointer to next free event. *@ev is incremented for
- *		each raw event filled.
- * @max:	Maximum number of raw events to fill.
- * @timings:	Manchester modulation timings.
- * @n:		Number of bits of data.
- * @data:	Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns:	0 on success.
- *		-ENOBUFS if there isn't enough space in the array to fit the
- *		full encoded data. In this case all @max events will have been
- *		written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-			  const struct ir_raw_timings_manchester *timings,
-			  unsigned int n, unsigned int data)
-{
-	bool need_pulse;
-	unsigned int i;
-	int ret = -ENOBUFS;
-
-	i = 1 << (n - 1);
-
-	if (timings->leader) {
-		if (!max--)
-			return ret;
-		if (timings->pulse_space_start) {
-			init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
-			if (!max--)
-				return ret;
-			init_ir_raw_event_duration((*ev), 0, timings->leader);
-		} else {
-			init_ir_raw_event_duration((*ev), 1, timings->leader);
-		}
-		i >>= 1;
-	} else {
-		/* continue existing signal */
-		--(*ev);
-	}
-	/* from here on *ev will point to the last event rather than the next */
-
-	while (n && i > 0) {
-		need_pulse = !(data & i);
-		if (timings->invert)
-			need_pulse = !need_pulse;
-		if (need_pulse == !!(*ev)->pulse) {
-			(*ev)->duration += timings->clock;
-		} else {
-			if (!max--)
-				goto nobufs;
-			init_ir_raw_event_duration(++(*ev), need_pulse,
-						   timings->clock);
-		}
-
-		if (!max--)
-			goto nobufs;
-		init_ir_raw_event_duration(++(*ev), !need_pulse,
-					   timings->clock);
-		i >>= 1;
-	}
-
-	if (timings->trailer_space) {
-		if (!(*ev)->pulse)
-			(*ev)->duration += timings->trailer_space;
-		else if (!max--)
-			goto nobufs;
-		else
-			init_ir_raw_event_duration(++(*ev), 0,
-						   timings->trailer_space);
-	}
-
-	ret = 0;
-nobufs:
-	/* point to the next event rather than last event before returning */
-	++(*ev);
-	return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols:	permitted protocols
- * @scancode:	scancode filter describing a single scancode
- * @events:	array of raw events to write into
- * @max:	max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns:	The number of events written.
- *		-ENOBUFS if there isn't enough space in the array to fit the
- *		encoding. In this case all @max events will have been written.
- *		-EINVAL if the scancode is ambiguous or invalid, or if no
- *		compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
-			   const struct rc_scancode_filter *scancode,
-			   struct ir_raw_event *events, unsigned int max)
-{
-	struct ir_raw_handler *handler;
-	int ret = -EINVAL;
-
-	mutex_lock(&ir_raw_handler_lock);
-	list_for_each_entry(handler, &ir_raw_handler_list, list) {
-		if (handler->protocols & protocols && handler->encode) {
-			ret = handler->encode(protocols, scancode, events, max);
-			if (ret >= 0 || ret == -ENOBUFS)
-				break;
-		}
-	}
-	mutex_unlock(&ir_raw_handler_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
 /*
  * Used to (un)register raw event clients
  */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
 		list_for_each_entry(raw, &ir_raw_client_list, list)
 			ir_raw_handler->raw_register(raw->dev);
 	available_protocols |= ir_raw_handler->protocols;
-	if (ir_raw_handler->encode)
-		encode_protocols |= ir_raw_handler->protocols;
 	mutex_unlock(&ir_raw_handler_lock);
 
 	return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
 		list_for_each_entry(raw, &ir_raw_client_list, list)
 			ir_raw_handler->raw_unregister(raw->dev);
 	available_protocols &= ~ir_raw_handler->protocols;
-	if (ir_raw_handler->encode)
-		encode_protocols &= ~ir_raw_handler->protocols;
 	mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
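
Background on ir_raw_gen_manchester(), removed above together with its only users: Manchester (bi-phase) coding transmits each bit as a pair of half-clock symbols, pulse/space or space/pulse depending on the bit value, which is why the helper walks the data MSB-first and merges adjacent symbols of the same polarity. A rough standalone illustration of the symbol expansion (hypothetical convention matching the removed code's non-inverted mode, not rc-core's API):

    #include <stdio.h>

    static void manchester_print(unsigned int data, unsigned int n,
                                 unsigned int half_clock_us)
    {
        /* MSB first: bit 1 -> space then pulse, bit 0 -> pulse then space */
        for (unsigned int i = 1u << (n - 1); i; i >>= 1) {
            int bit = !!(data & i);
            printf("%s:%u %s:%u ",
                   bit ? "space" : "pulse", half_clock_us,
                   bit ? "pulse" : "space", half_clock_us);
        }
        printf("\n");
    }
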
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <media/rc-core.h>
 
 #define DRIVER_NAME	"rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
 	return 0;
 }
 
-static int loop_set_wakeup_filter(struct rc_dev *dev,
-				  struct rc_scancode_filter *sc_filter)
-{
-	static const unsigned int max = 512;
-	struct ir_raw_event *raw;
-	int ret;
-	int i;
-
-	/* fine to disable filter */
-	if (!sc_filter->mask)
-		return 0;
-
-	/* encode the specified filter and loop it back */
-	raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
-	ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-				     raw, max);
-	/* still loop back the partial raw IR even if it's incomplete */
-	if (ret == -ENOBUFS)
-		ret = max;
-	if (ret >= 0) {
-		/* do the loopback */
-		for (i = 0; i < ret; ++i)
-			ir_raw_event_store(dev, &raw[i]);
-		ir_raw_event_handle(dev);
-
-		ret = 0;
-	}
-
-	kfree(raw);
-
-	return ret;
-}
-
 static int __init loop_init(void)
 {
 	struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
 	rc->map_name		= RC_MAP_EMPTY;
 	rc->priv		= &loopdev;
 	rc->driver_type		= RC_DRIVER_IR_RAW;
-	rc->encode_wakeup	= true;
 	rc->allowed_protocols	= RC_BIT_ALL;
 	rc->timeout		= 100 * 1000 * 1000; /* 100 ms */
 	rc->min_timeout		= 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
 	rc->s_idle		= loop_set_idle;
 	rc->s_learning_mode	= loop_set_learning_mode;
 	rc->s_carrier_report	= loop_set_carrier_report;
-	rc->s_wakeup_filter	= loop_set_wakeup_filter;
 
 	loopdev.txmask		= RXMASK_REGULAR;
 	loopdev.txcarrier	= 36000;
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
 	} else {
 		enabled = dev->enabled_wakeup_protocols;
 		allowed = dev->allowed_wakeup_protocols;
-		if (dev->encode_wakeup && !allowed)
-			allowed = ir_raw_get_encode_protocols();
 	}
 
 	mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
 		 path ? path : "N/A");
 	kfree(path);
 
-	if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+	if (dev->driver_type == RC_DRIVER_IR_RAW) {
 		/* Load raw decoders, if they aren't already */
 		if (!raw_init) {
 			IR_dprintk(1, "Loading raw decoders\n");
 			ir_raw_init();
 			raw_init = true;
 		}
-	}
-
-	if (dev->driver_type == RC_DRIVER_IR_RAW) {
 		/* calls ir_register_device so unlock mutex here*/
 		mutex_unlock(&dev->lock);
 		rc = ir_raw_event_register(dev);
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
 		break;
 	case VB2_BUF_STATE_PREPARING:
 	case VB2_BUF_STATE_DEQUEUED:
+	case VB2_BUF_STATE_REQUEUEING:
 		/* nothing */
 		break;
 	}
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 
 	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
 		    state != VB2_BUF_STATE_ERROR &&
-		    state != VB2_BUF_STATE_QUEUED))
+		    state != VB2_BUF_STATE_QUEUED &&
+		    state != VB2_BUF_STATE_REQUEUEING))
 		state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 	for (plane = 0; plane < vb->num_planes; ++plane)
 		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
 
-	/* Add the buffer to the done buffers list */
 	spin_lock_irqsave(&q->done_lock, flags);
-	vb->state = state;
-	if (state != VB2_BUF_STATE_QUEUED)
+	if (state == VB2_BUF_STATE_QUEUED ||
+	    state == VB2_BUF_STATE_REQUEUEING) {
+		vb->state = VB2_BUF_STATE_QUEUED;
+	} else {
+		/* Add the buffer to the done buffers list */
 		list_add_tail(&vb->done_entry, &q->done_list);
+		vb->state = state;
+	}
 	atomic_dec(&q->owned_by_drv_count);
 	spin_unlock_irqrestore(&q->done_lock, flags);
 
-	if (state == VB2_BUF_STATE_QUEUED) {
+	switch (state) {
+	case VB2_BUF_STATE_QUEUED:
+		return;
+	case VB2_BUF_STATE_REQUEUEING:
 		if (q->start_streaming_called)
 			__enqueue_in_driver(vb);
 		return;
-	}
-
-	/* Inform any processes that may be waiting for buffers */
-	wake_up(&q->done_wq);
+	default:
+		/* Inform any processes that may be waiting for buffers */
+		wake_up(&q->done_wq);
+		break;
+	}
 }
 EXPORT_SYMBOL_GPL(vb2_buffer_done);
 
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
 
 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
 {
-	static bool __check_once __read_mostly;
+	static bool check_once;
 
-	if (__check_once)
+	if (check_once)
 		return;
 
-	__check_once = true;
-	__WARN();
+	check_once = true;
+	WARN_ON(1);
 
-	pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
 	if (vb->vb2_queue->allow_zero_bytesused)
-		pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
 	else
-		pr_warn_once("use the actual size instead.\n");
+		pr_warn("use the actual size instead.\n");
 }
 
 /**
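
For context on the videobuf2 hunks: the new VB2_BUF_STATE_REQUEUEING state lets a driver report that a buffer should go straight back to the hardware rather than being handed to userspace, and vb2_buffer_done() now dispatches on the state. Condensed sketch of that dispatch (simplified, hypothetical helper names):

    enum buf_state { BUF_DONE, BUF_ERROR, BUF_QUEUED, BUF_REQUEUEING };

    static void requeue_to_driver(void) { /* __enqueue_in_driver() */ }
    static void wake_waiters(void)      { /* wake_up(&q->done_wq) */ }

    static void buffer_done(enum buf_state state, int streaming)
    {
        switch (state) {
        case BUF_QUEUED:
            return;                     /* stays owned by the driver */
        case BUF_REQUEUEING:
            if (streaming)
                requeue_to_driver();
            return;                     /* never reaches userspace */
        default:
            wake_waiters();             /* DONE/ERROR: userspace may dequeue */
            break;
        }
    }
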
@@ -854,6 +854,18 @@ static int pcan_usb_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB adapter
  */
+static const struct can_bittiming_const pcan_usb_const = {
+	.name = "pcan_usb",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb = {
 	.name = "PCAN-USB",
 	.device_id = PCAN_USB_PRODUCT_ID,
@@ -862,17 +874,7 @@ const struct peak_usb_adapter pcan_usb = {
 	.clock = {
 		.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 64,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb),
@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 	dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
 
 	dev->can.clock = peak_usb_adapter->clock;
-	dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+	dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
 	dev->can.do_set_bittiming = peak_usb_set_bittiming;
-	dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+	dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
 	dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
 	dev->can.do_set_mode = peak_usb_set_mode;
 	dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
@@ -48,8 +48,8 @@ struct peak_usb_adapter {
 	u32 device_id;
 	u32 ctrlmode_supported;
 	struct can_clock clock;
-	const struct can_bittiming_const bittiming_const;
-	const struct can_bittiming_const data_bittiming_const;
+	const struct can_bittiming_const * const bittiming_const;
+	const struct can_bittiming_const * const data_bittiming_const;
 	unsigned int ctrl_count;
 
 	int (*intf_probe)(struct usb_interface *intf);
@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
 }
 
 /* describes the PCAN-USB FD adapter */
+static const struct can_bittiming_const pcan_usb_fd_const = {
+	.name = "pcan_usb_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 64,
+	.tseg2_min = 1,
+	.tseg2_max = 16,
+	.sjw_max = 16,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_fd_data_const = {
+	.name = "pcan_usb_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_fd = {
 	.name = "PCAN-USB FD",
 	.device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
 	.clock = {
 		.freq = PCAN_UFD_CRYSTAL_HZ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 64,
-		.tseg2_min = 1,
-		.tseg2_max = 16,
-		.sjw_max = 16,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
-	.data_bittiming_const = {
-		.name = "pcan_usb_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_fd_const,
+	.data_bittiming_const = &pcan_usb_fd_data_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = {
 };
 
 /* describes the PCAN-USB Pro FD adapter */
+static const struct can_bittiming_const pcan_usb_pro_fd_const = {
+	.name = "pcan_usb_pro_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 64,
+	.tseg2_min = 1,
+	.tseg2_max = 16,
+	.sjw_max = 16,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
+	.name = "pcan_usb_pro_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro_fd = {
 	.name = "PCAN-USB Pro FD",
 	.device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
 	.clock = {
 		.freq = PCAN_UFD_CRYSTAL_HZ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb_pro_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 64,
-		.tseg2_min = 1,
-		.tseg2_max = 16,
-		.sjw_max = 16,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
-	.data_bittiming_const = {
-		.name = "pcan_usb_pro_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_pro_fd_const,
+	.data_bittiming_const = &pcan_usb_pro_fd_data_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB Pro adapter
 */
+static const struct can_bittiming_const pcan_usb_pro_const = {
+	.name = "pcan_usb_pro",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro = {
 	.name = "PCAN-USB Pro",
 	.device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = {
 	.clock = {
 		.freq = PCAN_USBPRO_CRYSTAL_HZ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb_pro",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_pro_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
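
All of the peak_usb hunks above are one refactor: each adapter used to embed its own copy of struct can_bittiming_const, and now points at a shared static const instance instead, which shrinks the adapter structs and lets one table serve several adapters. The idiom in miniature (hypothetical names):

    struct timing { int tseg1_max; };

    /* one shared, immutable definition ... */
    static const struct timing shared_timing = { .tseg1_max = 16 };

    /* ... referenced by pointer instead of copied into each adapter */
    struct adapter {
        const struct timing *timing;  /* was: const struct timing timing; */
    };

    static const struct adapter adapter_a = { .timing = &shared_timing };
    static const struct adapter adapter_b = { .timing = &shared_timing };
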
@@ -65,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
 obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
 obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
 obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
 obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
@@ -801,6 +801,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
 
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
 {
+	if (pdata->phy_dev)
+		phy_disconnect(pdata->phy_dev);
+
 	mdiobus_unregister(pdata->mdio_bus);
 	mdiobus_free(pdata->mdio_bus);
 	pdata->mdio_bus = NULL;
@@ -1463,9 +1463,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
 	mac_ops->tx_disable(pdata);
 
 	xgene_enet_napi_del(pdata);
-	xgene_enet_mdio_remove(pdata);
-	xgene_enet_delete_desc_rings(pdata);
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		xgene_enet_mdio_remove(pdata);
 	unregister_netdev(ndev);
+	xgene_enet_delete_desc_rings(pdata);
 	pdata->port_ops->shutdown(pdata);
 	free_netdev(ndev);
 
@@ -2125,6 +2125,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 	int ret = 0;
 	int timeout = 0;
 	u32 reg;
+	u32 dma_ctrl;
+	int i;
 
 	/* Disable TDMA to stop add more frames in TX DMA */
 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2168,6 +2170,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 		ret = -ETIMEDOUT;
 	}
 
+	dma_ctrl = 0;
+	for (i = 0; i < priv->hw_params->rx_queues; i++)
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	dma_ctrl = 0;
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
 	return ret;
 }
 
@@ -2835,8 +2851,6 @@ static void bcmgenet_timeout(struct net_device *dev)
 
 	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
 
-	bcmgenet_disable_tx_napi(priv);
-
 	for (q = 0; q < priv->hw_params->tx_queues; q++)
 		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
 	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2852,8 +2866,6 @@ static void bcmgenet_timeout(struct net_device *dev)
 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
 	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-	bcmgenet_enable_tx_napi(priv);
-
 	dev->trans_start = jiffies;
 
 	dev->stats.tx_errors++;
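
The added teardown code above builds one mask with a ring-enable bit per queue and clears those bits from DMA_CTRL for the RX and TX directions. The mask construction in isolation (illustrative shift value, not the driver's real constant):

    #include <stdint.h>

    #define RING_BUF_EN_SHIFT 1     /* illustrative only */

    static uint32_t ring_disable_mask(unsigned int queues)
    {
        uint32_t mask = 0;

        for (unsigned int i = 0; i < queues; i++)
            mask |= 1u << (i + RING_BUF_EN_SHIFT);
        return mask;                /* caller then does: reg &= ~mask; */
    }
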
@@ -1783,7 +1783,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 		return ret;
 
 	fep->mii_timeout = 0;
-	init_completion(&fep->mdio_done);
+	reinit_completion(&fep->mdio_done);
 
 	/* start a read op */
 	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1822,7 +1822,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 		return ret;
 
 	fep->mii_timeout = 0;
-	init_completion(&fep->mdio_done);
+	reinit_completion(&fep->mdio_done);
 
 	/* start a write op */
 	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
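
On the fec hunks: init_completion() initializes the whole completion, including its wait-queue head, while reinit_completion() only resets the done counter, which is what a completion that is set up once and reused per MDIO transaction needs; re-running the full initialization while a waiter may still be queued is what the fix avoids. A userspace analogue of the init-once / reset-per-use split (illustrative, not the kernel API):

    #include <stdbool.h>

    struct waiter {
        bool done;
        /* imagine wait-queue state here, set up once */
    };

    static void waiter_init(struct waiter *w)   { w->done = false; /* + queue setup */ }
    static void waiter_reinit(struct waiter *w) { w->done = false; /* queue untouched */ }
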
@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,

@ -6584,7 +6584,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,

static inline bool igb_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,

@ -1848,7 +1848,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,

static inline bool ixgbe_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**

@ -748,7 +748,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
@ -1307,7 +1307,12 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
	}
}

if (core_stats) {
if (!core_stats)
	return stats_count;

if (nic_data->datapath_caps &
    1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
	/* Use vadaptor stats. */
	core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
				 stats[EF10_STAT_rx_multicast] +
				 stats[EF10_STAT_rx_broadcast];

@ -1327,6 +1332,26 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
	core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
	core_stats->rx_errors = core_stats->rx_crc_errors;
	core_stats->tx_errors = stats[EF10_STAT_tx_bad];
} else {
	/* Use port stats. */
	core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
	core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
	core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
	core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
	core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
				 stats[GENERIC_STAT_rx_nodesc_trunc] +
				 stats[GENERIC_STAT_rx_noskb_drops];
	core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
	core_stats->rx_length_errors =
		stats[EF10_STAT_port_rx_gtjumbo] +
		stats[EF10_STAT_port_rx_length_error];
	core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
	core_stats->rx_frame_errors =
		stats[EF10_STAT_port_rx_align_error];
	core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
	core_stats->rx_errors = (core_stats->rx_length_errors +
				 core_stats->rx_crc_errors +
				 core_stats->rx_frame_errors);
}

return stats_count;
@ -292,6 +292,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
	return ERR_PTR(-EINVAL);
}

/* propagate the fixed link values to struct phy_device */
phy->link = status->link;
if (status->link) {
	phy->speed = status->speed;
	phy->duplex = status->duplex;
	phy->pause = status->pause;
	phy->asym_pause = status->asym_pause;
}

of_node_get(np);
phy->dev.of_node = np;

@ -1041,10 +1041,14 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
int value = -1;

if (phydrv->read_mmd_indirect == NULL) {
	mmd_phy_indirect(phydev->bus, prtad, devad, addr);
	struct mii_bus *bus = phydev->bus;

	mutex_lock(&bus->mdio_lock);
	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Read the content of the MMD's selected register */
	value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
	value = bus->read(bus, addr, MII_MMD_DATA);
	mutex_unlock(&bus->mdio_lock);
} else {
	value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
}

@ -1074,10 +1078,14 @@ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
struct phy_driver *phydrv = phydev->drv;

if (phydrv->write_mmd_indirect == NULL) {
	mmd_phy_indirect(phydev->bus, prtad, devad, addr);
	struct mii_bus *bus = phydev->bus;

	mutex_lock(&bus->mdio_lock);
	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Write the data into MMD's selected register */
	phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
	bus->write(bus, addr, MII_MMD_DATA, data);
	mutex_unlock(&bus->mdio_lock);
} else {
	phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
}
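The phy.c hunks take mdio_lock around the indirect MMD sequence because selecting the target register and then transferring its data are two separate bus transactions; an interleaved access from another context would clobber the selection latch. A small compilable model of that hazard and its fix, with a pthread mutex standing in for the bus lock and the "bus" mocked as a variable:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;
static int selected_reg = -1;	/* mock "MMD selection" latch on the bus */

static void bus_select(int devad, int reg)
{
	selected_reg = (devad << 16) | reg;
}

static int bus_read_selected(void)
{
	return selected_reg & 0xFFFF;	/* mock: echoes the selection back */
}

static int read_mmd_indirect(int devad, int reg)
{
	int val;

	pthread_mutex_lock(&mdio_lock);
	bus_select(devad, reg);		/* step 1: latch the target register */
	val = bus_read_selected();	/* step 2: fetch its contents */
	pthread_mutex_unlock(&mdio_lock);
	return val;
}

int main(void)
{
	printf("MMD read: 0x%x\n", read_mmd_indirect(3, 0x14));
	return 0;
}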
@ -176,7 +176,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
if (c45_ids)
	dev->c45_ids = *c45_ids;
dev->bus = bus;
dev->dev.parent = bus->parent;
dev->dev.parent = &bus->dev;
dev->dev.bus = &mdio_bus_type;
dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);

@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
int retval, pm;
int retval, pm, mpn;

clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);

@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net)

usbnet_purge_paused_rxq(dev);

mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);

/* deferred work (task, timer, softirq) must also stop.
 * can't flush_scheduled_work() until we drop rtnl (later),
 * else workers could deadlock; so make workers a NOP.

@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net)
if (!pm)
	usb_autopm_put_interface(dev->intf);

if (info->manage_power &&
    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
if (info->manage_power && mpn)
	info->manage_power(dev, 0);
else
	usb_autopm_put_interface(dev->intf);

@ -2256,6 +2256,8 @@ static int vxlan_open(struct net_device *dev)

if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
	ret = vxlan_igmp_join(vxlan);
	if (ret == -EADDRINUSE)
		ret = 0;
	if (ret) {
		vxlan_sock_release(vs);
		return ret;

@ -2,7 +2,7 @@
# PCI configuration
#
config PCI_BUS_ADDR_T_64BIT
	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
	def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
	depends on PCI

config PCI_MSI
@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
else if (type == PCI_EXP_TYPE_UPSTREAM ||
	 type == PCI_EXP_TYPE_DOWNSTREAM) {
	parent = pci_upstream_bridge(pdev);
	if (!parent->has_secondary_link)

	/*
	 * Usually there's an upstream device (Root Port or Switch
	 * Downstream Port), but we can't assume one exists.
	 */
	if (parent && !parent->has_secondary_link)
		pdev->has_secondary_link = 1;
}
}

@ -1103,7 +1108,7 @@ int pci_cfg_space_size(struct pci_dev *dev)

#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
/*
 * Disable the MSI hardware to avoid screaming interrupts
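The set_pcie_port_type() hunk guards a pointer that used to be dereferenced unconditionally: a device with no upstream bridge (for example one integrated into the root complex) returns NULL from pci_upstream_bridge(). A tiny compilable model of the guard, with the struct reduced to the two fields involved:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pci_dev {
	struct fake_pci_dev *upstream;	/* NULL when no bridge exists above */
	bool has_secondary_link;
};

static void mark_secondary_link(struct fake_pci_dev *pdev)
{
	struct fake_pci_dev *parent = pdev->upstream;

	/* Usually a Root Port or Switch Downstream Port sits above us,
	 * but we cannot assume one does. */
	if (parent && !parent->has_secondary_link)
		pdev->has_secondary_link = true;
}

int main(void)
{
	struct fake_pci_dev orphan = { .upstream = NULL };

	mark_secondary_link(&orphan);	/* must not crash */
	printf("secondary link: %d\n", orphan.has_secondary_link);
	return 0;
}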
@ -39,7 +39,7 @@

#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.6.0.17"
#define DRV_VERSION "1.6.0.17a"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "

@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;

if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
	return SCSI_MLQUEUE_HOST_BUSY;

@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
spin_lock_irqsave(io_lock, flags);

/* initialize rest of io_req */
io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;

@ -571,7 +573,7 @@ out:
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));

/* if only we issued IO, will we have the io lock */
if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
if (io_lock_acquired)
	spin_unlock_irqrestore(io_lock, flags);

atomic_dec(&fnic->in_flight);

@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
int err;
int err = 0;

err = blk_pre_runtime_suspend(sdev->request_queue);
if (err)
	return err;
if (pm && pm->runtime_suspend)
if (pm && pm->runtime_suspend) {
	err = blk_pre_runtime_suspend(sdev->request_queue);
	if (err)
		return err;
	err = pm->runtime_suspend(dev);
blk_post_runtime_suspend(sdev->request_queue, err);

	blk_post_runtime_suspend(sdev->request_queue, err);
}
return err;
}

@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;

blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
if (pm && pm->runtime_resume) {
	blk_pre_runtime_resume(sdev->request_queue);
	err = pm->runtime_resume(dev);
blk_post_runtime_resume(sdev->request_queue, err);

	blk_post_runtime_resume(sdev->request_queue, err);
}
return err;
}
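The scsi_pm.c hunks move the block-layer pre/post hooks inside the "driver callback exists" branch, so a device without runtime-PM ops neither touches the request queue nor returns a stale error. A compilable sketch of the control flow, with all types and hook names invented for illustration:

#include <stddef.h>
#include <stdio.h>

struct fake_dev { int (*runtime_suspend)(struct fake_dev *); };

static int blk_pre(void)        { puts("blk_pre_runtime_suspend");  return 0; }
static void blk_post(int err)   { printf("blk_post_runtime_suspend(%d)\n", err); }

static int runtime_suspend(struct fake_dev *d)
{
	int err = 0;	/* default: success when there is nothing to do */

	if (d->runtime_suspend) {
		err = blk_pre();
		if (err)
			return err;
		err = d->runtime_suspend(d);
		blk_post(err);	/* bracket only a real callback */
	}
	return err;
}

int main(void)
{
	struct fake_dev no_pm = { .runtime_suspend = NULL };

	return runtime_suspend(&no_pm);	/* returns 0, skips the blk hooks */
}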
@ -844,14 +844,15 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
struct wb_iter iter;

might_sleep();

if (!bdi_has_dirty_io(bdi))
	return;
restart:
rcu_read_lock();
bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
	if (!wb_has_dirty_io(wb) ||
	    (skip_if_busy && writeback_in_progress(wb)))
	/* SYNC_ALL writes out I_DIRTY_TIME too */
	if (!wb_has_dirty_io(wb) &&
	    (base_work->sync_mode == WB_SYNC_NONE ||
	     list_empty(&wb->b_dirty_time)))
		continue;
	if (skip_if_busy && writeback_in_progress(wb))
		continue;

	base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);

@ -899,8 +900,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
{
might_sleep();

if (bdi_has_dirty_io(bdi) &&
    (!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
	base_work->auto_free = 0;
	base_work->single_wait = 0;
	base_work->single_done = 0;

@ -2275,8 +2275,12 @@ void sync_inodes_sb(struct super_block *sb)
};
struct backing_dev_info *bdi = sb->s_bdi;

/* Nothing to do? */
if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
/*
 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
 * inodes under writeback and I_DIRTY_TIME inodes ignored by
 * bdi_has_dirty() need to be written out too.
 */
if (bdi == &noop_backing_dev_info)
	return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));

@ -347,6 +347,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
}

/**
 * drm_eld_sad - Get ELD SAD structures.
 * @eld: pointer to an eld memory structure with sad_count set
 */
static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
{
	unsigned int ver, mnl;

	ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
	if (ver != 2 && ver != 31)
		return NULL;

	mnl = drm_eld_mnl(eld);
	if (mnl > 16)
		return NULL;

	return eld + DRM_ELD_CEA_SAD(mnl, 0);
}

/**
 * drm_eld_sad_count - Get ELD SAD count.
 * @eld: pointer to an eld memory structure with sad_count set

@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
					     void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif

/* Handling of unhandled and spurious interrupts: */
@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
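The new mm.h helpers fold a boolean into the existing page->index field by storing a value no real page offset can take (-1UL), which is what lets the following hunk delete the dedicated pfmemalloc bool from struct page. A self-contained model of the trick, with struct page reduced to the one field involved:

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	unsigned long index;	/* normally the offset within the mapping */
};

static inline void set_page_pfmemalloc(struct fake_page *p)   { p->index = -1UL; }
static inline void clear_page_pfmemalloc(struct fake_page *p) { p->index = 0; }

static inline bool page_is_pfmemalloc(const struct fake_page *p)
{
	/* A page index cannot be this large, so -1UL unambiguously
	 * means "allocated below the watermarks". */
	return p->index == -1UL;
}

int main(void)
{
	struct fake_page pg = { .index = 0 };

	set_page_pfmemalloc(&pg);
	printf("pfmemalloc: %d\n", page_is_pfmemalloc(&pg));
	clear_page_pfmemalloc(&pg);
	printf("pfmemalloc: %d\n", page_is_pfmemalloc(&pg));
	return 0;
}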
@ -63,15 +63,6 @@ struct page {
union {
	pgoff_t index;		/* Our offset within mapping. */
	void *freelist;		/* sl[aou]b first free object */
	bool pfmemalloc;	/* If set by the page allocator,
				 * ALLOC_NO_WATERMARKS was set
				 * and the low watermark was not
				 * met implying that the system
				 * is under some pressure. The
				 * caller should try ensure
				 * this page is only used to
				 * free other pages.
				 */
};

union {

@ -1202,6 +1202,7 @@ struct msix_entry {
	u16 entry;	/* driver uses to specify entry, OS writes */
};

void pci_msi_setup_pci_dev(struct pci_dev *dev);

#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);

@ -1637,20 +1637,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

/*
 * Propagate page->pfmemalloc to the skb if we can. The problem is
 * that not all callers have unique ownership of the page. If
 * pfmemalloc is set, we check the mapping as a mapping implies
 * page->index is set (index and pfmemalloc share space).
 * If it's a valid mapping, we cannot use page->pfmemalloc but we
 * do not lose pfmemalloc information as the pages would not be
 * allocated using __GFP_MEMALLOC.
 * Propagate page pfmemalloc to the skb if we can. The problem is
 * that not all callers have unique ownership of the page but rely
 * on page_is_pfmemalloc doing the right thing(tm).
 */
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);

page = compound_head(page);
if (page->pfmemalloc && !page->mapping)
if (page_is_pfmemalloc(page))
	skb->pfmemalloc = true;
}

@ -2298,7 +2294,7 @@ static inline struct page *dev_alloc_page(void)
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page && page->pfmemalloc)
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

@ -74,8 +74,6 @@ enum rc_filter_type {
 * @input_dev: the input child device used to communicate events to userspace
 * @driver_type: specifies if protocol decoding is done in hardware or software
 * @idle: used to keep track of RX state
 * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
 *	wakeup protocols is the set of all raw encoders
 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols

@ -136,7 +134,6 @@ struct rc_dev {
struct input_dev *input_dev;
enum rc_driver_type driver_type;
bool idle;
bool encode_wakeup;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;

@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
#define US_TO_NS(usec)		((usec) * 1000)
#define MS_TO_US(msec)		((msec) * 1000)
#define MS_TO_NS(msec)		((msec) * 1000 * 1000)
#define NS_TO_US(nsec)		DIV_ROUND_UP(nsec, 1000L)

void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);

@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
int ir_raw_event_store_with_filter(struct rc_dev *dev,
				   struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
int ir_raw_encode_scancode(u64 protocols,
			   const struct rc_scancode_filter *scancode,
			   struct ir_raw_event *events, unsigned int max);

static inline void ir_raw_event_reset(struct rc_dev *dev)
{

@ -139,6 +139,7 @@ enum vb2_io_modes {
 * @VB2_BUF_STATE_PREPARING:	buffer is being prepared in videobuf
 * @VB2_BUF_STATE_PREPARED:	buffer prepared in videobuf and by the driver
 * @VB2_BUF_STATE_QUEUED:	buffer queued in videobuf, but not in driver
 * @VB2_BUF_STATE_REQUEUEING:	re-queue a buffer to the driver
 * @VB2_BUF_STATE_ACTIVE:	buffer queued in driver and possibly used
 *				in a hardware operation
 * @VB2_BUF_STATE_DONE:	buffer returned from driver to videobuf, but

@ -152,6 +153,7 @@ enum vb2_buffer_state {
	VB2_BUF_STATE_PREPARING,
	VB2_BUF_STATE_PREPARED,
	VB2_BUF_STATE_QUEUED,
	VB2_BUF_STATE_REQUEUEING,
	VB2_BUF_STATE_ACTIVE,
	VB2_BUF_STATE_DONE,
	VB2_BUF_STATE_ERROR,
@ -984,6 +984,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
	return -ENOSYS;
}

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data

@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
	if (data->chip && data->chip->irq_retrigger)
		return data->chip->irq_retrigger(data);

	return -ENOSYS;
	return 0;
}

/**
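irq_chip_set_type_parent() is pure hierarchy delegation: step to the parent's irq_data and invoke its callback if one exists, which is what lets the OMAP wakeupgen chip earlier in this commit simply point .irq_set_type at it. A toy compilable model of that walk (all struct and function names here are invented):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct chip;
struct irqdata {
	struct irqdata *parent_data;
	const struct chip *chip;
};
struct chip {
	int (*irq_set_type)(struct irqdata *d, unsigned int type);
};

static int set_type_parent(struct irqdata *d, unsigned int type)
{
	d = d->parent_data;	/* walk one level up the domain hierarchy */
	if (d && d->chip && d->chip->irq_set_type)
		return d->chip->irq_set_type(d, type);
	return -ENOSYS;		/* parent does not implement it */
}

static int parent_set_type(struct irqdata *d, unsigned int type)
{
	(void)d;
	printf("parent chip programs trigger type %u\n", type);
	return 0;
}

int main(void)
{
	const struct chip gic_like = { .irq_set_type = parent_set_type };
	const struct chip wrapper  = { .irq_set_type = NULL };
	struct irqdata parent = { .parent_data = NULL, .chip = &gic_like };
	struct irqdata child  = { .parent_data = &parent, .chip = &wrapper };

	return set_type_parent(&child, 4) ? 1 : 0;
}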
@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		timer->flags &= ~TIMER_BASEMASK;
		timer->flags |= base->cpu;
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
	}
}
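The __mod_timer() hunk collapses two plain stores into one WRITE_ONCE(): with separate clear-then-set writes, a lockless reader could briefly observe flags with the base bits zeroed. A userspace sketch of the difference using C11 atomics as a stand-in for the kernel's WRITE_ONCE (mask value and flag layout are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define TIMER_BASEMASK 0x3FFu	/* assumption: toy mask, not the kernel's */

static _Atomic unsigned int timer_flags;

static void migrate_racy(unsigned int cpu)
{
	/* BAD: a concurrent reader can see the intermediate value. */
	unsigned int f = atomic_load(&timer_flags);
	atomic_store(&timer_flags, f & ~TIMER_BASEMASK);	/* transient state */
	atomic_store(&timer_flags, (f & ~TIMER_BASEMASK) | cpu);
}

static void migrate_fixed(unsigned int cpu)
{
	/* GOOD: compute privately, publish the final value once. */
	unsigned int f = atomic_load(&timer_flags);
	atomic_store(&timer_flags, (f & ~TIMER_BASEMASK) | cpu);
}

int main(void)
{
	atomic_store(&timer_flags, 0x400u | 3u);	/* a flag bit + cpu 3 */
	migrate_racy(5);
	migrate_fixed(7);
	printf("flags: 0x%x\n", atomic_load(&timer_flags));
	return 0;
}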
@ -1343,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
set_page_owner(page, order, gfp_flags);

/*
 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 * allocate the page. The expectation is that the caller is taking
 * steps that will free more memory. The caller should avoid the page
 * being used for !PFMEMALLOC purposes.
 */
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
if (alloc_flags & ALLOC_NO_WATERMARKS)
	set_page_pfmemalloc(page);
else
	clear_page_pfmemalloc(page);

return 0;
}

@ -3345,7 +3348,7 @@ refill:
atomic_add(size - 1, &page->_count);

/* reset page count bias and offset to start of new frag */
nc->pfmemalloc = page->pfmemalloc;
nc->pfmemalloc = page_is_pfmemalloc(page);
nc->pagecnt_bias = size;
nc->offset = size;
}

@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
}

/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
if (unlikely(page->pfmemalloc))
if (page_is_pfmemalloc(page))
	pfmemalloc_active = true;

nr_pages = (1 << cachep->gfporder);

@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
add_zone_page_state(page_zone(page),
		    NR_SLAB_UNRECLAIMABLE, nr_pages);
__SetPageSlab(page);
if (page->pfmemalloc)
if (page_is_pfmemalloc(page))
	SetPageSlabPfmemalloc(page);

if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {

@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
if (page_is_pfmemalloc(page))
	SetPageSlabPfmemalloc(page);

start = page_address(page);

@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
*err = 0;

p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
	 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));

@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
*err = 0;

p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
	 fid->fid, (unsigned long long) offset,

@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)

if (skb && frag_size) {
	skb->head_frag = 1;
	if (virt_to_head_page(data)->pfmemalloc)
	if (page_is_pfmemalloc(virt_to_head_page(data)))
		skb->pfmemalloc = 1;
}
return skb;

@ -361,6 +361,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

ip6gre_tunnel_unlink(ign, t);
ip6_tnl_dst_reset(t);
dev_put(dev);
}
@ -219,7 +219,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
#define BROADCAST_ONE		1
#define BROADCAST_REGISTERED	2
#define BROADCAST_PROMISC_ONLY	4
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
static int pfkey_broadcast(struct sk_buff *skb,
			   int broadcast_flags, struct sock *one_sk,
			   struct net *net)
{

@ -244,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 * socket.
 */
if (pfk->promisc)
	pfkey_broadcast_one(skb, &skb2, allocation, sk);
	pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);

/* the exact target will be processed later */
if (sk == one_sk)

@ -259,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
		continue;
	}

	err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);
	err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);

	/* Error is cleared after successful sending to at least one
	 * registered KM */

@ -269,7 +269,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
rcu_read_unlock();

if (one_sk != NULL)
	err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
	err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);

kfree_skb(skb2);
kfree_skb(skb);

@ -292,7 +292,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
		&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}

@ -333,7 +333,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
		     sizeof(uint64_t));

pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));

return 0;
}

@ -1365,7 +1365,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_

xfrm_state_put(x);

pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);

return 0;
}

@ -1452,7 +1452,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;

pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));

return 0;
}

@ -1565,7 +1565,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));

return 0;
}

@ -1670,7 +1670,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
	return -ENOBUFS;
}

pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk));
pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));

return 0;
}

@ -1689,7 +1689,7 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));

return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
}

static int key_notify_sa_flush(const struct km_event *c)

@ -1710,7 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;

pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);

return 0;
}

@ -1767,7 +1767,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;

if (pfk->dump.skb)
	pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
	pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
			&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;

@ -1847,7 +1847,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
	new_hdr->sadb_msg_errno = 0;
}

pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}

@ -2181,7 +2181,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
return 0;

}

@ -2401,7 +2401,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
err = 0;

out:

@ -2655,7 +2655,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;

if (pfk->dump.skb)
	pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
	pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
			&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;

@ -2708,7 +2708,7 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
return 0;

}

@ -2770,7 +2770,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;

pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
		BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));

memset(ext_hdrs, 0, sizeof(ext_hdrs));

@ -2992,7 +2992,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;

pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
return 0;
}

@ -3182,7 +3182,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
			xfrm_ctx->ctx_len);
}

return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
}

static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,

@ -3380,7 +3380,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;

return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
}

#ifdef CONFIG_NET_KEY_MIGRATE

@ -3572,7 +3572,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
}

/* broadcast migrate message to sockets */
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);

return 0;
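All the af_key hunks above implement one refactor: pfkey_broadcast() drops its pass-through gfp_t parameter, and each internal send names the allocation context it actually runs in (GFP_ATOMIC inside the rcu_read_lock()ed receiver loop, GFP_KERNEL for the process-context unicast at the end). A compilable toy model of removing such a threaded-through parameter; every name below is invented for illustration:

#include <stdio.h>

enum gfp { GFP_ATOMIC, GFP_KERNEL };

static int send_one(int sk, enum gfp ctx)
{
	printf("send to sk %d with %s\n", sk,
	       ctx == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL");
	return 0;
}

/* After the change the function chooses per call site instead of
 * trusting a caller-supplied context that may be wrong for the loop. */
static int broadcast(int one_sk)
{
	int sk;

	for (sk = 0; sk < 3; sk++)
		send_one(sk, GFP_ATOMIC);	/* atomic: under the rcu walk */
	if (one_sk >= 0)
		return send_one(one_sk, GFP_KERNEL);	/* sleepable context */
	return 0;
}

int main(void)
{
	return broadcast(1);
}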
@ -2401,7 +2401,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 * sendmsg(), but that's what we've got...
 */
if (netlink_tx_is_mmaped(sk) &&
    msg->msg_iter.type == ITER_IOVEC &&
    iter_is_iovec(&msg->msg_iter) &&
    msg->msg_iter.nr_segs == 1 &&
    msg->msg_iter.iov->iov_base == NULL) {
	err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,

@ -490,6 +490,19 @@ static bool u32_destroy(struct tcf_proto *tp, bool force)
		return false;
	}
}

if (tp_c->refcnt > 1)
	return false;

if (tp_c->refcnt == 1) {
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (!ht_empty(ht))
			return false;
}
}

if (root_ht && --root_ht->refcnt == 0)

@ -3132,11 +3132,18 @@ bool sctp_verify_asconf(const struct sctp_association *asoc,
case SCTP_PARAM_IPV4_ADDRESS:
	if (length != sizeof(sctp_ipv4addr_param_t))
		return false;
	/* ensure there is only one addr param and it's in the
	 * beginning of addip_hdr params, or we reject it.
	 */
	if (param.v != addip->addip_hdr.params)
		return false;
	addr_param_seen = true;
	break;
case SCTP_PARAM_IPV6_ADDRESS:
	if (length != sizeof(sctp_ipv6addr_param_t))
		return false;
	if (param.v != addip->addip_hdr.params)
		return false;
	addr_param_seen = true;
	break;
case SCTP_PARAM_ADD_IP:
@ -702,7 +702,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 * outstanding data and rely on the retransmission limit being reached
 * to shutdown the association.
 */
if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
	t->asoc->overall_error_count = 0;

/* Clear the hb_sent flag to signal that we had a good
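The change from != to < leans on the ordering of the state enum: every state sequenced before SHUTDOWN_PENDING may clear the error count, while SHUTDOWN_PENDING and anything after it keep counting toward the retransmission limit. A minimal sketch of that idea; the enum values below are illustrative and not SCTP's real numbering:

#include <stdbool.h>
#include <stdio.h>

enum toy_state { ESTABLISHED, SHUTDOWN_PENDING, SHUTDOWN_SENT, SHUTDOWN_RECEIVED };

static bool may_clear_error_count(enum toy_state s)
{
	return s < SHUTDOWN_PENDING;	/* was: s != SHUTDOWN_PENDING */
}

int main(void)
{
	printf("ESTABLISHED: %d\n", may_clear_error_count(ESTABLISHED));
	printf("SHUTDOWN_SENT: %d\n", may_clear_error_count(SHUTDOWN_SENT));
	return 0;
}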
@ -380,8 +380,8 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
	return 0;

if (!initxattrs)
	return call_int_hook(inode_init_security, 0, inode, dir, qstr,
			     NULL, NULL, NULL);
	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
			     dir, qstr, NULL, NULL, NULL);
memset(new_xattrs, 0, sizeof(new_xattrs));
lsm_xattr = new_xattrs;
ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,

@ -409,8 +409,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
{
if (unlikely(IS_PRIVATE(inode)))
	return -EOPNOTSUPP;
return call_int_hook(inode_init_security, 0, inode, dir, qstr,
		     name, value, len);
return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
		     qstr, name, value, len);
}
EXPORT_SYMBOL(security_old_inode_init_security);

@ -1281,7 +1281,8 @@ int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,

int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
	return call_int_hook(socket_getpeersec_dgram, 0, sock, skb, secid);
	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
			     skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
	goto out_child;
}

/*
 * Normally perf_session__new would do this, but it doesn't have the
 * evlist.
 */
if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
	pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
	rec->tool.ordered_events = false;
}

if (!rec->evlist->nr_groups)
	perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

@ -965,9 +974,11 @@ static struct record record = {
.tool = {
	.sample		= process_sample_event,
	.fork		= perf_event__process_fork,
	.exit		= perf_event__process_exit,
	.comm		= perf_event__process_comm,
	.mmap		= perf_event__process_mmap,
	.mmap2		= perf_event__process_mmap2,
	.ordered_events	= true,
},
};

@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)

static void display_setup_sig(void)
{
	signal(SIGSEGV, display_sig);
	signal(SIGFPE, display_sig);
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);

@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
					event->fork.ptid);
int err = 0;

if (dump_trace)
	perf_event__fprintf_task(event, stdout);

/*
 * There may be an existing thread that is not actually the parent,
 * either because we are processing events out of order, or because the
 * (fork) event that would have removed the thread was lost. Assume the
 * latter case and continue on as best we can.
 */
if (parent->pid_ != (pid_t)event->fork.ppid) {
	dump_printf("removing erroneous parent thread %d/%d\n",
		    parent->pid_, parent->tid);
	machine__remove_thread(machine, parent);
	thread__put(parent);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);
}

/* if a thread currently exists for the thread id remove it */
if (thread != NULL) {
	machine__remove_thread(machine, thread);

@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event

thread = machine__findnew_thread(machine, event->fork.pid,
				 event->fork.tid);
if (dump_trace)
	perf_event__fprintf_task(event, stdout);

if (thread == NULL || parent == NULL ||
    thread__fork(thread, parent, sample->time) < 0) {

@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
if (thread->pid_ == parent->pid_)
	return 0;

if (thread->mg == parent->mg) {
	pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
		 thread->pid_, thread->tid, parent->pid_, parent->tid);
	return 0;
}

/* But this one is a new process, copy maps. */
for (i = 0; i < MAP__NR_TYPES; ++i)
	if (map_groups__clone(thread->mg, parent->mg, i) < 0)