Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 09:12:07 +00:00
Minor merge needed, due to function move.
Main excitement here is Peter Zijlstra's lockless rbtree optimization to speed module address lookup. He found some abusers of the module lock doing that too. A little bit of parameter work here too; including Dan Streetman's breaking up the big param mutex so writing a parameter can load another module (yeah, really). Unfortunately that broke the usual suspects, !CONFIG_MODULES and !CONFIG_SYSFS, so those fixes were appended too.

Cheers,
Rusty.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJVkgKHAAoJENkgDmzRrbjxQpwQAJVmBN6jF3SnwbQXv9vRixjH
58V33sb1G1RW+kXxQ3/e8jLX/4VaN479CufruXQp+IJWXsN/CH0lbC3k8m7u50d7
b1Zeqd/Yrh79rkc11b0X1698uGCSMlzz+V54Z0QOTEEX+nSu2ZZvccFS4UaHkn3z
rqDo00lb7rxQz8U25qro2OZrG6D3ub2q20TkWUB8EO4AOHkPn8KWP2r429Axrr0K
wlDWDTTt8/IsvPbuPf3T15RAhq1avkMXWn9nDXDjyWbpLfTn8NFnWmtesgY7Jl4t
GjbXC5WYekX3w2ZDB9KaT/DAMQ1a7RbMXNSz4RX4VbzDl+yYeSLmIh2G9fZb1PbB
PsIxrOgy4BquOWsJPm+zeFPSC3q9Cfu219L4AmxSjiZxC3dlosg5rIB892Mjoyv4
qxmg6oiqtc4Jxv+Gl9lRFVOqyHZrTC5IJ+xgfv1EyP6kKMUKLlDZtxZAuQxpUyxR
HZLq220RYnYSvkWauikq4M8fqFM8bdt6hLJnv7bVqllseROk9stCvjSiE3A9szH5
OgtOfYV5GhOeb8pCZqJKlGDw+RoJ21jtNCgOr6DgkNKV9CX/kL/Puwv8gnA0B0eh
dxCeB7f/gcLl7Cg3Z3gVVcGlgak6JWrLf5ITAJhBZ8Lv+AtL2DKmwEWS/iIMRmek
tLdh/a9GiCitqS0bT7GE
=tWPQ
-----END PGP SIGNATURE-----

Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull module updates from Rusty Russell:
 "Main excitement here is Peter Zijlstra's lockless rbtree optimization
  to speed module address lookup. He found some abusers of the module
  lock doing that too.

  A little bit of parameter work here too; including Dan Streetman's
  breaking up the big param mutex so writing a parameter can load
  another module (yeah, really). Unfortunately that broke the usual
  suspects, !CONFIG_MODULES and !CONFIG_SYSFS, so those fixes were
  appended too"

* tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (26 commits)
  modules: only use mod->param_lock if CONFIG_MODULES
  param: fix module param locks when !CONFIG_SYSFS.
  rcu: merge fix for Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()
  module: add per-module param_lock
  module: make perm const
  params: suppress unused variable error, warn once just in case code changes.
  modules: clarify CONFIG_MODULE_COMPRESS help, suggest 'N'.
  kernel/module.c: avoid ifdefs for sig_enforce declaration
  kernel/workqueue.c: remove ifdefs over wq_power_efficient
  kernel/params.c: export param_ops_bool_enable_only
  kernel/params.c: generalize bool_enable_only
  kernel/module.c: use generic module param operaters for sig_enforce
  kernel/params: constify struct kernel_param_ops uses
  sysfs: tightened sysfs permission checks
  module: Rework module_addr_{min,max}
  module: Use __module_address() for module_address_lookup()
  module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING
  module: Optimize __module_address() using a latched RB-tree
  rbtree: Implement generic latch_tree
  seqlock: Introduce raw_read_seqcount_latch()
  ...
This commit is contained in:
commit 02201e3f1b
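Most of the driver churn in the diff below follows one mechanical pattern: the old global __kernel_param_lock() / kparam_block_sysfs_write() interfaces become the per-module kernel_param_lock(THIS_MODULE) / kernel_param_unlock(THIS_MODULE) pair, so writing one parameter no longer holds a single system-wide param mutex. A minimal migration sketch (the fw_name parameter here is illustrative, not taken from any one driver):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static char *fw_name;			/* illustrative charp parameter */
	module_param(fw_name, charp, 0644);

	static void use_fw_name(void)
	{
		/* Before: kparam_block_sysfs_write(fw_name); took the global lock. */
		/* After: take this module's own param_lock instead. */
		kernel_param_lock(THIS_MODULE);
		pr_info("firmware: %s\n", fw_name ? fw_name : "(none)");
		kernel_param_unlock(THIS_MODULE);
	}

Because the lock is now per-module, a parameter setter in one module can trigger loading another module without deadlocking on the old global param mutex.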
@@ -1572,7 +1572,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
}

#define param_check_sfb_size(name, p) __param_check(name, p, void)

-static struct kernel_param_ops param_ops_sfb_size = {
+static const struct kernel_param_ops param_ops_sfb_size = {
	.set = param_set_sfb_size,
	.get = param_get_sfb_size,
};
@@ -185,9 +185,9 @@ static int hostaudio_open(struct inode *inode, struct file *file)
	int ret;

#ifdef DEBUG
-	kparam_block_sysfs_write(dsp);
+	kernel_param_lock(THIS_MODULE);
	printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp);
-	kparam_unblock_sysfs_write(dsp);
+	kernel_param_unlock(THIS_MODULE);
#endif

	state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
@@ -199,11 +199,11 @@ static int hostaudio_open(struct inode *inode, struct file *file)
	if (file->f_mode & FMODE_WRITE)
		w = 1;

-	kparam_block_sysfs_write(dsp);
+	kernel_param_lock(THIS_MODULE);
	mutex_lock(&hostaudio_mutex);
	ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
	mutex_unlock(&hostaudio_mutex);
-	kparam_unblock_sysfs_write(dsp);
+	kernel_param_unlock(THIS_MODULE);

	if (ret < 0) {
		kfree(state);
@@ -260,17 +260,17 @@ static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
	if (file->f_mode & FMODE_WRITE)
		w = 1;

-	kparam_block_sysfs_write(mixer);
+	kernel_param_lock(THIS_MODULE);
	mutex_lock(&hostaudio_mutex);
	ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
	mutex_unlock(&hostaudio_mutex);
-	kparam_unblock_sysfs_write(mixer);
+	kernel_param_unlock(THIS_MODULE);

	if (ret < 0) {
-		kparam_block_sysfs_write(dsp);
+		kernel_param_lock(THIS_MODULE);
		printk(KERN_ERR "hostaudio_open_mixdev failed to open '%s', "
		       "err = %d\n", dsp, -ret);
-		kparam_unblock_sysfs_write(dsp);
+		kernel_param_unlock(THIS_MODULE);
		kfree(state);
		return ret;
	}
@@ -326,10 +326,10 @@ MODULE_LICENSE("GPL");

static int __init hostaudio_init_module(void)
{
-	__kernel_param_lock();
+	kernel_param_lock(THIS_MODULE);
	printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
	       dsp, mixer);
-	__kernel_param_unlock();
+	kernel_param_unlock(THIS_MODULE);

	module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
	if (module_data.dev_audio < 0) {
@@ -297,7 +297,7 @@ static int mmu_audit_set(const char *val, const struct kernel_param *kp)
	return 0;
}

-static struct kernel_param_ops audit_param_ops = {
+static const struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};
@@ -104,7 +104,7 @@ static int param_set_local64(const char *val, const struct kernel_param *kp)
	return 0;
}

-static struct kernel_param_ops param_ops_local64 = {
+static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
@@ -99,7 +99,7 @@ static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

-static struct kernel_param_ops null_queue_mode_param_ops = {
+static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set = null_set_queue_mode,
	.get = param_get_int,
};
@@ -127,7 +127,7 @@ static int null_set_irqmode(const char *str, const struct kernel_param *kp)
					NULL_IRQ_TIMER);
}

-static struct kernel_param_ops null_irqmode_param_ops = {
+static const struct kernel_param_ops null_irqmode_param_ops = {
	.set = null_set_irqmode,
	.get = param_get_int,
};
@@ -208,7 +208,7 @@ static int set_param_timeout(const char *val, const struct kernel_param *kp)
	return rv;
}

-static struct kernel_param_ops param_ops_timeout = {
+static const struct kernel_param_ops param_ops_timeout = {
	.set = set_param_timeout,
	.get = param_get_int,
};
@@ -270,14 +270,14 @@ static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
	return 0;
}

-static struct kernel_param_ops param_ops_wdog_ifnum = {
+static const struct kernel_param_ops param_ops_wdog_ifnum = {
	.set = set_param_wdog_ifnum,
	.get = param_get_int,
};

#define param_check_wdog_ifnum param_check_int

-static struct kernel_param_ops param_ops_str = {
+static const struct kernel_param_ops param_ops_str = {
	.set = set_param_str,
	.get = get_param_str,
};
@@ -120,7 +120,7 @@ static struct dmatest_info {

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
-static struct kernel_param_ops run_ops = {
+static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
@@ -195,7 +195,7 @@ static int dmatest_wait_get(char *val, const struct kernel_param *kp)
	return param_get_bool(val, kp);
}

-static struct kernel_param_ops wait_ops = {
+static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
@@ -199,7 +199,7 @@ static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp)
	return 0;
}

-static struct kernel_param_ops param_ops_ide_dev_mask = {
+static const struct kernel_param_ops param_ops_ide_dev_mask = {
	.set = ide_set_dev_param_mask
};
@@ -99,7 +99,7 @@ module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

-static struct kernel_param_ops srp_tmo_ops;
+static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
@@ -184,7 +184,7 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
	return res;
}

-static struct kernel_param_ops srp_tmo_ops = {
+static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
@@ -94,7 +94,7 @@ static int ati_remote2_get_mode_mask(char *buffer,

static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
#define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
-static struct kernel_param_ops param_ops_channel_mask = {
+static const struct kernel_param_ops param_ops_channel_mask = {
	.set = ati_remote2_set_channel_mask,
	.get = ati_remote2_get_channel_mask,
};
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<

static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK;
#define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
-static struct kernel_param_ops param_ops_mode_mask = {
+static const struct kernel_param_ops param_ops_mode_mask = {
	.set = ati_remote2_set_mode_mask,
	.get = ati_remote2_get_mode_mask,
};
@@ -47,7 +47,7 @@ MODULE_LICENSE("GPL");
static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
static int psmouse_set_maxproto(const char *val, const struct kernel_param *);
static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp);
-static struct kernel_param_ops param_ops_proto_abbrev = {
+static const struct kernel_param_ops param_ops_proto_abbrev = {
	.set = psmouse_set_maxproto,
	.get = psmouse_get_maxproto,
};
@@ -115,7 +115,7 @@ static int param_set_axis(const char *val, const struct kernel_param *kp)
	return ret;
}

-static struct kernel_param_ops param_ops_axis = {
+static const struct kernel_param_ops param_ops_axis = {
	.set = param_set_axis,
	.get = param_get_int,
};
@@ -162,7 +162,7 @@ static int __init ubiblock_set_param(const char *val,
	return 0;
}

-static struct kernel_param_ops ubiblock_param_ops = {
+static const struct kernel_param_ops ubiblock_param_ops = {
	.set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
@@ -279,7 +279,7 @@ MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

-/* Careful: must be accessed under kparam_block_sysfs_write */
+/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
@@ -3427,7 +3427,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
		}
	}

-	kparam_block_sysfs_write(myri10ge_fw_name);
+	kernel_param_lock(THIS_MODULE);
	if (myri10ge_fw_name != NULL) {
		char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
		if (fw_name) {
@@ -3435,7 +3435,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
			set_fw_name(mgp, fw_name, true);
		}
	}
-	kparam_unblock_sysfs_write(myri10ge_fw_name);
+	kernel_param_unlock(THIS_MODULE);

	if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
	    myri10ge_fw_names[mgp->board_number] != NULL &&
@@ -62,7 +62,7 @@ static int mtu_max_set(const char *val, const struct kernel_param *kp)
	return ret;
}

-static struct kernel_param_ops mtu_max_ops = {
+static const struct kernel_param_ops mtu_max_ops = {
	.set = mtu_max_set,
	.get = param_get_uint,
};
@@ -91,7 +91,7 @@ static int ring_order_set(const char *val, const struct kernel_param *kp)
	return 0;
}

-static struct kernel_param_ops ring_order_ops = {
+static const struct kernel_param_ops ring_order_ops = {
	.set = ring_order_set,
	.get = param_get_uint,
};
@@ -821,15 +821,15 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)

	lbtf_deb_enter(LBTF_DEB_USB);

-	kparam_block_sysfs_write(fw_name);
+	kernel_param_lock(THIS_MODULE);
	ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
	if (ret < 0) {
		pr_err("request_firmware() failed with %#x\n", ret);
		pr_err("firmware %s not found\n", lbtf_fw_name);
-		kparam_unblock_sysfs_write(fw_name);
+		kernel_param_unlock(THIS_MODULE);
		goto done;
	}
-	kparam_unblock_sysfs_write(fw_name);
+	kernel_param_unlock(THIS_MODULE);

	if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
		goto release_fw;
@@ -448,42 +448,42 @@ static int param_set_battery_voltage(const char *key,

#define param_get_battery_voltage param_get_int

-static struct kernel_param_ops param_ops_ac_online = {
+static const struct kernel_param_ops param_ops_ac_online = {
	.set = param_set_ac_online,
	.get = param_get_ac_online,
};

-static struct kernel_param_ops param_ops_usb_online = {
+static const struct kernel_param_ops param_ops_usb_online = {
	.set = param_set_usb_online,
	.get = param_get_usb_online,
};

-static struct kernel_param_ops param_ops_battery_status = {
+static const struct kernel_param_ops param_ops_battery_status = {
	.set = param_set_battery_status,
	.get = param_get_battery_status,
};

-static struct kernel_param_ops param_ops_battery_present = {
+static const struct kernel_param_ops param_ops_battery_present = {
	.set = param_set_battery_present,
	.get = param_get_battery_present,
};

-static struct kernel_param_ops param_ops_battery_technology = {
+static const struct kernel_param_ops param_ops_battery_technology = {
	.set = param_set_battery_technology,
	.get = param_get_battery_technology,
};

-static struct kernel_param_ops param_ops_battery_health = {
+static const struct kernel_param_ops param_ops_battery_health = {
	.set = param_set_battery_health,
	.get = param_get_battery_health,
};

-static struct kernel_param_ops param_ops_battery_capacity = {
+static const struct kernel_param_ops param_ops_battery_capacity = {
	.set = param_set_battery_capacity,
	.get = param_get_battery_capacity,
};

-static struct kernel_param_ops param_ops_battery_voltage = {
+static const struct kernel_param_ops param_ops_battery_voltage = {
	.set = param_set_battery_voltage,
	.get = param_get_battery_voltage,
};
@@ -119,7 +119,7 @@ static int duration_set(const char *arg, const struct kernel_param *kp)
	return ret;
}

-static struct kernel_param_ops duration_ops = {
+static const struct kernel_param_ops duration_ops = {
	.set = duration_set,
	.get = param_get_int,
};
@@ -167,7 +167,7 @@ static int window_size_set(const char *arg, const struct kernel_param *kp)
	return ret;
}

-static struct kernel_param_ops window_size_ops = {
+static const struct kernel_param_ops window_size_ops = {
	.set = window_size_set,
	.get = param_get_int,
};
@@ -1345,7 +1345,7 @@ static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

-static struct kernel_param_ops param_ops_vmidfilter = {
+static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};
@@ -988,7 +988,7 @@ static int sysrq_reset_seq_param_set(const char *buffer,
	return 0;
}

-static struct kernel_param_ops param_ops_sysrq_reset_seq = {
+static const struct kernel_param_ops param_ops_sysrq_reset_seq = {
	.get = param_get_ushort,
	.set = sysrq_reset_seq_param_set,
};
@@ -1599,7 +1599,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
	char file_arr[] = "CMVxy.bin";
	char *file;

-	kparam_block_sysfs_write(cmv_file);
+	kernel_param_lock(THIS_MODULE);
	/* set proper name corresponding modem version and line type */
	if (cmv_file[sc->modem_index] == NULL) {
		if (UEA_CHIP_VERSION(sc) == ADI930)
@@ -1618,7 +1618,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
	strlcat(cmv_name, file, UEA_FW_NAME_MAX);
	if (ver == 2)
		strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
-	kparam_unblock_sysfs_write(cmv_file);
+	kernel_param_unlock(THIS_MODULE);
}

static int request_cmvs_old(struct uea_softc *sc,
@@ -1977,7 +1977,7 @@ static int param_set_scroll(const char *val, const struct kernel_param *kp)

	return 0;
}
-static struct kernel_param_ops param_ops_scroll = {
+static const struct kernel_param_ops param_ops_scroll = {
	.set = param_set_scroll,
};
#define param_check_scroll(name, p) __param_check(name, p, void)
@@ -754,9 +754,9 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)

	/* Prepare startup mode */

-	kparam_block_sysfs_write(mode_option);
+	kernel_param_lock(THIS_MODULE);
	rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
-	kparam_unblock_sysfs_write(mode_option);
+	kernel_param_unlock(THIS_MODULE);
	if (! ((rc == 1) || (rc == 2))) {
		rc = -EINVAL;
		dev_err(info->device, "mode %s not found\n", mode_option);
@@ -691,7 +691,7 @@ static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
	return strlen(buffer) + 1;
}

-static struct kernel_param_ops vm_cmdline_param_ops = {
+static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};
@@ -2847,7 +2847,7 @@ static int param_set_portnr(const char *val, const struct kernel_param *kp)
	*((unsigned int *)kp->arg) = num;
	return 0;
}
-static struct kernel_param_ops param_ops_portnr = {
+static const struct kernel_param_ops param_ops_portnr = {
	.set = param_set_portnr,
	.get = param_get_uint,
};
@@ -475,6 +475,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+	typeof(p) _________p1 = READ_ONCE(p); \
+	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+	(_________p1); \
+})
+
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
@@ -813,13 +813,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif

/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
-#define VERIFY_OCTAL_PERMISSIONS(perms)					\
-	(BUILD_BUG_ON_ZERO((perms) < 0) +				\
-	 BUILD_BUG_ON_ZERO((perms) > 0777) +				\
-	 /* User perms >= group perms >= other perms */			\
-	 BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) +	\
-	 BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) +	\
-	 /* Other writable? Generally considered a bad idea. */		\
-	 BUILD_BUG_ON_ZERO((perms) & 2) +				\
+#define VERIFY_OCTAL_PERMISSIONS(perms)						\
+	(BUILD_BUG_ON_ZERO((perms) < 0) +					\
+	 BUILD_BUG_ON_ZERO((perms) > 0777) +					\
+	 /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */		\
+	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) +	\
+	 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) +		\
+	 /* USER_WRITABLE >= GROUP_WRITABLE */					\
+	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) +	\
+	 /* OTHER_WRITABLE? Generally considered a bad idea. */			\
+	 BUILD_BUG_ON_ZERO((perms) & 2) +					\
	 (perms))
#endif
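The rewritten macro compares the read and write bits class by class instead of comparing whole octal digits numerically. A hedged illustration of what it accepts and rejects at compile time (the parameter name is invented):

	static int debug_level;			/* invented example parameter */

	module_param(debug_level, int, 0644);	/* OK: user rw, group/other read-only */
	/* module_param(debug_level, int, 0666);  fails: other-writable */
	/* module_param(debug_level, int, 0044);  fails: group-readable but not user-readable */

Each failing case trips one of the BUILD_BUG_ON_ZERO() terms, so the mistake is caught at build time rather than showing up as an odd sysfs mode.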
@@ -17,6 +17,7 @@
#include <linux/moduleparam.h>
#include <linux/jump_label.h>
#include <linux/export.h>
+#include <linux/rbtree_latch.h>

#include <linux/percpu.h>
#include <asm/module.h>
@@ -210,6 +211,13 @@ enum module_state {
	MODULE_STATE_UNFORMED,	/* Still setting it up. */
};

+struct module;
+
+struct mod_tree_node {
+	struct module *mod;
+	struct latch_tree_node node;
+};
+
struct module {
	enum module_state state;
@@ -232,6 +240,9 @@ struct module {
	unsigned int num_syms;

	/* Kernel parameters. */
+#ifdef CONFIG_SYSFS
+	struct mutex param_lock;
+#endif
	struct kernel_param *kp;
	unsigned int num_kp;
@@ -271,8 +282,15 @@ struct module {
	/* Startup function. */
	int (*init)(void);

-	/* If this is non-NULL, vfree after init() returns */
-	void *module_init;
+	/*
+	 * If this is non-NULL, vfree() after init() returns.
+	 *
+	 * Cacheline align here, such that:
+	 *   module_init, module_core, init_size, core_size,
+	 *   init_text_size, core_text_size and mtn_core::{mod,node[0]}
+	 * are on the same cacheline.
+	 */
+	void *module_init	____cacheline_aligned;

	/* Here is the actual code + data, vfree'd on unload. */
	void *module_core;
@@ -283,6 +301,16 @@ struct module {
	/* The size of the executable code in each section. */
	unsigned int init_text_size, core_text_size;

+#ifdef CONFIG_MODULES_TREE_LOOKUP
+	/*
+	 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
+	 * above entries such that a regular lookup will only touch one
+	 * cacheline.
+	 */
+	struct mod_tree_node	mtn_core;
+	struct mod_tree_node	mtn_init;
+#endif
+
	/* Size of RO sections of the module (text+rodata) */
	unsigned int init_ro_size, core_ro_size;
@@ -369,7 +397,7 @@ struct module {
	ctor_fn_t *ctors;
	unsigned int num_ctors;
#endif
-};
+} ____cacheline_aligned;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
@@ -423,14 +451,22 @@ struct symsearch {
	bool unused;
};

-/* Search for an exported symbol by name. */
+/*
+ * Search for an exported symbol by name.
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn);

-/* Walk the exported symbol table */
+/*
+ * Walk the exported symbol table
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data), void *data);
@@ -67,8 +67,9 @@ enum {

struct kernel_param {
	const char *name;
+	struct module *mod;
	const struct kernel_param_ops *ops;
-	u16 perm;
+	const u16 perm;
	s8 level;
	u8 flags;
	union {
@@ -108,7 +109,7 @@ struct kparam_array
 *
 * @perm is 0 if the the variable is not to appear in sysfs, or 0444
 * for world-readable, 0644 for root-writable, etc. Note that if it
- * is writable, you may need to use kparam_block_sysfs_write() around
+ * is writable, you may need to use kernel_param_lock() around
 * accesses (esp. charp, which can be kfreed when it changes).
 *
 * The @type is simply pasted to refer to a param_ops_##type and a
@@ -216,16 +217,16 @@ struct kparam_array
   parameters. */
#define __module_param_call(prefix, name, ops, arg, perm, level, flags)	\
	/* Default value instead of permissions? */			\
-	static const char __param_str_##name[] = prefix #name; \
+	static const char __param_str_##name[] = prefix #name;		\
	static struct kernel_param __moduleparam_const __param_##name	\
	__used								\
    __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
-	= { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm),	\
-	    level, flags, { arg } }
+	= { __param_str_##name, THIS_MODULE, ops,			\
+	    VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }

/* Obsolete - use module_param_cb() */
#define module_param_call(name, set, get, arg, perm)			\
-	static struct kernel_param_ops __param_ops_##name =		\
+	static const struct kernel_param_ops __param_ops_##name =	\
		{ .flags = 0, (void *)set, (void *)get };		\
	__module_param_call(MODULE_PARAM_PREFIX,			\
			    name, &__param_ops_##name, arg,		\
@@ -238,58 +239,14 @@ __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
	return 0;
}

-/**
- * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
- * @name: the name of the parameter
- *
- * There's no point blocking write on a paramter that isn't writable via sysfs!
- */
-#define kparam_block_sysfs_write(name)			\
-	do {						\
-		BUG_ON(!(__param_##name.perm & 0222));	\
-		__kernel_param_lock();			\
-	} while (0)
-
-/**
- * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_write(name)		\
-	do {						\
-		BUG_ON(!(__param_##name.perm & 0222));	\
-		__kernel_param_unlock();		\
-	} while (0)
-
-/**
- * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
- * @name: the name of the parameter
- *
- * This also blocks sysfs writes.
- */
-#define kparam_block_sysfs_read(name)			\
-	do {						\
-		BUG_ON(!(__param_##name.perm & 0444));	\
-		__kernel_param_lock();			\
-	} while (0)
-
-/**
- * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_read(name)			\
-	do {						\
-		BUG_ON(!(__param_##name.perm & 0444));	\
-		__kernel_param_unlock();		\
-	} while (0)
-
#ifdef CONFIG_SYSFS
-extern void __kernel_param_lock(void);
-extern void __kernel_param_unlock(void);
+extern void kernel_param_lock(struct module *mod);
+extern void kernel_param_unlock(struct module *mod);
#else
-static inline void __kernel_param_lock(void)
+static inline void kernel_param_lock(struct module *mod)
{
}
-static inline void __kernel_param_unlock(void)
+static inline void kernel_param_unlock(struct module *mod)
{
}
#endif
@@ -386,64 +343,70 @@ static inline void destroy_params(const struct kernel_param *params,
#define __param_check(name, p, type) \
	static inline type __always_unused *__check_##name(void) { return(p); }

-extern struct kernel_param_ops param_ops_byte;
+extern const struct kernel_param_ops param_ops_byte;
extern int param_set_byte(const char *val, const struct kernel_param *kp);
extern int param_get_byte(char *buffer, const struct kernel_param *kp);
#define param_check_byte(name, p) __param_check(name, p, unsigned char)

-extern struct kernel_param_ops param_ops_short;
+extern const struct kernel_param_ops param_ops_short;
extern int param_set_short(const char *val, const struct kernel_param *kp);
extern int param_get_short(char *buffer, const struct kernel_param *kp);
#define param_check_short(name, p) __param_check(name, p, short)

-extern struct kernel_param_ops param_ops_ushort;
+extern const struct kernel_param_ops param_ops_ushort;
extern int param_set_ushort(const char *val, const struct kernel_param *kp);
extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
#define param_check_ushort(name, p) __param_check(name, p, unsigned short)

-extern struct kernel_param_ops param_ops_int;
+extern const struct kernel_param_ops param_ops_int;
extern int param_set_int(const char *val, const struct kernel_param *kp);
extern int param_get_int(char *buffer, const struct kernel_param *kp);
#define param_check_int(name, p) __param_check(name, p, int)

-extern struct kernel_param_ops param_ops_uint;
+extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)

-extern struct kernel_param_ops param_ops_long;
+extern const struct kernel_param_ops param_ops_long;
extern int param_set_long(const char *val, const struct kernel_param *kp);
extern int param_get_long(char *buffer, const struct kernel_param *kp);
#define param_check_long(name, p) __param_check(name, p, long)

-extern struct kernel_param_ops param_ops_ulong;
+extern const struct kernel_param_ops param_ops_ulong;
extern int param_set_ulong(const char *val, const struct kernel_param *kp);
extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
#define param_check_ulong(name, p) __param_check(name, p, unsigned long)

-extern struct kernel_param_ops param_ops_ullong;
+extern const struct kernel_param_ops param_ops_ullong;
extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)

-extern struct kernel_param_ops param_ops_charp;
+extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
#define param_check_charp(name, p) __param_check(name, p, char *)

/* We used to allow int as well as bool. We're taking that away! */
-extern struct kernel_param_ops param_ops_bool;
+extern const struct kernel_param_ops param_ops_bool;
extern int param_set_bool(const char *val, const struct kernel_param *kp);
extern int param_get_bool(char *buffer, const struct kernel_param *kp);
#define param_check_bool(name, p) __param_check(name, p, bool)

-extern struct kernel_param_ops param_ops_invbool;
+extern const struct kernel_param_ops param_ops_bool_enable_only;
+extern int param_set_bool_enable_only(const char *val,
+				      const struct kernel_param *kp);
+/* getter is the same as for the regular bool */
+#define param_check_bool_enable_only param_check_bool
+
+extern const struct kernel_param_ops param_ops_invbool;
extern int param_set_invbool(const char *val, const struct kernel_param *kp);
extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
#define param_check_invbool(name, p) __param_check(name, p, bool)

/* An int, which can only be set like a bool (though it shows as an int). */
-extern struct kernel_param_ops param_ops_bint;
+extern const struct kernel_param_ops param_ops_bint;
extern int param_set_bint(const char *val, const struct kernel_param *kp);
#define param_get_bint param_get_int
#define param_check_bint param_check_int
@@ -487,9 +450,9 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
			    perm, -1, 0);				\
	__MODULE_PARM_TYPE(name, "array of " #type)

-extern struct kernel_param_ops param_array_ops;
+extern const struct kernel_param_ops param_array_ops;

-extern struct kernel_param_ops param_ops_string;
+extern const struct kernel_param_ops param_ops_string;
extern int param_set_copystring(const char *val, const struct kernel_param *);
extern int param_get_string(char *buffer, const struct kernel_param *kp);
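With every kernel_param_ops table in the tree now const, new code should declare its ops const from the start. A small sketch (all names invented) of a driver-local parameter wired up through module_param_cb(), which is how the ops tables above are consumed:

	static int example_set(const char *val, const struct kernel_param *kp)
	{
		/* delegate to the generic int setter; extra validation could go here */
		return param_set_int(val, kp);
	}

	static const struct kernel_param_ops example_ops = {
		.set = example_set,
		.get = param_get_int,
	};

	static int example_value;
	module_param_cb(example, &example_ops, &example_value, 0644);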
@@ -31,6 +31,7 @@

#include <linux/kernel.h>
#include <linux/stddef.h>
+#include <linux/rcupdate.h>

struct rb_node {
	unsigned long __rb_parent_color;
@@ -73,11 +74,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);

-static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
-				struct rb_node ** rb_link)
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+				struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;
@@ -85,6 +86,15 @@ static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
	*rb_link = node;
}

+static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
+				    struct rb_node **rb_link)
+{
+	node->__rb_parent_color = (unsigned long)parent;
+	node->rb_left = node->rb_right = NULL;
+
+	rcu_assign_pointer(*rb_link, node);
+}
+
#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
@@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
	if (parent) {
		if (parent->rb_left == old)
-			parent->rb_left = new;
+			WRITE_ONCE(parent->rb_left, new);
		else
-			parent->rb_right = new;
+			WRITE_ONCE(parent->rb_right, new);
	} else
-		root->rb_node = new;
+		WRITE_ONCE(root->rb_node, new);
}

extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -137,7 +137,8 @@ static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		     const struct rb_augment_callbacks *augment)
{
-	struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+	struct rb_node *child = node->rb_right;
+	struct rb_node *tmp = node->rb_left;
	struct rb_node *parent, *rebalance;
	unsigned long pc;
@@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		tmp = parent;
	} else {
		struct rb_node *successor = child, *child2;
+
		tmp = child->rb_left;
		if (!tmp) {
			/*
@@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
			 */
			parent = successor;
			child2 = successor->rb_right;
+
			augment->copy(node, successor);
		} else {
			/*
@@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
				successor = tmp;
				tmp = tmp->rb_left;
			} while (tmp);
-			parent->rb_left = child2 = successor->rb_right;
-			successor->rb_right = child;
+			child2 = successor->rb_right;
+			WRITE_ONCE(parent->rb_left, child2);
+			WRITE_ONCE(successor->rb_right, child);
			rb_set_parent(child, successor);

			augment->copy(node, successor);
			augment->propagate(parent, successor);
		}

-		successor->rb_left = tmp = node->rb_left;
+		tmp = node->rb_left;
+		WRITE_ONCE(successor->rb_left, tmp);
		rb_set_parent(tmp, successor);

		pc = node->__rb_parent_color;
		tmp = __rb_parent(pc);
		__rb_change_child(node, successor, tmp, root);

		if (child2) {
			successor->__rb_parent_color = pc;
			rb_set_parent_color(child2, parent, RB_BLACK);
include/linux/rbtree_latch.h (new file, 212 lines)
@@ -0,0 +1,212 @@
/*
 * Latched RB-trees
 *
 * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
 *
 * Since RB-trees have non-atomic modifications they're not immediately suited
 * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
 * lockless lookups; we cannot guarantee they return a correct result.
 *
 * The simplest solution is a seqlock + RB-tree, this will allow lockless
 * lookups; but has the constraint (inherent to the seqlock) that read sides
 * cannot nest in write sides.
 *
 * If we need to allow unconditional lookups (say as required for NMI context
 * usage) we need a more complex setup; this data structure provides this by
 * employing the latch technique -- see @raw_write_seqcount_latch -- to
 * implement a latched RB-tree which does allow for unconditional lookups by
 * virtue of always having (at least) one stable copy of the tree.
 *
 * However, while we have the guarantee that there is at all times one stable
 * copy, this does not guarantee an iteration will not observe modifications.
 * What might have been a stable copy at the start of the iteration, need not
 * remain so for the duration of the iteration.
 *
 * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
 * see the comment in lib/rbtree.c. Note however that we only require the first
 * condition -- not seeing partial stores -- because the latch thing isolates
 * us from loops. If we were to interrupt a modification the lookup would be
 * pointed at the stable tree and complete while the modification was halted.
 */

#ifndef RB_TREE_LATCH_H
#define RB_TREE_LATCH_H

#include <linux/rbtree.h>
#include <linux/seqlock.h>

struct latch_tree_node {
	struct rb_node node[2];
};

struct latch_tree_root {
	seqcount_t	seq;
	struct rb_root	tree[2];
};

/**
 * latch_tree_ops - operators to define the tree order
 * @less: used for insertion; provides the (partial) order between two elements.
 * @comp: used for lookups; provides the order between the search key and an element.
 *
 * The operators are related like:
 *
 *	comp(a->key,b) < 0  := less(a,b)
 *	comp(a->key,b) > 0  := less(b,a)
 *	comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
 *
 * If these operators define a partial order on the elements we make no
 * guarantee on which of the elements matching the key is found. See
 * latch_tree_find().
 */
struct latch_tree_ops {
	bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
	int  (*comp)(void *key, struct latch_tree_node *b);
};

static __always_inline struct latch_tree_node *
__lt_from_rb(struct rb_node *node, int idx)
{
	return container_of(node, struct latch_tree_node, node[idx]);
}

static __always_inline void
__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
	    bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
{
	struct rb_root *root = &ltr->tree[idx];
	struct rb_node **link = &root->rb_node;
	struct rb_node *node = &ltn->node[idx];
	struct rb_node *parent = NULL;
	struct latch_tree_node *ltp;

	while (*link) {
		parent = *link;
		ltp = __lt_from_rb(parent, idx);

		if (less(ltn, ltp))
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node_rcu(node, parent, link);
	rb_insert_color(node, root);
}

static __always_inline void
__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
{
	rb_erase(&ltn->node[idx], &ltr->tree[idx]);
}

static __always_inline struct latch_tree_node *
__lt_find(void *key, struct latch_tree_root *ltr, int idx,
	  int (*comp)(void *key, struct latch_tree_node *node))
{
	struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
	struct latch_tree_node *ltn;
	int c;

	while (node) {
		ltn = __lt_from_rb(node, idx);
		c = comp(key, ltn);

		if (c < 0)
			node = rcu_dereference_raw(node->rb_left);
		else if (c > 0)
			node = rcu_dereference_raw(node->rb_right);
		else
			return ltn;
	}

	return NULL;
}

/**
 * latch_tree_insert() - insert @node into the trees @root
 * @node: nodes to insert
 * @root: trees to insert @node into
 * @ops: operators defining the node order
 *
 * It inserts @node into @root in an ordered fashion such that we can always
 * observe one complete tree. See the comment for raw_write_seqcount_latch().
 *
 * The inserts use rcu_assign_pointer() to publish the element such that the
 * tree structure is stored before we can observe the new @node.
 *
 * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
 * serialized.
 */
static __always_inline void
latch_tree_insert(struct latch_tree_node *node,
		  struct latch_tree_root *root,
		  const struct latch_tree_ops *ops)
{
	raw_write_seqcount_latch(&root->seq);
	__lt_insert(node, root, 0, ops->less);
	raw_write_seqcount_latch(&root->seq);
	__lt_insert(node, root, 1, ops->less);
}

/**
 * latch_tree_erase() - removes @node from the trees @root
 * @node: nodes to remote
 * @root: trees to remove @node from
 * @ops: operators defining the node order
 *
 * Removes @node from the trees @root in an ordered fashion such that we can
 * always observe one complete tree. See the comment for
 * raw_write_seqcount_latch().
 *
 * It is assumed that @node will observe one RCU quiescent state before being
 * reused of freed.
 *
 * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
 * serialized.
 */
static __always_inline void
latch_tree_erase(struct latch_tree_node *node,
		 struct latch_tree_root *root,
		 const struct latch_tree_ops *ops)
{
	raw_write_seqcount_latch(&root->seq);
	__lt_erase(node, root, 0);
	raw_write_seqcount_latch(&root->seq);
	__lt_erase(node, root, 1);
}

/**
 * latch_tree_find() - find the node matching @key in the trees @root
 * @key: search key
 * @root: trees to search for @key
 * @ops: operators defining the node order
 *
 * Does a lockless lookup in the trees @root for the node matching @key.
 *
 * It is assumed that this is called while holding the appropriate RCU read
 * side lock.
 *
 * If the operators define a partial order on the elements (there are multiple
 * elements which have the same key value) it is undefined which of these
 * elements will be found. Nor is it possible to iterate the tree to find
 * further elements with the same key value.
 *
 * Returns: a pointer to the node matching @key or NULL.
 */
static __always_inline struct latch_tree_node *
latch_tree_find(void *key, struct latch_tree_root *root,
		const struct latch_tree_ops *ops)
{
	struct latch_tree_node *node;
	unsigned int seq;

	do {
		seq = raw_read_seqcount_latch(&root->seq);
		node = __lt_find(key, root, seq & 1, ops->comp);
	} while (read_seqcount_retry(&root->seq, seq));

	return node;
}

#endif /* RB_TREE_LATCH_H */
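To make the API above concrete, here is a hedged usage sketch (not from the kernel tree): a latched tree of integer-keyed items, with writers serialized externally (say by a mutex) and readers fully lockless:

	struct item {
		unsigned long key;
		struct latch_tree_node ltn;	/* one rb_node per tree copy */
	};

	static bool item_less(struct latch_tree_node *a, struct latch_tree_node *b)
	{
		return container_of(a, struct item, ltn)->key <
		       container_of(b, struct item, ltn)->key;
	}

	static int item_comp(void *key, struct latch_tree_node *n)
	{
		unsigned long k = (unsigned long)key;
		unsigned long nk = container_of(n, struct item, ltn)->key;

		if (k < nk)
			return -1;
		if (k > nk)
			return 1;
		return 0;
	}

	static const struct latch_tree_ops item_ops = {
		.less = item_less,
		.comp = item_comp,
	};

	static struct latch_tree_root items;

	static void item_add(struct item *it)	/* caller holds the writer lock */
	{
		latch_tree_insert(&it->ltn, &items, &item_ops);
	}

	static struct item *item_lookup(unsigned long key)	/* lockless */
	{
		struct latch_tree_node *n;

		n = latch_tree_find((void *)key, &items, &item_ops);
		return n ? container_of(n, struct item, ltn) : NULL;
	}

This mirrors how kernel/module.c, later in this diff, builds mod_tree_ops for __module_address().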
@@ -632,21 +632,6 @@ static inline void rcu_preempt_sleep_check(void)
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

-/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- */
-#define lockless_dereference(p) \
-({ \
-	typeof(p) _________p1 = READ_ONCE(p); \
-	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-	(_________p1); \
-})
-
/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
+#include <linux/compiler.h>
#include <asm/processor.h>

/*
@@ -274,9 +275,87 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
	s->sequence++;
}

-/*
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+	return lockless_dereference(s->sequence);
+}
+
+/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+ * interrupt the modification -- e.g. the concurrency is strictly between CPUs
+ * -- you most likely do not need this.
+ *
+ * Where the traditional RCU/lockless data structures rely on atomic
+ * modifications to ensure queries observe either the old or the new state the
+ * latch allows the same for non-atomic updates. The trade-off is doubling the
+ * cost of storage; we have to maintain two copies of the entire data
+ * structure.
+ *
+ * Very simply put: we first modify one copy and then the other. This ensures
+ * there is always one copy in a stable state, ready to give us an answer.
+ *
+ * The basic form is a data structure like:
+ *
+ * struct latch_struct {
+ *	seqcount_t		seq;
+ *	struct data_struct	data[2];
+ * };
+ *
+ * Where a modification, which is assumed to be externally serialized, does the
+ * following:
+ *
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ *	smp_wmb();	<- Ensure that the last data[1] update is visible
+ *	latch->seq++;
+ *	smp_wmb();	<- Ensure that the seqcount update is visible
+ *
+ *	modify(latch->data[0], ...);
+ *
+ *	smp_wmb();	<- Ensure that the data[0] update is visible
+ *	latch->seq++;
+ *	smp_wmb();	<- Ensure that the seqcount update is visible
+ *
+ *	modify(latch->data[1], ...);
+ * }
+ *
+ * The query will have a form like:
+ *
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ *	struct entry *entry;
+ *	unsigned seq, idx;
+ *
+ *	do {
+ *		seq = lockless_dereference(latch->seq);
+ *
+ *		idx = seq & 0x01;
+ *		entry = data_query(latch->data[idx], ...);
+ *
+ *		smp_rmb();
+ *	} while (seq != latch->seq);
+ *
+ *	return entry;
+ * }
+ *
+ * So during the modification, queries are first redirected to data[1]. Then we
+ * modify data[0]. When that is complete, we redirect queries back to data[0]
+ * and we can modify data[1].
+ *
+ * NOTE: The non-requirement for atomic modifications does _NOT_ include
+ *       the publishing of new entries in the case where data is a dynamic
+ *       data structure.
+ *
+ *       An iteration might start in data[0] and get suspended long enough
+ *       to miss an entire modification sequence, once it resumes it might
+ *       observe the new entry.
+ *
+ * NOTE: When data is a dynamic data structure; one should use regular RCU
+ *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
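For reference, a compact read-side sketch of the pairing this helper enables (struct and field names invented; the writer is assumed to call raw_write_seqcount_latch() between updating the two copies, as in the doc comment above):

	struct val_latch {
		seqcount_t seq;
		unsigned long data[2];
	};

	static unsigned long val_read(struct val_latch *vl)
	{
		unsigned long v;
		unsigned int seq;

		do {
			seq = raw_read_seqcount_latch(&vl->seq);  /* pick a copy */
			v = vl->data[seq & 1];                    /* query it */
		} while (read_seqcount_retry(&vl->seq, seq));     /* re-check */

		return v;
	}

latch_tree_find() in include/linux/rbtree_latch.h above is exactly this loop, with the query step being an RB-tree walk.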
init/Kconfig (29 lines changed)
|
||||
bool "Compress modules on installation"
|
||||
depends on MODULES
|
||||
help
|
||||
This option compresses the kernel modules when 'make
|
||||
modules_install' is run.
|
||||
|
||||
The modules will be compressed either using gzip or xz depend on the
|
||||
choice made in "Compression algorithm".
|
||||
Compresses kernel modules when 'make modules_install' is run; gzip or
|
||||
xz depending on "Compression algorithm" below.
|
||||
|
||||
module-init-tools has support for gzip format while kmod handle gzip
|
||||
and xz compressed modules.
|
||||
module-init-tools MAY support gzip, and kmod MAY support gzip and xz.
|
||||
|
||||
When a kernel module is installed from outside of the main kernel
|
||||
source and uses the Kbuild system for installing modules then that
|
||||
kernel module will also be compressed when it is installed.
|
||||
Out-of-tree kernel modules installed using Kbuild will also be
|
||||
compressed upon installation.
|
||||
|
||||
This option provides little benefit when the modules are to be used inside
|
||||
an initrd or initramfs, it generally is more efficient to compress the whole
|
||||
initrd or initramfs instead.
|
||||
Note: for modules inside an initrd or initramfs, it's more efficient
|
||||
to compress the whole initrd or initramfs instead.
|
||||
|
||||
This is fully compatible with signed modules while the signed module is
|
||||
compressed. module-init-tools or kmod handles decompression and provide to
|
||||
other layer the uncompressed but signed payload.
|
||||
Note: This is fully compatible with signed modules.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
choice
|
||||
prompt "Compression algorithm"
|
||||
@@ -1982,6 +1977,10 @@ endchoice

endif # MODULES

+config MODULES_TREE_LOOKUP
+	def_bool y
+	depends on PERF_EVENTS || TRACING
+
config INIT_ALL_POSSIBLE
	bool
	help
@@ -302,7 +302,7 @@ static int jump_label_add_module(struct module *mod)
			continue;

		key = iterk;
-		if (__module_address(iter->key) == mod) {
+		if (within_module(iter->key, mod)) {
			/*
			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
			 */
@@ -339,7 +339,7 @@ static void jump_label_del_module(struct module *mod)

		key = (struct static_key *)(unsigned long)iter->key;

-		if (__module_address(iter->key) == mod)
+		if (within_module(iter->key, mod))
			continue;

		prev = &key->next;
@@ -443,14 +443,16 @@ static void jump_label_update(struct static_key *key, int enable)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
-	struct module *mod = __module_address((unsigned long)key);
+	struct module *mod;

	__jump_label_mod_update(key, enable);

+	preempt_disable();
+	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
+	preempt_enable();
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
kernel/module.c (317 lines changed)
@ -101,48 +101,201 @@
|
||||
DEFINE_MUTEX(module_mutex);
|
||||
EXPORT_SYMBOL_GPL(module_mutex);
|
||||
static LIST_HEAD(modules);
|
||||
|
||||
#ifdef CONFIG_MODULES_TREE_LOOKUP
|
||||
|
||||
/*
|
||||
* Use a latched RB-tree for __module_address(); this allows us to use
|
||||
* RCU-sched lookups of the address from any context.
|
||||
*
|
||||
* Because modules have two address ranges: init and core, we need two
|
||||
* latch_tree_nodes entries. Therefore we need the back-pointer from
|
||||
* mod_tree_node.
|
||||
*
|
||||
* Because init ranges are short lived we mark them unlikely and have placed
|
||||
* them outside the critical cacheline in struct module.
|
||||
*
|
||||
* This is conditional on PERF_EVENTS || TRACING because those can really hit
|
||||
* __module_address() hard by doing a lot of stack unwinding; potentially from
|
||||
* NMI context.
|
||||
*/
|
||||
|
||||
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
|
||||
{
|
||||
struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
|
||||
struct module *mod = mtn->mod;
|
||||
|
||||
if (unlikely(mtn == &mod->mtn_init))
|
||||
return (unsigned long)mod->module_init;
|
||||
|
||||
return (unsigned long)mod->module_core;
|
||||
}
|
||||
|
||||
static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
|
||||
{
|
||||
struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
|
||||
struct module *mod = mtn->mod;
|
||||
|
||||
if (unlikely(mtn == &mod->mtn_init))
|
||||
return (unsigned long)mod->init_size;
|
||||
|
||||
return (unsigned long)mod->core_size;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
|
||||
{
|
||||
return __mod_tree_val(a) < __mod_tree_val(b);
|
||||
}
|
||||
|
||||
static __always_inline int
|
||||
mod_tree_comp(void *key, struct latch_tree_node *n)
|
||||
{
|
||||
unsigned long val = (unsigned long)key;
|
||||
unsigned long start, end;
|
||||
|
||||
start = __mod_tree_val(n);
|
||||
if (val < start)
|
||||
return -1;
|
||||
|
||||
end = start + __mod_tree_size(n);
|
||||
if (val >= end)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct latch_tree_ops mod_tree_ops = {
|
||||
.less = mod_tree_less,
|
||||
.comp = mod_tree_comp,
|
||||
};
|
||||
|
||||
static struct mod_tree_root {
|
||||
struct latch_tree_root root;
|
||||
unsigned long addr_min;
|
||||
unsigned long addr_max;
|
||||
} mod_tree __cacheline_aligned = {
|
||||
.addr_min = -1UL,
|
||||
};
|
||||
|
||||
#define module_addr_min mod_tree.addr_min
|
||||
#define module_addr_max mod_tree.addr_max
|
||||
|
||||
static noinline void __mod_tree_insert(struct mod_tree_node *node)
|
||||
{
|
||||
latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
|
||||
}
|
||||
|
||||
static void __mod_tree_remove(struct mod_tree_node *node)
|
||||
{
|
||||
latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
|
||||
}
|
||||
|
||||
/*
|
||||
* These modifications: insert, remove_init and remove; are serialized by the
|
||||
* module_mutex.
|
||||
*/
|
||||
static void mod_tree_insert(struct module *mod)
|
||||
{
|
||||
mod->mtn_core.mod = mod;
|
||||
mod->mtn_init.mod = mod;
|
||||
|
||||
__mod_tree_insert(&mod->mtn_core);
|
||||
if (mod->init_size)
|
||||
__mod_tree_insert(&mod->mtn_init);
|
||||
}
|
||||
|
||||
static void mod_tree_remove_init(struct module *mod)
|
||||
{
|
||||
if (mod->init_size)
|
||||
__mod_tree_remove(&mod->mtn_init);
|
||||
}
|
||||
|
||||
static void mod_tree_remove(struct module *mod)
|
||||
{
|
||||
__mod_tree_remove(&mod->mtn_core);
|
||||
mod_tree_remove_init(mod);
|
||||
}
|
||||
|
||||
static struct module *mod_find(unsigned long addr)
|
||||
{
|
||||
struct latch_tree_node *ltn;
|
||||
|
||||
ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
|
||||
if (!ltn)
|
||||
return NULL;
|
||||
|
||||
return container_of(ltn, struct mod_tree_node, node)->mod;
|
||||
}
|
||||
|
||||
#else /* MODULES_TREE_LOOKUP */
|
||||
|
||||
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
|
||||
|
||||
static void mod_tree_insert(struct module *mod) { }
|
||||
static void mod_tree_remove_init(struct module *mod) { }
|
||||
static void mod_tree_remove(struct module *mod) { }
|
||||
|
||||
static struct module *mod_find(unsigned long addr)
|
||||
{
|
||||
struct module *mod;
|
||||
|
||||
list_for_each_entry_rcu(mod, &modules, list) {
|
||||
if (within_module(addr, mod))
|
||||
return mod;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* MODULES_TREE_LOOKUP */
|
||||
|
||||
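mod_find() above delegates to latch_tree_find() from the new include/linux/rbtree_latch.h. Its read side is roughly the following sketch (not a verbatim copy; __lt_find() stands for the internal descent helper): pick the tree copy selected by the low bit of the latch seqcount, search it locklessly, and retry if a writer flipped the latch meanwhile:

static struct latch_tree_node *
latch_find_sketch(void *key, struct latch_tree_root *root,
                  const struct latch_tree_ops *ops)
{
        struct latch_tree_node *node;
        unsigned int seq;

        do {
                seq = raw_read_seqcount_latch(&root->seq);      /* pick a copy */
                node = __lt_find(key, root, seq & 1, ops->comp);
        } while (read_seqcount_retry(&root->seq, seq));         /* raced? retry */

        return node;
}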
+/*
+ * Bounds of module text, for speeding up __module_address.
+ * Protected by module_mutex.
+ */
+static void __mod_update_bounds(void *base, unsigned int size)
+{
+       unsigned long min = (unsigned long)base;
+       unsigned long max = min + size;
+
+       if (min < module_addr_min)
+               module_addr_min = min;
+       if (max > module_addr_max)
+               module_addr_max = max;
+}
+
+static void mod_update_bounds(struct module *mod)
+{
+       __mod_update_bounds(mod->module_core, mod->core_size);
+       if (mod->init_size)
+               __mod_update_bounds(mod->module_init, mod->init_size);
+}
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */

-#ifdef CONFIG_MODULE_SIG
-#ifdef CONFIG_MODULE_SIG_FORCE
-static bool sig_enforce = true;
-#else
-static bool sig_enforce = false;
-
-static int param_set_bool_enable_only(const char *val,
-                                     const struct kernel_param *kp)
+static void module_assert_mutex(void)
 {
-       int err;
-       bool test;
-       struct kernel_param dummy_kp = *kp;
-
-       dummy_kp.arg = &test;
-
-       err = param_set_bool(val, &dummy_kp);
-       if (err)
-               return err;
-
-       /* Don't let them unset it once it's set! */
-       if (!test && sig_enforce)
-               return -EROFS;
-
-       if (test)
-               sig_enforce = true;
-       return 0;
+       lockdep_assert_held(&module_mutex);
 }

-static const struct kernel_param_ops param_ops_bool_enable_only = {
-       .flags = KERNEL_PARAM_OPS_FL_NOARG,
-       .set = param_set_bool_enable_only,
-       .get = param_get_bool,
-};
-#define param_check_bool_enable_only param_check_bool
+static void module_assert_mutex_or_preempt(void)
+{
+#ifdef CONFIG_LOCKDEP
+       if (unlikely(!debug_locks))
+               return;
+
+       WARN_ON(!rcu_read_lock_sched_held() &&
+               !lockdep_is_held(&module_mutex));
+#endif
+}

+static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
+#ifndef CONFIG_MODULE_SIG_FORCE
 module_param(sig_enforce, bool_enable_only, 0644);
 #endif /* !CONFIG_MODULE_SIG_FORCE */
-#endif /* CONFIG_MODULE_SIG */

 /* Block module loading/unloading? */
 int modules_disabled = 0;
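The sig_enforce hunk above also collapses an #ifdef/#else pair into IS_ENABLED(); the workqueue hunk further down does the same. IS_ENABLED(CONFIG_X) expands to a compile-time 1 or 0 whether or not the option is set, so a single initializer replaces the conditional block. A sketch with a hypothetical option:

/* before: one definition per configuration (CONFIG_FOO_DEFAULT is hypothetical) */
#ifdef CONFIG_FOO_DEFAULT
static bool foo = true;
#else
static bool foo = false;
#endif

/* after: same object code, no preprocessor branching */
static bool foo = IS_ENABLED(CONFIG_FOO_DEFAULT);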
@@ -153,10 +306,6 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);

 static BLOCKING_NOTIFIER_HEAD(module_notify_list);

-/* Bounds of module allocation, for speeding __module_address.
- * Protected by module_mutex. */
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
-
 int register_module_notifier(struct notifier_block *nb)
 {
        return blocking_notifier_chain_register(&module_notify_list, nb);
@@ -318,6 +467,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
        };

+       module_assert_mutex_or_preempt();
+
        if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
                return true;

@@ -457,6 +608,8 @@ static struct module *find_module_all(const char *name, size_t len,
 {
        struct module *mod;

+       module_assert_mutex();
+
        list_for_each_entry(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
                        continue;
@@ -1169,11 +1322,17 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
        const unsigned long *crc;

-       /* Since this should be found in kernel (which can't be removed),
-        * no locking is necessary. */
+       /*
+        * Since this should be found in kernel (which can't be removed), no
+        * locking is necessary -- use preempt_disable() to placate lockdep.
+        */
+       preempt_disable();
        if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
-                        &crc, true, false))
+                        &crc, true, false)) {
+               preempt_enable();
                BUG();
+       }
+       preempt_enable();
        return check_version(sechdrs, versindex,
                             VMLINUX_SYMBOL_STR(module_layout), mod, crc,
                             NULL);
@@ -1661,6 +1820,10 @@ static void mod_sysfs_fini(struct module *mod)
        mod_kobject_put(mod);
 }

+static void init_param_lock(struct module *mod)
+{
+       mutex_init(&mod->param_lock);
+}
 #else /* !CONFIG_SYSFS */

 static int mod_sysfs_setup(struct module *mod,
@@ -1683,6 +1846,9 @@ static void del_usage_links(struct module *mod)
 {
 }

+static void init_param_lock(struct module *mod)
+{
+}
 #endif /* CONFIG_SYSFS */

 static void mod_sysfs_teardown(struct module *mod)
@@ -1852,10 +2018,11 @@ static void free_module(struct module *mod)
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       mod_tree_remove(mod);
        /* Remove this module from bug list, this uses list_del_rcu */
        module_bug_cleanup(mod);
-       /* Wait for RCU synchronizing before releasing mod->list and buglist. */
-       synchronize_rcu();
+       /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+       synchronize_sched();
        mutex_unlock(&module_mutex);

        /* This may be NULL, but that's OK */
@@ -2384,22 +2551,6 @@ void * __weak module_alloc(unsigned long size)
        return vmalloc_exec(size);
 }

-static void *module_alloc_update_bounds(unsigned long size)
-{
-       void *ret = module_alloc(size);
-
-       if (ret) {
-               mutex_lock(&module_mutex);
-               /* Update module bounds. */
-               if ((unsigned long)ret < module_addr_min)
-                       module_addr_min = (unsigned long)ret;
-               if ((unsigned long)ret + size > module_addr_max)
-                       module_addr_max = (unsigned long)ret + size;
-               mutex_unlock(&module_mutex);
-       }
-       return ret;
-}
-
 #ifdef CONFIG_DEBUG_KMEMLEAK
 static void kmemleak_load_module(const struct module *mod,
                                 const struct load_info *info)
@@ -2805,7 +2956,7 @@ static int move_module(struct module *mod, struct load_info *info)
        void *ptr;

        /* Do the allocs. */
-       ptr = module_alloc_update_bounds(mod->core_size);
+       ptr = module_alloc(mod->core_size);
        /*
         * The pointer to this block is stored in the module structure
         * which is inside the block. Just mark it as not being a
@@ -2819,7 +2970,7 @@ static int move_module(struct module *mod, struct load_info *info)
        mod->module_core = ptr;

        if (mod->init_size) {
-               ptr = module_alloc_update_bounds(mod->init_size);
+               ptr = module_alloc(mod->init_size);
                /*
                 * The pointer to this block is stored in the module structure
                 * which is inside the block. This block doesn't need to be
@@ -3119,6 +3270,7 @@ static noinline int do_init_module(struct module *mod)
        mod->symtab = mod->core_symtab;
        mod->strtab = mod->core_strtab;
 #endif
+       mod_tree_remove_init(mod);
        unset_module_init_ro_nx(mod);
        module_arch_freeing_init(mod);
        mod->module_init = NULL;
@@ -3127,11 +3279,11 @@ static noinline int do_init_module(struct module *mod)
        mod->init_text_size = 0;
        /*
         * We want to free module_init, but be aware that kallsyms may be
-        * walking this with preempt disabled. In all the failure paths,
-        * we call synchronize_rcu/synchronize_sched, but we don't want
-        * to slow down the success path, so use actual RCU here.
+        * walking this with preempt disabled. In all the failure paths, we
+        * call synchronize_sched(), but we don't want to slow down the success
+        * path, so use actual RCU here.
         */
-       call_rcu(&freeinit->rcu, do_free_init);
+       call_rcu_sched(&freeinit->rcu, do_free_init);
        mutex_unlock(&module_mutex);
        wake_up_all(&module_wq);

@@ -3188,7 +3340,9 @@ static int add_unformed_module(struct module *mod)
                err = -EEXIST;
                goto out;
        }
+       mod_update_bounds(mod);
        list_add_rcu(&mod->list, &modules);
+       mod_tree_insert(mod);
        err = 0;

 out:
@@ -3304,6 +3458,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
        if (err)
                goto unlink_mod;

+       init_param_lock(mod);
+
        /* Now we've got everything in the final locations, we can
         * find optional sections. */
        err = find_module_sections(mod, info);
@@ -3402,8 +3558,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
        wake_up_all(&module_wq);
-       /* Wait for RCU synchronizing before releasing mod->list. */
-       synchronize_rcu();
+       /* Wait for RCU-sched synchronizing before releasing mod->list. */
+       synchronize_sched();
        mutex_unlock(&module_mutex);
 free_module:
        /* Free lock-classes; relies on the preceding sync_rcu() */
@@ -3527,19 +3683,15 @@ const char *module_address_lookup(unsigned long addr,
                                 char **modname,
                                 char *namebuf)
 {
-       struct module *mod;
        const char *ret = NULL;
+       struct module *mod;

        preempt_disable();
-       list_for_each_entry_rcu(mod, &modules, list) {
-               if (mod->state == MODULE_STATE_UNFORMED)
-                       continue;
-               if (within_module(addr, mod)) {
-                       if (modname)
-                               *modname = mod->name;
-                       ret = get_ksymbol(mod, addr, size, offset);
-                       break;
-               }
+       mod = __module_address(addr);
+       if (mod) {
+               if (modname)
+                       *modname = mod->name;
+               ret = get_ksymbol(mod, addr, size, offset);
        }
        /* Make a copy in here where it's safe */
        if (ret) {
@@ -3547,6 +3699,7 @@ const char *module_address_lookup(unsigned long addr,
                ret = namebuf;
        }
        preempt_enable();
+
        return ret;
 }

@@ -3670,6 +3823,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        unsigned int i;
        int ret;

+       module_assert_mutex();
+
        list_for_each_entry(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
@@ -3844,13 +3999,15 @@ struct module *__module_address(unsigned long addr)
        if (addr < module_addr_min || addr > module_addr_max)
                return NULL;

-       list_for_each_entry_rcu(mod, &modules, list) {
+       module_assert_mutex_or_preempt();
+
+       mod = mod_find(addr);
+       if (mod) {
+               BUG_ON(!within_module(addr, mod));
                if (mod->state == MODULE_STATE_UNFORMED)
-                       continue;
-               if (within_module(addr, mod))
-                       return mod;
+                       mod = NULL;
        }
-       return NULL;
+       return mod;
 }
 EXPORT_SYMBOL_GPL(__module_address);
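The comparator contract used throughout the tree code above is easy to get wrong, so a worked example helps: mod_tree_comp() must return -1 below the [start, start + size) range, 0 inside it, and 1 at or past the exclusive end. A stand-alone userspace model (hypothetical addresses, not from the diff):

#include <assert.h>

/* Models mod_tree_comp(): -1 below the range, 0 inside, 1 at/after the end. */
static int range_comp(unsigned long val, unsigned long start, unsigned long size)
{
        if (val < start)
                return -1;
        if (val >= start + size)
                return 1;
        return 0;
}

int main(void)
{
        assert(range_comp(0x0fff, 0x1000, 0x200) == -1);        /* below start */
        assert(range_comp(0x1000, 0x1000, 0x200) ==  0);        /* first byte */
        assert(range_comp(0x11ff, 0x1000, 0x200) ==  0);        /* last byte */
        assert(range_comp(0x1200, 0x1000, 0x200) ==  1);        /* end is exclusive */
        return 0;
}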
kernel/params.c

@@ -25,15 +25,34 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>

-/* Protects all parameters, and incidentally kmalloced_param list. */
+#ifdef CONFIG_SYSFS
+/* Protects all built-in parameters, modules use their own param_lock */
 static DEFINE_MUTEX(param_lock);

+/* Use the module's mutex, or if built-in use the built-in mutex */
+#ifdef CONFIG_MODULES
+#define KPARAM_MUTEX(mod)      ((mod) ? &(mod)->param_lock : &param_lock)
+#else
+#define KPARAM_MUTEX(mod)      (&param_lock)
+#endif
+
+static inline void check_kparam_locked(struct module *mod)
+{
+       BUG_ON(!mutex_is_locked(KPARAM_MUTEX(mod)));
+}
+#else
+static inline void check_kparam_locked(struct module *mod)
+{
+}
+#endif /* !CONFIG_SYSFS */
+
 /* This just allows us to keep track of which parameters are kmalloced. */
 struct kmalloced_param {
        struct list_head list;
        char val[];
 };
 static LIST_HEAD(kmalloced_params);
+static DEFINE_SPINLOCK(kmalloced_params_lock);

 static void *kmalloc_parameter(unsigned int size)
 {
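With the split above, kernel_param_lock()/kernel_param_unlock() take the module whose parameters are involved; NULL means a built-in parameter and falls back to the global mutex. A usage sketch from a module's point of view (the mac80211 hunk near the end of this diff does exactly this):

static void snapshot_params(void)
{
        kernel_param_lock(THIS_MODULE); /* NULL would mean "built-in params" */
        /* ... read several module_param() variables as one consistent set ... */
        kernel_param_unlock(THIS_MODULE);
}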
@@ -43,7 +62,10 @@ static void *kmalloc_parameter(unsigned int size)
        if (!p)
                return NULL;

+       spin_lock(&kmalloced_params_lock);
        list_add(&p->list, &kmalloced_params);
+       spin_unlock(&kmalloced_params_lock);
+
        return p->val;
 }

@@ -52,6 +74,7 @@ static void maybe_kfree_parameter(void *param)
 {
        struct kmalloced_param *p;

+       spin_lock(&kmalloced_params_lock);
        list_for_each_entry(p, &kmalloced_params, list) {
                if (p->val == param) {
                        list_del(&p->list);
@@ -59,6 +82,7 @@ static void maybe_kfree_parameter(void *param)
                        break;
                }
        }
+       spin_unlock(&kmalloced_params_lock);
 }

 static char dash2underscore(char c)
@@ -119,10 +143,10 @@ static int parse_one(char *param,
                                return -EINVAL;
                        pr_debug("handling %s with %p\n", param,
                                 params[i].ops->set);
-                       mutex_lock(&param_lock);
+                       kernel_param_lock(params[i].mod);
                        param_check_unsafe(&params[i]);
                        err = params[i].ops->set(val, &params[i]);
-                       mutex_unlock(&param_lock);
+                       kernel_param_unlock(params[i].mod);
                        return err;
                }
        }
@@ -254,7 +278,7 @@ char *parse_args(const char *doing,
                return scnprintf(buffer, PAGE_SIZE, format,     \
                                 *((type *)kp->arg));           \
        }                                                       \
-       struct kernel_param_ops param_ops_##name = {            \
+       const struct kernel_param_ops param_ops_##name = {      \
                .set = param_set_##name,                        \
                .get = param_get_##name,                        \
        };                                                      \
@@ -306,7 +330,7 @@ static void param_free_charp(void *arg)
        maybe_kfree_parameter(*((char **)arg));
 }

-struct kernel_param_ops param_ops_charp = {
+const struct kernel_param_ops param_ops_charp = {
        .set = param_set_charp,
        .get = param_get_charp,
        .free = param_free_charp,
@@ -331,13 +355,44 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_bool);

-struct kernel_param_ops param_ops_bool = {
+const struct kernel_param_ops param_ops_bool = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_bool,
        .get = param_get_bool,
 };
 EXPORT_SYMBOL(param_ops_bool);

+int param_set_bool_enable_only(const char *val, const struct kernel_param *kp)
+{
+       int err = 0;
+       bool new_value;
+       bool orig_value = *(bool *)kp->arg;
+       struct kernel_param dummy_kp = *kp;
+
+       dummy_kp.arg = &new_value;
+
+       err = param_set_bool(val, &dummy_kp);
+       if (err)
+               return err;
+
+       /* Don't let them unset it once it's set! */
+       if (!new_value && orig_value)
+               return -EROFS;
+
+       if (new_value)
+               err = param_set_bool(val, kp);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(param_set_bool_enable_only);
+
+const struct kernel_param_ops param_ops_bool_enable_only = {
+       .flags = KERNEL_PARAM_OPS_FL_NOARG,
+       .set = param_set_bool_enable_only,
+       .get = param_get_bool,
+};
+EXPORT_SYMBOL_GPL(param_ops_bool_enable_only);
+
 /* This one must be bool. */
 int param_set_invbool(const char *val, const struct kernel_param *kp)
 {
@@ -359,7 +414,7 @@ int param_get_invbool(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_invbool);

-struct kernel_param_ops param_ops_invbool = {
+const struct kernel_param_ops param_ops_invbool = {
        .set = param_set_invbool,
        .get = param_get_invbool,
 };
@@ -367,12 +422,11 @@ EXPORT_SYMBOL(param_ops_invbool);

 int param_set_bint(const char *val, const struct kernel_param *kp)
 {
-       struct kernel_param boolkp;
+       /* Match bool exactly, by re-using it. */
+       struct kernel_param boolkp = *kp;
        bool v;
        int ret;

-       /* Match bool exactly, by re-using it. */
-       boolkp = *kp;
        boolkp.arg = &v;

        ret = param_set_bool(val, &boolkp);
@@ -382,7 +436,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_set_bint);

-struct kernel_param_ops param_ops_bint = {
+const struct kernel_param_ops param_ops_bint = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_bint,
        .get = param_get_int,
@@ -390,7 +444,8 @@ struct kernel_param_ops param_ops_bint = {
 EXPORT_SYMBOL(param_ops_bint);

 /* We break the rule and mangle the string. */
-static int param_array(const char *name,
+static int param_array(struct module *mod,
+                      const char *name,
                       const char *val,
                       unsigned int min, unsigned int max,
                       void *elem, int elemsize,
@@ -421,7 +476,7 @@ static int param_array(const char *name,
                /* nul-terminate and parse */
                save = val[len];
                ((char *)val)[len] = '\0';
-               BUG_ON(!mutex_is_locked(&param_lock));
+               check_kparam_locked(mod);
                ret = set(val, &kp);

                if (ret != 0)
@@ -443,7 +498,7 @@ static int param_array_set(const char *val, const struct kernel_param *kp)
        const struct kparam_array *arr = kp->arr;
        unsigned int temp_num;

-       return param_array(kp->name, val, 1, arr->max, arr->elem,
+       return param_array(kp->mod, kp->name, val, 1, arr->max, arr->elem,
                           arr->elemsize, arr->ops->set, kp->level,
                           arr->num ?: &temp_num);
 }
@@ -452,14 +507,13 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
 {
        int i, off, ret;
        const struct kparam_array *arr = kp->arr;
-       struct kernel_param p;
+       struct kernel_param p = *kp;

-       p = *kp;
        for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
                if (i)
                        buffer[off++] = ',';
                p.arg = arr->elem + arr->elemsize * i;
-               BUG_ON(!mutex_is_locked(&param_lock));
+               check_kparam_locked(p.mod);
                ret = arr->ops->get(buffer + off, &p);
                if (ret < 0)
                        return ret;
@@ -479,7 +533,7 @@ static void param_array_free(void *arg)
                        arr->ops->free(arr->elem + arr->elemsize * i);
 }

-struct kernel_param_ops param_array_ops = {
+const struct kernel_param_ops param_array_ops = {
        .set = param_array_set,
        .get = param_array_get,
        .free = param_array_free,
@@ -507,7 +561,7 @@ int param_get_string(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_string);

-struct kernel_param_ops param_ops_string = {
+const struct kernel_param_ops param_ops_string = {
        .set = param_set_copystring,
        .get = param_get_string,
 };
@@ -542,9 +596,9 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
        if (!attribute->param->ops->get)
                return -EPERM;

-       mutex_lock(&param_lock);
+       kernel_param_lock(mk->mod);
        count = attribute->param->ops->get(buf, attribute->param);
-       mutex_unlock(&param_lock);
+       kernel_param_unlock(mk->mod);
        if (count > 0) {
                strcat(buf, "\n");
                ++count;
@@ -554,7 +608,7 @@ static ssize_t param_attr_show(struct module_attribute *mattr,

 /* sysfs always hands a nul-terminated string in buf. We rely on that. */
 static ssize_t param_attr_store(struct module_attribute *mattr,
-                               struct module_kobject *km,
+                               struct module_kobject *mk,
                                const char *buf, size_t len)
 {
        int err;
@@ -563,10 +617,10 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
        if (!attribute->param->ops->set)
                return -EPERM;

-       mutex_lock(&param_lock);
+       kernel_param_lock(mk->mod);
        param_check_unsafe(attribute->param);
        err = attribute->param->ops->set(buf, attribute->param);
-       mutex_unlock(&param_lock);
+       kernel_param_unlock(mk->mod);
        if (!err)
                return len;
        return err;
@@ -580,17 +634,18 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
 #endif

 #ifdef CONFIG_SYSFS
-void __kernel_param_lock(void)
+void kernel_param_lock(struct module *mod)
 {
-       mutex_lock(&param_lock);
+       mutex_lock(KPARAM_MUTEX(mod));
 }
-EXPORT_SYMBOL(__kernel_param_lock);

-void __kernel_param_unlock(void)
+void kernel_param_unlock(struct module *mod)
 {
-       mutex_unlock(&param_lock);
+       mutex_unlock(KPARAM_MUTEX(mod));
 }
-EXPORT_SYMBOL(__kernel_param_unlock);
+EXPORT_SYMBOL(kernel_param_lock);
+EXPORT_SYMBOL(kernel_param_unlock);

 /*
  * add_sysfs_param - add a parameter to sysfs
@@ -856,6 +911,7 @@ static void __init version_sysfs_builtin(void)
                mk = locate_module_kobject(vattr->module_name);
                if (mk) {
                        err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+                       WARN_ON_ONCE(err);
                        kobject_uevent(&mk->kobj, KOBJ_ADD);
                        kobject_put(&mk->kobj);
                }
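Now that bool_enable_only is generic, any module can declare a one-way switch that can be turned on at runtime but never back off, exactly as sig_enforce does in kernel/module.c above. A sketch with a hypothetical parameter name:

/* 'lockdown' is a hypothetical parameter name, for illustration only. */
static bool lockdown;
module_param(lockdown, bool_enable_only, 0644);

/*
 * echo 1 > /sys/module/<mod>/parameters/lockdown   -> succeeds
 * echo 0 > /sys/module/<mod>/parameters/lockdown   -> fails with -EROFS,
 * per param_set_bool_enable_only() above.
 */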
kernel/time/timekeeping.c

@@ -319,32 +319,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
  *
- * So we handle this differently than the other timekeeping accessor
- * functions which retry when the sequence count has changed. The
- * update side does:
- *
- * smp_wmb();  <- Ensure that the last base[1] update is visible
- * tkf->seq++;
- * smp_wmb();  <- Ensure that the seqcount update is visible
- * update(tkf->base[0], tkr);
- * smp_wmb();  <- Ensure that the base[0] update is visible
- * tkf->seq++;
- * smp_wmb();  <- Ensure that the seqcount update is visible
- * update(tkf->base[1], tkr);
- *
- * The reader side does:
- *
- * do {
- *     seq = tkf->seq;
- *     smp_rmb();
- *     idx = seq & 0x01;
- *     now = now(tkf->base[idx]);
- *     smp_rmb();
- * } while (seq != tkf->seq)
- *
- * As long as we update base[0] readers are forced off to
- * base[1]. Once base[0] is updated readers are redirected to base[0]
- * and the base[1] update takes place.
+ * Employ the latch technique; see @raw_write_seqcount_latch.
  *
  * So if a NMI hits the update of base[0] then it will use base[1]
  * which is still consistent. In the worst case this can result is a
@@ -407,7 +382,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
        u64 now;

        do {
-               seq = raw_read_seqcount(&tkf->seq);
+               seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
        } while (read_seqcount_retry(&tkf->seq, seq));
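The deleted comment is replaced by a reference to raw_write_seqcount_latch(), whose documentation (added in this same series) describes the structure the fast timekeeper now shares with the module tree: two copies of the data plus a seqcount. Lightly paraphrased pseudo-code from that documentation; modify() and data_query() are placeholders:

struct latch_struct {
        seqcount_t      seq;
        struct data     data[2];
};

/* writer: serialized externally; flips the latch between copy updates */
static void latch_modify(struct latch_struct *latch, ...)
{
        raw_write_seqcount_latch(&latch->seq);
        modify(&latch->data[0], ...);   /* readers are on data[1] now */
        raw_write_seqcount_latch(&latch->seq);
        modify(&latch->data[1], ...);   /* readers are back on data[0] */
}

/* reader: NMI-safe; picks a copy by the low bit, retries on a race */
static struct entry *latch_query(struct latch_struct *latch, ...)
{
        struct entry *entry;
        unsigned int seq, idx;

        do {
                seq = raw_read_seqcount_latch(&latch->seq);
                idx = seq & 0x01;
                entry = data_query(&latch->data[idx], ...);
        } while (read_seqcount_retry(&latch->seq, seq));

        return entry;
}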
kernel/workqueue.c

@@ -285,12 +285,7 @@ static bool wq_disable_numa;
 module_param_named(disable_numa, wq_disable_numa, bool, 0444);

 /* see the comment above the definition of WQ_POWER_EFFICIENT */
-#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
-static bool wq_power_efficient = true;
-#else
-static bool wq_power_efficient;
-#endif
-
+static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);

 static bool wq_numa_enabled;           /* unbound NUMA affinity enabled */
lib/bug.c

@@ -66,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        struct module *mod;
        const struct bug_entry *bug = NULL;

-       rcu_read_lock();
+       rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
                unsigned i;

@@ -77,7 +77,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        }
        bug = NULL;
 out:
-       rcu_read_unlock();
+       rcu_read_unlock_sched();

        return bug;
 }
@@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        char *secstrings;
        unsigned int i;

+       lockdep_assert_held(&module_mutex);
+
        mod->bug_table = NULL;
        mod->num_bugs = 0;

@@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,

 void module_bug_cleanup(struct module *mod)
 {
+       lockdep_assert_held(&module_mutex);
        list_del_rcu(&mod->bug_list);
 }
lib/rbtree.c

@@ -44,6 +44,30 @@
  * parentheses and have some accompanying text comment.
  */

+/*
+ * Notes on lockless lookups:
+ *
+ * All stores to the tree structure (rb_left and rb_right) must be done using
+ * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
+ * tree structure as seen in program order.
+ *
+ * These two requirements will allow lockless iteration of the tree -- not
+ * correct iteration mind you, tree rotations are not atomic so a lookup might
+ * miss entire subtrees.
+ *
+ * But they do guarantee that any such traversal will only see valid elements
+ * and that it will indeed complete -- does not get stuck in a loop.
+ *
+ * It also guarantees that if the lookup returns an element it is the 'correct'
+ * one. But not returning an element does _NOT_ mean it's not present.
+ *
+ * NOTE:
+ *
+ * Stores to __rb_parent_color are not important for simple lookups so those
+ * are left undone as of now. Nor did I check for loops involving parent
+ * pointers.
+ */
+
 static inline void rb_set_black(struct rb_node *rb)
 {
        rb->__rb_parent_color |= RB_BLACK;
@@ -129,8 +153,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                         * This still leaves us in violation of 4), the
                         * continuation into Case 3 will fix that.
                         */
-                       parent->rb_right = tmp = node->rb_left;
-                       node->rb_left = parent;
+                       tmp = node->rb_left;
+                       WRITE_ONCE(parent->rb_right, tmp);
+                       WRITE_ONCE(node->rb_left, parent);
                        if (tmp)
                                rb_set_parent_color(tmp, parent,
                                                    RB_BLACK);
@@ -149,8 +174,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                 *     / \
                 *    n   U
                 */
-               gparent->rb_left = tmp; /* == parent->rb_right */
-               parent->rb_right = gparent;
+               WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
+               WRITE_ONCE(parent->rb_right, gparent);
                if (tmp)
                        rb_set_parent_color(tmp, gparent, RB_BLACK);
                __rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -171,8 +196,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                tmp = parent->rb_left;
                if (node == tmp) {
                        /* Case 2 - right rotate at parent */
-                       parent->rb_left = tmp = node->rb_right;
-                       node->rb_right = parent;
+                       tmp = node->rb_right;
+                       WRITE_ONCE(parent->rb_left, tmp);
+                       WRITE_ONCE(node->rb_right, parent);
                        if (tmp)
                                rb_set_parent_color(tmp, parent,
                                                    RB_BLACK);
@@ -183,8 +209,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                }

                /* Case 3 - left rotate at gparent */
-               gparent->rb_right = tmp;        /* == parent->rb_left */
-               parent->rb_left = gparent;
+               WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
+               WRITE_ONCE(parent->rb_left, gparent);
                if (tmp)
                        rb_set_parent_color(tmp, gparent, RB_BLACK);
                __rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -224,8 +250,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                         *      / \         / \
                         *     Sl  Sr      N   Sl
                         */
-                       parent->rb_right = tmp1 = sibling->rb_left;
-                       sibling->rb_left = parent;
+                       tmp1 = sibling->rb_left;
+                       WRITE_ONCE(parent->rb_right, tmp1);
+                       WRITE_ONCE(sibling->rb_left, parent);
                        rb_set_parent_color(tmp1, parent, RB_BLACK);
                        __rb_rotate_set_parents(parent, sibling, root,
                                                RB_RED);
@@ -275,9 +302,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                         *                       \
                         *                        Sr
                         */
-                       sibling->rb_left = tmp1 = tmp2->rb_right;
-                       tmp2->rb_right = sibling;
-                       parent->rb_right = tmp2;
+                       tmp1 = tmp2->rb_right;
+                       WRITE_ONCE(sibling->rb_left, tmp1);
+                       WRITE_ONCE(tmp2->rb_right, sibling);
+                       WRITE_ONCE(parent->rb_right, tmp2);
                        if (tmp1)
                                rb_set_parent_color(tmp1, sibling,
                                                    RB_BLACK);
@@ -297,8 +325,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                 *      / \         / \
                 *    (sl) sr      N  (sl)
                 */
-               parent->rb_right = tmp2 = sibling->rb_left;
-               sibling->rb_left = parent;
+               tmp2 = sibling->rb_left;
+               WRITE_ONCE(parent->rb_right, tmp2);
+               WRITE_ONCE(sibling->rb_left, parent);
                rb_set_parent_color(tmp1, sibling, RB_BLACK);
                if (tmp2)
                        rb_set_parent(tmp2, parent);
@@ -310,8 +339,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                sibling = parent->rb_left;
                if (rb_is_red(sibling)) {
                        /* Case 1 - right rotate at parent */
-                       parent->rb_left = tmp1 = sibling->rb_right;
-                       sibling->rb_right = parent;
+                       tmp1 = sibling->rb_right;
+                       WRITE_ONCE(parent->rb_left, tmp1);
+                       WRITE_ONCE(sibling->rb_right, parent);
                        rb_set_parent_color(tmp1, parent, RB_BLACK);
                        __rb_rotate_set_parents(parent, sibling, root,
                                                RB_RED);
@@ -336,9 +366,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                                break;
                        }
                        /* Case 3 - right rotate at sibling */
-                       sibling->rb_right = tmp1 = tmp2->rb_left;
-                       tmp2->rb_left = sibling;
-                       parent->rb_left = tmp2;
+                       tmp1 = tmp2->rb_left;
+                       WRITE_ONCE(sibling->rb_right, tmp1);
+                       WRITE_ONCE(tmp2->rb_left, sibling);
+                       WRITE_ONCE(parent->rb_left, tmp2);
                        if (tmp1)
                                rb_set_parent_color(tmp1, sibling,
                                                    RB_BLACK);
@@ -347,8 +378,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                        sibling = tmp2;
                }
                /* Case 4 - left rotate at parent + color flips */
-               parent->rb_left = tmp2 = sibling->rb_right;
-               sibling->rb_right = parent;
+               tmp2 = sibling->rb_right;
+               WRITE_ONCE(parent->rb_left, tmp2);
+               WRITE_ONCE(sibling->rb_right, parent);
                rb_set_parent_color(tmp1, sibling, RB_BLACK);
                if (tmp2)
                        rb_set_parent(tmp2, parent);
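The WRITE_ONCE() conversions above are one half of the bargain; the other half is a reader that uses torn-free loads and tolerates missing a concurrently rotated subtree. A sketch of such a descent (simplified from the helper in the new include/linux/rbtree_latch.h):

static struct rb_node *lockless_find(struct rb_root *root, unsigned long key,
                                     int (*comp)(unsigned long key, struct rb_node *n))
{
        struct rb_node *node = READ_ONCE(root->rb_node);

        while (node) {
                int c = comp(key, node);

                if (c < 0)
                        node = READ_ONCE(node->rb_left);
                else if (c > 0)
                        node = READ_ONCE(node->rb_right);
                else
                        return node;
        }
        return NULL;    /* per the notes: this does NOT prove absence */
}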
net/mac80211/rate.c

@@ -103,7 +103,7 @@ ieee80211_rate_control_ops_get(const char *name)
        const struct rate_control_ops *ops;
        const char *alg_name;

-       kparam_block_sysfs_write(ieee80211_default_rc_algo);
+       kernel_param_lock(THIS_MODULE);
        if (!name)
                alg_name = ieee80211_default_rc_algo;
        else
@@ -117,7 +117,7 @@ ieee80211_rate_control_ops_get(const char *name)
        /* try built-in one if specific alg requested but not found */
        if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
                ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
-       kparam_unblock_sysfs_write(ieee80211_default_rc_algo);
+       kernel_param_unlock(THIS_MODULE);

        return ops;
 }
net/sunrpc/auth.c

@@ -72,7 +72,7 @@ static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp)

 #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);

-static struct kernel_param_ops param_ops_hashtbl_sz = {
+static const struct kernel_param_ops param_ops_hashtbl_sz = {
        .set = param_set_hashtbl_sz,
        .get = param_get_hashtbl_sz,
 };
net/sunrpc/xprtsock.c

@@ -2982,7 +2982,7 @@ static int param_set_portnr(const char *val, const struct kernel_param *kp)
                        RPC_MAX_RESVPORT);
 }

-static struct kernel_param_ops param_ops_portnr = {
+static const struct kernel_param_ops param_ops_portnr = {
        .set = param_set_portnr,
        .get = param_get_uint,
 };
@@ -3001,7 +3001,7 @@ static int param_set_slot_table_size(const char *val,
                        RPC_MAX_SLOT_TABLE);
 }

-static struct kernel_param_ops param_ops_slot_table_size = {
+static const struct kernel_param_ops param_ops_slot_table_size = {
        .set = param_set_slot_table_size,
        .get = param_get_uint,
 };
@@ -3017,7 +3017,7 @@ static int param_set_max_slot_table_size(const char *val,
                        RPC_MAX_SLOT_TABLE_LIMIT);
 }

-static struct kernel_param_ops param_ops_max_slot_table_size = {
+static const struct kernel_param_ops param_ops_max_slot_table_size = {
        .set = param_set_max_slot_table_size,
        .get = param_get_uint,
 };
security/apparmor/lsm.c

@@ -654,7 +654,7 @@ static struct security_hook_list apparmor_hooks[] = {
 static int param_set_aabool(const char *val, const struct kernel_param *kp);
 static int param_get_aabool(char *buffer, const struct kernel_param *kp);
 #define param_check_aabool param_check_bool
-static struct kernel_param_ops param_ops_aabool = {
+static const struct kernel_param_ops param_ops_aabool = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_aabool,
        .get = param_get_aabool
@@ -663,7 +663,7 @@ static struct kernel_param_ops param_ops_aabool = {
 static int param_set_aauint(const char *val, const struct kernel_param *kp);
 static int param_get_aauint(char *buffer, const struct kernel_param *kp);
 #define param_check_aauint param_check_uint
-static struct kernel_param_ops param_ops_aauint = {
+static const struct kernel_param_ops param_ops_aauint = {
        .set = param_set_aauint,
        .get = param_get_aauint
 };
@@ -671,7 +671,7 @@ static struct kernel_param_ops param_ops_aauint = {
 static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
 static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
 #define param_check_aalockpolicy param_check_bool
-static struct kernel_param_ops param_ops_aalockpolicy = {
+static const struct kernel_param_ops param_ops_aalockpolicy = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_aalockpolicy,
        .get = param_get_aalockpolicy
@@ -55,7 +55,7 @@ static int param_set_bufsize(const char *val, const struct kernel_param *kp)
        return 0;
 }

-static struct kernel_param_ops param_ops_bufsize = {
+static const struct kernel_param_ops param_ops_bufsize = {
        .set = param_set_bufsize,
        .get = param_get_uint,
 };
sound/pci/hda/hda_intel.c

@@ -171,7 +171,7 @@ MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "

 #ifdef CONFIG_PM
 static int param_set_xint(const char *val, const struct kernel_param *kp);
-static struct kernel_param_ops param_ops_xint = {
+static const struct kernel_param_ops param_ops_xint = {
        .set = param_set_xint,
        .get = param_get_int,
 };
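All of the constification hunks follow one pattern: a custom get/set pair plus a now-const ops table, wired up through module_param(). A sketch with a hypothetical 'myint' parameter (a real setter would validate 'val' before handing it to the generic int setter):

static int param_set_myint(const char *val, const struct kernel_param *kp)
{
        /* hypothetical: range-check here, then store via the generic setter */
        return param_set_int(val, kp);
}

static const struct kernel_param_ops param_ops_myint = {
        .set = param_set_myint,
        .get = param_get_int,
};
#define param_check_myint(name, p) __param_check(name, p, int)

static int myint = 42;
module_param(myint, myint, 0644);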