bitmap patches for 5.17-rc1

-----BEGIN PGP SIGNATURE-----
 
 iQHJBAABCgAzFiEEi8GdvG6xMhdgpu/4sUSA/TofvsgFAmHi+xgVHHl1cnkubm9y
 b3ZAZ21haWwuY29tAAoJELFEgP06H77IxdoMAMf3E+L51Ys/4iAiyJQNVoT3aIBC
 A8ZVOB9he1OA3o3wBNIRKmICHk+ovnfCWcXTr9fG/Ade2wJz88NAsGPQ1Phywb+s
 iGlpySllFN72RT9ZqtJhLEzgoHHOL0CzTW07TN9GJy4gQA2h2G9CTP+OmsQdnVqE
 m9Fn3PSlJ5lhzePlKfnln8rGZFgrriJakfEFPC79n/7an4+2Hvkb5rWigo7KQc4Z
 9YNqYUcHWZFUgq80adxEb9LlbMXdD+Z/8fCjOrAatuwVkD4RDt6iKD0mFGjHXGL7
 MZ9KRS8AfZXawmetk3jjtsV+/QkeS+Deuu7k0FoO0Th2QV7BGSDhsLXAS5By/MOC
 nfSyHhnXHzCsBMyVNrJHmNhEZoN29+tRwI84JX9lWcf/OLANcCofnP6f2UIX7tZY
 CAZAgVELp+0YQXdybrfzTQ8BT3TinjS/aZtCrYijRendI1GwUXcyl69vdOKqAHuk
 5jy8k/xHyp+ZWu6v+PyAAAEGowY++qhL0fmszA==
 =RKW4
 -----END PGP SIGNATURE-----

Merge tag 'bitmap-5.17-rc1' of git://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - introduce for_each_set_bitrange()

 - use find_first_*_bit() instead of find_next_*_bit() where possible

 - unify for_each_bit() macros
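
A rough usage sketch of the new interfaces follows (illustrative only, not part of the series; the helper example_dump() and its arguments are invented, while for_each_set_bitrange() is introduced by this series and find_first_bit() is what the conversions switch to):

#include <linux/bitmap.h>	/* also provides <linux/find.h> */
#include <linux/printk.h>

/* Hypothetical caller, shown only to illustrate the new helpers. */
static void example_dump(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit, rs, re;

	/* preferred over the old find_next_bit(map, nbits, 0) idiom */
	bit = find_first_bit(map, nbits);
	if (bit < nbits)
		pr_info("first set bit: %lu\n", bit);

	/* walk contiguous runs of set bits as half-open ranges [rs, re) */
	for_each_set_bitrange(rs, re, map, nbits)
		pr_info("set range: [%lu, %lu)\n", rs, re);
}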

* tag 'bitmap-5.17-rc1' of git://github.com/norov/linux:
  vsprintf: rework bitmap_list_string
  lib: bitmap: add performance test for bitmap_print_to_pagebuf
  bitmap: unify find_bit operations
  mm/percpu: micro-optimize pcpu_is_populated()
  Replace for_each_*_bit_from() with for_each_*_bit() where appropriate
  find: micro-optimize for_each_{set,clear}_bit()
  include/linux: move for_each_bit() macros from bitops.h to find.h
  cpumask: replace cpumask_next_* with cpumask_first_* where appropriate
  tools: sync tools/bitmap with mother linux
  all: replace find_next{,_zero}_bit with find_first{,_zero}_bit where appropriate
  cpumask: use find_first_and_bit()
  lib: add find_first_and_bit()
  arch: remove GENERIC_FIND_FIRST_BIT entirely
  include: move find.h from asm_generic to linux
  bitops: move find_bit_*_le functions from le.h to find.h
  bitops: protect find_first_{,zero}_bit properly
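
Of the entries above, find_first_and_bit() is the new primitive and cpumask_first_and() is reimplemented on top of it. A minimal sketch of the semantics, assuming a hypothetical caller that wants the lowest online CPU in a mask (the helper name first_online_cpu_in() is invented for illustration):

#include <linux/cpumask.h>	/* pulls in the bitmap/find helpers */

/* Illustrative helper, not taken from the series. */
static unsigned int first_online_cpu_in(const struct cpumask *mask)
{
	/*
	 * Behaves like the old cpumask_next_and(-1, mask, cpu_online_mask)
	 * call, but uses the dedicated find_first_and_bit() added here.
	 * Returns >= nr_cpu_ids if the two masks do not intersect.
	 */
	return cpumask_first_and(mask, cpu_online_mask);
}
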
Linus Torvalds 2022-01-23 06:20:44 +02:00
commit 3689f9f8b0
73 changed files with 635 additions and 438 deletions


@ -3410,14 +3410,14 @@ M: Yury Norov <yury.norov@gmail.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
S: Maintained
F: include/asm-generic/bitops/find.h
F: include/linux/bitmap.h
F: include/linux/find.h
F: lib/bitmap.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
F: tools/include/asm-generic/bitops/find.h
F: tools/include/linux/bitmap.h
F: tools/include/linux/find.h
F: tools/lib/bitmap.c
F: tools/lib/find_bit.c


@ -430,8 +430,6 @@ static inline unsigned int __arch_hweight8(unsigned int w)
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
/*


@ -20,7 +20,6 @@ config ARC
select COMMON_CLK
select DMA_DIRECT_REMAP
select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
select GENERIC_FIND_FIRST_BIT
# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP


@ -189,7 +189,6 @@ static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>


@ -264,7 +264,6 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
/*


@ -120,7 +120,6 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_FIND_FIRST_BIT
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_IPI
select GENERIC_IRQ_PROBE


@ -18,7 +18,6 @@
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>


@ -59,7 +59,6 @@ static __always_inline unsigned long __fls(unsigned long x)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly


@ -168,7 +168,6 @@ static inline unsigned long __ffs(unsigned long word)
return result;
}
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>


@ -271,7 +271,6 @@ static inline unsigned long __fls(unsigned long word)
}
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>


@ -441,8 +441,6 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>


@ -529,6 +529,4 @@ static inline int __fls(int x)
#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#endif /* _M68K_BITOPS_H */


@ -32,7 +32,6 @@ config MIPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE


@ -444,7 +444,6 @@ static inline int ffs(int word)
}
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__


@ -30,7 +30,6 @@
#include <asm/bitops/fls.h>
#include <asm/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly


@ -203,7 +203,6 @@ static __inline__ int fls(unsigned int x)
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>


@ -328,8 +328,6 @@ unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/hweight.h>
#endif
#include <asm-generic/bitops/find.h>
/* wrappers that deal with KASAN instrumentation */
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>


@ -375,7 +375,7 @@ int pasemi_dma_alloc_flag(void)
int bit;
retry:
bit = find_next_bit(flags_free, MAX_FLAGS, 0);
bit = find_first_bit(flags_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, flags_free))
@ -440,7 +440,7 @@ int pasemi_dma_alloc_fun(void)
int bit;
retry:
bit = find_next_bit(fun_free, MAX_FLAGS, 0);
bit = find_first_bit(fun_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, fun_free))


@ -20,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>


@ -127,7 +127,6 @@ config S390
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD


@ -387,7 +387,6 @@ static inline int fls(unsigned int word)
#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>


@ -1990,7 +1990,7 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
}
return ms->base_gfn + ofs;
}


@ -68,6 +68,5 @@ static inline unsigned long __ffs(unsigned long word)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/find.h>
#endif /* __ASM_SH_BITOPS_H */


@ -100,7 +100,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>


@ -52,8 +52,6 @@ unsigned int __arch_hweight8(unsigned int w);
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>


@ -137,7 +137,6 @@ config X86
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_ENTRY
select GENERIC_FIND_FIRST_BIT
select GENERIC_IOMAP
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select GENERIC_IRQ_MATRIX_ALLOCATOR if X86_LOCAL_APIC


@ -380,8 +380,6 @@ static __always_inline int fls64(__u64 x)
#include <asm-generic/bitops/fls64.h>
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>


@ -760,9 +760,9 @@ void __init lapic_update_legacy_vectors(void)
void __init lapic_assign_system_vectors(void)
{
unsigned int i, vector = 0;
unsigned int i, vector;
for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
for_each_set_bit(vector, system_vectors, NR_VECTORS)
irq_matrix_assign_system(vector_matrix, vector, false);
if (nr_legacy_irqs() > 1)


@ -8,7 +8,6 @@ endmenu
config UML_X86
def_bool y
select GENERIC_FIND_FIRST_BIT
config 64BIT
bool "64-bit kernel" if "$(SUBARCH)" = "x86"


@ -205,7 +205,6 @@ BIT_OPS(change, "xor", )
#undef BIT_OP
#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>


@ -3285,7 +3285,7 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
struct blk_mq_hw_ctx *hctx)
{
if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
return false;
if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
return false;


@ -196,7 +196,7 @@ rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
} else if (cpu != 0) {
/* Search from 0 to cpu */
bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
bit = find_first_bit(sess->cpu_queues_bm, cpu);
if (bit < cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
}


@ -1047,7 +1047,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
unsigned int i = 0;
unsigned int i;
dev_err(gpu->dev, "recover hung GPU!\n");
@ -1060,7 +1060,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
/* complete all events, the GPU won't do it after the reset */
spin_lock(&gpu->event_spinlock);
for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock(&gpu->event_spinlock);


@ -248,8 +248,7 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
gpio_status = reg;
gpio_nr = 0;
for_each_set_bit_from(gpio_nr, mask, LTC2992_GPIO_NR) {
for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (test_bit(LTC2992_GPIO_BIT(gpio_nr), &gpio_status))
set_bit(gpio_nr, bits);
}


@ -347,7 +347,7 @@ static int ad7124_find_free_config_slot(struct ad7124_state *st)
{
unsigned int free_cfg_slot;
free_cfg_slot = find_next_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS, 0);
free_cfg_slot = find_first_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS);
if (free_cfg_slot == AD7124_MAX_CONFIGS)
return -1;


@ -1709,14 +1709,14 @@ static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf)
*/
static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{
iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
iwdev->rf->max_pd, 0);
iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp, 0);
iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
iwdev->rf->max_cq, 0);
iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr, 0);
iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
iwdev->rf->max_pd);
iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
iwdev->rf->max_cq);
iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr);
}
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)


@ -106,7 +106,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES);
if (minor == CEC_NUM_DEVICES) {
mutex_unlock(&cec_devnode_lock);
pr_err("could not get a free minor\n");


@ -217,7 +217,7 @@ int __must_check media_devnode_register(struct media_device *mdev,
/* Part 1: Find a free minor number */
mutex_lock(&media_devnode_lock);
minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES);
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
pr_err("could not get a free minor\n");


@ -642,7 +642,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
* is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the
* center index as the tap, otherwise bail out.
*/
bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) {
for_each_set_bitrange(rs, re, bitmap, taps_size) {
if (re - rs > tap_cnt) {
tap_end = re;
tap_start = rs;


@ -2101,7 +2101,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
cpu = cpumask_next(-1, cpu_online_mask);
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);


@ -213,7 +213,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
if (!val)
return 0;
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
while (pos != MAX_MSI_IRQS_PER_CTRL) {
generic_handle_domain_irq(pp->irq_domain,
(index * MAX_MSI_IRQS_PER_CTRL) + pos);


@ -17982,8 +17982,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
phba->sli4_hba.max_cfg_param.max_xri, 0);
xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
phba->sli4_hba.max_cfg_param.max_xri);
if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
spin_unlock_irq(&phba->hbalock);
return NO_XRI;
@ -19660,7 +19660,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
@ -20303,8 +20303,8 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
* have been tested so that we can detect when we should
* change the priority level.
*/
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX);
}


@ -155,7 +155,7 @@ static int bman_portal_probe(struct platform_device *pdev)
}
spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
cpu = cpumask_first_zero(&portal_cpus);
if (cpu >= nr_cpu_ids) {
__bman_portals_probed = 1;
/* unassigned portal, skip init */


@ -248,7 +248,7 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->pools = qm_get_pools_sdqcr();
spin_lock(&qman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
cpu = cpumask_first_zero(&portal_cpus);
if (cpu >= nr_cpu_ids) {
__qman_portals_probed = 1;
/* unassigned portal, skip init */


@ -358,8 +358,8 @@ struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
goto out;
if (flags & K3_RINGACC_RING_USE_PROXY) {
proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
ringacc->num_proxies, 0);
proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
ringacc->num_proxies);
if (proxy_id == ringacc->num_proxies)
goto error;
}


@ -1938,7 +1938,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
more = n - (size - tail);
if (eol == N_TTY_BUF_SIZE && more) {
/* scan wrapped without finding set bit */
eol = find_next_bit(ldata->read_flags, more, 0);
eol = find_first_bit(ldata->read_flags, more);
found = eol != more;
} else
found = eol != size;


@ -246,8 +246,7 @@ void acrn_ioreq_request_clear(struct acrn_vm *vm)
spin_lock_bh(&vm->ioreq_clients_lock);
client = vm->default_client;
if (client) {
vcpu = find_next_bit(client->ioreqs_map,
ACRN_IO_REQUEST_MAX, 0);
vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
while (vcpu < ACRN_IO_REQUEST_MAX) {
acrn_ioreq_complete_request(client, vcpu, NULL);
vcpu = find_next_bit(client->ioreqs_map,


@ -2555,8 +2555,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
if (secno >= MAIN_SECS(sbi)) {
if (dir == ALLOC_RIGHT) {
secno = find_next_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi), 0);
secno = find_first_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi));
f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
} else {
go_left = 1;
@ -2571,8 +2571,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
left_start--;
continue;
}
left_start = find_next_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi), 0);
left_start = find_first_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi));
f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
break;
}


@ -379,7 +379,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
/* lowest node as master node to make negotiate decision. */
master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
master_node = find_first_bit(live_node_bitmap, O2NM_MAX_NODES);
if (master_node == o2nm_this_node()) {
if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {


@ -1045,7 +1045,7 @@ static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
int status, ret = 0, i;
char *p;
if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
goto bail;
qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL);
@ -1217,7 +1217,7 @@ static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
struct o2nm_node *node;
int ret = 0, status, count, i;
if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
goto bail;
qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);


@ -861,7 +861,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
* to see if there are any nodes that still need to be
* considered. these will not appear in the mle nodemap
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES) {
mlog(0, "%s: res %.*s, At least one node (%d) "
"to recover before lock mastery can begin\n",
@ -912,7 +912,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
dlm_wait_for_recovery(dlm);
spin_lock(&dlm->spinlock);
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES) {
mlog(0, "%s: res %.*s, At least one node (%d) "
"to recover before lock mastery can begin\n",
@ -1079,7 +1079,7 @@ static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
sleep = 1;
/* have all nodes responded? */
if (voting_done && !*blocked) {
bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (dlm->node_num <= bit) {
/* my node number is lowest.
* now tell other nodes that I am
@ -1234,8 +1234,8 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
} else {
mlog(ML_ERROR, "node down! %d\n", node);
if (blocked) {
int lowest = find_next_bit(mle->maybe_map,
O2NM_MAX_NODES, 0);
int lowest = find_first_bit(mle->maybe_map,
O2NM_MAX_NODES);
/* act like it was never there */
clear_bit(node, mle->maybe_map);
@ -1795,7 +1795,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
"MLE for it! (%.*s)\n", assert->node_idx,
namelen, name);
} else {
int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (bit >= O2NM_MAX_NODES) {
/* not necessarily an error, though less likely.
* could be master just re-asserting. */
@ -2521,7 +2521,7 @@ static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
}
if (!nonlocal) {
node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES);
if (node_ref >= O2NM_MAX_NODES)
return 0;
}
@ -3303,7 +3303,7 @@ static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
BUG_ON(mle->type != DLM_MLE_BLOCK);
spin_lock(&mle->spinlock);
bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (bit != dead_node) {
mlog(0, "mle found, but dead node %u would not have been "
"master\n", dead_node);
@ -3542,7 +3542,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm)
spin_lock(&dlm->master_lock);
BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES));
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);


@ -451,7 +451,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
int bit;
bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit >= O2NM_MAX_NODES || bit < 0)
dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
else


@ -92,7 +92,7 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
return 0;
/* Another node has this resource with this node as the master */
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES)
return 0;


@ -20,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly


@ -2,83 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
#include <asm-generic/bitops/find.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/swab.h>
#if defined(__LITTLE_ENDIAN)
#define BITOP_LE_SWIZZLE 0
static inline unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
static inline unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
static inline unsigned long find_first_zero_bit_le(const void *addr,
unsigned long size)
{
return find_first_zero_bit(addr, size);
}
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#ifndef find_next_zero_bit_le
static inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val = *(const unsigned long *)addr;
if (unlikely(offset >= size))
return size;
val = swab(val) | ~GENMASK(size - 1, offset);
return val == ~0UL ? size : ffz(val);
}
return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
}
#endif
#ifndef find_next_bit_le
static inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val = *(const unsigned long *)addr;
if (unlikely(offset >= size))
return size;
val = swab(val) & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
}
#endif
#ifndef find_first_zero_bit_le
#define find_first_zero_bit_le(addr, size) \
find_next_zero_bit_le((addr), (size), 0)
#endif
#else
#error "Please fix <asm/byteorder.h>"
#endif
static inline int test_bit_le(int nr, const void *addr)
{


@ -6,6 +6,7 @@
#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
@ -54,12 +55,6 @@ struct device;
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
* bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region
* bitmap_next_set_region(map, &start, &end, nbits) Find next set region
* bitmap_for_each_clear_region(map, rs, re, start, end)
* Iterate over all clear regions
* bitmap_for_each_set_region(map, rs, re, start, end)
* Iterate over all set regions
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
@ -466,14 +461,6 @@ static inline void bitmap_replace(unsigned long *dst,
__bitmap_replace(dst, old, new, mask, nbits);
}
static inline void bitmap_next_clear_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
{
*rs = find_next_zero_bit(bitmap, end, *rs);
*re = find_next_bit(bitmap, end, *rs + 1);
}
static inline void bitmap_next_set_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
@ -482,25 +469,6 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
/*
* Bitmap region iterators. Iterates over the bitmap between [@start, @end).
* @rs and @re should be integer variables and will be set to start and end
* index of the current clear or set region.
*/
#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
for ((rs) = (start), \
bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
(rs) < (re); \
(rs) = (re) + 1, \
bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
for ((rs) = (start), \
bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
(rs) < (re); \
(rs) = (re) + 1, \
bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value


@ -32,40 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w);
*/
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
#define for_each_clear_bit(bit, addr, size) \
for ((bit) = find_first_zero_bit((addr), (size)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/**
* for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
* @start: bit offset to start search and to store the current iteration offset
* @clump: location to store copy of current 8-bit clump
* @bits: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_clump8(start, clump, bits, size) \
for ((start) = find_first_clump8(&(clump), (bits), (size)); \
(start) < (size); \
(start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
static inline int get_bitmask_order(unsigned int count)
{
int order;


@ -123,6 +123,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
return 0;
}
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return 0;
}
static inline unsigned int cpumask_first_and(const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
return 0;
}
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return 0;
@ -167,7 +178,7 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p) {
return cpumask_next_and(-1, src1p, src2p);
return cpumask_first_and(src1p, src2p);
}
static inline int cpumask_any_distribute(const struct cpumask *srcp)
@ -195,6 +206,30 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
/**
* cpumask_first_zero - get the first unset cpu in a cpumask
* @srcp: the cpumask pointer
*
* Returns >= nr_cpu_ids if all cpus are set.
*/
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
/**
* cpumask_first_and - return the first cpu from *srcp1 & *srcp2
* @src1p: the first input
* @src2p: the second input
*
* Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
}
/**
* cpumask_last - get the last CPU in a cpumask
* @srcp: - the cpumask pointer
@ -585,15 +620,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
*/
#define cpumask_any(srcp) cpumask_first(srcp)
/**
* cpumask_first_and - return the first cpu from *srcp1 & *srcp2
* @src1p: the first input
* @src2p: the second input
*
* Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
/**
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
* @mask1: the first input cpumask

include/linux/find.h (new file, 372 lines)

@ -0,0 +1,372 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_FIND_H_
#define __LINUX_FIND_H_
#ifndef __LINUX_BITMAP_H
#error only <linux/bitmap.h> can be included directly
#endif
#include <linux/bitops.h>
extern unsigned long _find_next_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
unsigned long start, unsigned long invert, unsigned long le);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
}
#endif
#ifndef find_next_and_bit
/**
* find_next_and_bit - find the next set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr1 & *addr2 & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
}
#endif
#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
static inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr | ~GENMASK(size - 1, offset);
return val == ~0UL ? size : ffz(val);
}
return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
}
#endif
#ifndef find_first_bit
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
static inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_bit(addr, size);
}
#endif
#ifndef find_first_and_bit
/**
* find_first_and_bit - find the first set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_and_bit(addr1, addr2, size);
}
#endif
#ifndef find_first_zero_bit
/**
* find_first_zero_bit - find the first cleared bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
static inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr | ~GENMASK(size - 1, 0);
return val == ~0UL ? size : ffz(val);
}
return _find_first_zero_bit(addr, size);
}
#endif
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The number of bits to search
*
* Returns the bit number of the last set bit, or size.
*/
static inline
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr & GENMASK(size - 1, 0);
return val ? __fls(val) : size;
}
return _find_last_bit(addr, size);
}
#endif
/**
* find_next_clump8 - find next 8-bit clump with set bits in a memory region
* @clump: location to store copy of found clump
* @addr: address to base the search on
* @size: bitmap size in number of bits
* @offset: bit offset at which to start searching
*
* Returns the bit offset for the next set clump; the found clump value is
* copied to the location pointed by @clump. If no bits are set, returns @size.
*/
extern unsigned long find_next_clump8(unsigned long *clump,
const unsigned long *addr,
unsigned long size, unsigned long offset);
#define find_first_clump8(clump, bits, size) \
find_next_clump8((clump), (bits), (size), 0)
#if defined(__LITTLE_ENDIAN)
static inline unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
static inline unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
static inline unsigned long find_first_zero_bit_le(const void *addr,
unsigned long size)
{
return find_first_zero_bit(addr, size);
}
#elif defined(__BIG_ENDIAN)
#ifndef find_next_zero_bit_le
static inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val = *(const unsigned long *)addr;
if (unlikely(offset >= size))
return size;
val = swab(val) | ~GENMASK(size - 1, offset);
return val == ~0UL ? size : ffz(val);
}
return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
}
#endif
#ifndef find_next_bit_le
static inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val = *(const unsigned long *)addr;
if (unlikely(offset >= size))
return size;
val = swab(val) & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
}
#endif
#ifndef find_first_zero_bit_le
#define find_first_zero_bit_le(addr, size) \
find_next_zero_bit_le((addr), (size), 0)
#endif
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), 0); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
#define for_each_clear_bit(bit, addr, size) \
for ((bit) = find_next_zero_bit((addr), (size), 0); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/**
* for_each_set_bitrange - iterate over all set bit ranges [b; e)
* @b: bit offset of start of current bitrange (first set bit)
* @e: bit offset of end of current bitrange (first unset bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_bitrange(b, e, addr, size) \
for ((b) = find_next_bit((addr), (size), 0), \
(e) = find_next_zero_bit((addr), (size), (b) + 1); \
(b) < (size); \
(b) = find_next_bit((addr), (size), (e) + 1), \
(e) = find_next_zero_bit((addr), (size), (b) + 1))
/**
* for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
* @b: bit offset of start of current bitrange (first set bit); must be initialized
* @e: bit offset of end of current bitrange (first unset bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_bitrange_from(b, e, addr, size) \
for ((b) = find_next_bit((addr), (size), (b)), \
(e) = find_next_zero_bit((addr), (size), (b) + 1); \
(b) < (size); \
(b) = find_next_bit((addr), (size), (e) + 1), \
(e) = find_next_zero_bit((addr), (size), (b) + 1))
/**
* for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
* @b: bit offset of start of current bitrange (first unset bit)
* @e: bit offset of end of current bitrange (first set bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_clear_bitrange(b, e, addr, size) \
for ((b) = find_next_zero_bit((addr), (size), 0), \
(e) = find_next_bit((addr), (size), (b) + 1); \
(b) < (size); \
(b) = find_next_zero_bit((addr), (size), (e) + 1), \
(e) = find_next_bit((addr), (size), (b) + 1))
/**
* for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
* @b: bit offset of start of current bitrange (first unset bit); must be initialized
* @e: bit offset of end of current bitrange (first set bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_clear_bitrange_from(b, e, addr, size) \
for ((b) = find_next_zero_bit((addr), (size), (b)), \
(e) = find_next_bit((addr), (size), (b) + 1); \
(b) < (size); \
(b) = find_next_zero_bit((addr), (size), (e) + 1), \
(e) = find_next_bit((addr), (size), (b) + 1))
/**
* for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
* @start: bit offset to start search and to store the current iteration offset
* @clump: location to store copy of current 8-bit clump
* @bits: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_clump8(start, clump, bits, size) \
for ((start) = find_first_clump8(&(clump), (bits), (size)); \
(start) < (size); \
(start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
#endif /*__LINUX_FIND_H_ */


@ -285,7 +285,7 @@ static void clocksource_verify_choose_cpus(void)
return;
/* Make sure to select at least one CPU other than the current CPU. */
cpu = cpumask_next(-1, cpu_online_mask);
cpu = cpumask_first(cpu_online_mask);
if (cpu == smp_processor_id())
cpu = cpumask_next(cpu, cpu_online_mask);
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
@ -307,7 +307,7 @@ static void clocksource_verify_choose_cpus(void)
cpu = prandom_u32() % nr_cpu_ids;
cpu = cpumask_next(cpu - 1, cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_next(-1, cpu_online_mask);
cpu = cpumask_first(cpu_online_mask);
if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
cpumask_set_cpu(cpu, &cpus_chosen);
}


@ -65,9 +65,6 @@ config GENERIC_STRNLEN_USER
config GENERIC_NET_UTILS
bool
config GENERIC_FIND_FIRST_BIT
bool
source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP


@ -89,6 +89,27 @@ unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
EXPORT_SYMBOL(_find_first_bit);
#endif
#ifndef find_first_and_bit
/*
* Find the first set bit in two memory regions.
*/
unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
unsigned long idx, val;
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
val = addr1[idx] & addr2[idx];
if (val)
return min(idx * BITS_PER_LONG + __ffs(val), size);
}
return size;
}
EXPORT_SYMBOL(_find_first_and_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.


@ -49,6 +49,25 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len)
return 0;
}
static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len)
{
static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata;
unsigned long i, cnt;
ktime_t time;
bitmap_copy(cp, bitmap, BITMAP_LEN);
time = ktime_get();
for (cnt = i = 0; i < len; cnt++) {
i = find_first_and_bit(cp, bitmap2, len);
__clear_bit(i, cp);
}
time = ktime_get() - time;
pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
@ -129,6 +148,7 @@ static int __init find_bit_test(void)
* traverse only part of bitmap to avoid soft lockup.
*/
test_find_first_bit(bitmap, BITMAP_LEN / 10);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
@ -145,6 +165,7 @@ static int __init find_bit_test(void)
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
/*


@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool)
list_del(&chunk->next_chunk);
end_bit = chunk_size(chunk) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
bit = find_first_bit(chunk->bits, end_bit);
BUG_ON(bit < end_bit);
vfree(chunk);


@ -446,6 +446,42 @@ static void __init test_bitmap_parselist(void)
}
}
static void __init test_bitmap_printlist(void)
{
unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
char expected[256];
int ret, slen;
ktime_t time;
if (!buf || !bmap)
goto out;
memset(bmap, -1, PAGE_SIZE);
slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
if (slen < 0)
goto out;
time = ktime_get();
ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
time = ktime_get() - time;
if (ret != slen + 1) {
pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
goto out;
}
if (strncmp(buf, expected, slen)) {
pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
goto out;
}
pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
out:
kfree(buf);
kfree(bmap);
}
static const unsigned long parse_test[] __initconst = {
BITMAP_FROM_U64(0),
BITMAP_FROM_U64(1),
@ -818,6 +854,7 @@ static void __init selftest(void)
test_bitmap_arr32();
test_bitmap_parse();
test_bitmap_parselist();
test_bitmap_printlist();
test_mem_optimisations();
test_for_each_set_clump8();
test_bitmap_cut();


@ -1241,20 +1241,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
bool first = true;
int rbot, rtop;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
rbot = cur = find_first_bit(bitmap, nr_bits);
while (cur < nr_bits) {
rtop = cur;
cur = find_next_bit(bitmap, nr_bits, cur + 1);
if (cur < nr_bits && cur <= rtop + 1)
continue;
for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
if (!first) {
if (buf < end)
*buf = ',';
@ -1263,15 +1256,12 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
first = false;
buf = number(buf, end, rbot, default_dec_spec);
if (rbot < rtop) {
if (buf < end)
*buf = '-';
buf++;
if (rtop == rbot + 1)
continue;
buf = number(buf, end, rtop, default_dec_spec);
}
rbot = cur;
if (buf < end)
*buf = '-';
buf = number(++buf, end, rtop - 1, default_dec_spec);
}
return buf;
}


@ -779,7 +779,7 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
struct pcpu_block_md *block = chunk->md_blocks + index;
unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
unsigned int rs, re, start; /* region start, region end */
unsigned int start, end; /* region start, region end */
/* promote scan_hint to contig_hint */
if (block->scan_hint) {
@ -795,9 +795,8 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
block->right_free = 0;
/* iterate over free areas and update the contig hints */
bitmap_for_each_clear_region(alloc_map, rs, re, start,
PCPU_BITMAP_BLOCK_BITS)
pcpu_block_update(block, rs, re);
for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
pcpu_block_update(block, start, end);
}
/**
@ -1070,17 +1069,18 @@ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
int *next_off)
{
unsigned int page_start, page_end, rs, re;
unsigned int start, end;
page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
rs = page_start;
bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
if (rs >= page_end)
start = find_next_zero_bit(chunk->populated, end, start);
if (start >= end)
return true;
*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
end = find_next_bit(chunk->populated, end, start + 1);
*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
return false;
}
@ -1851,13 +1851,12 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
/* populate if not all pages are already there */
if (!is_atomic) {
unsigned int page_start, page_end, rs, re;
unsigned int page_end, rs, re;
page_start = PFN_DOWN(off);
rs = PFN_DOWN(off);
page_end = PFN_UP(off + size);
bitmap_for_each_clear_region(chunk->populated, rs, re,
page_start, page_end) {
for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
WARN_ON(chunk->immutable);
ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
@ -2013,8 +2012,7 @@ static void pcpu_balance_free(bool empty_only)
list_for_each_entry_safe(chunk, next, &to_free, list) {
unsigned int rs, re;
bitmap_for_each_set_region(chunk->populated, rs, re, 0,
chunk->nr_pages) {
for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
pcpu_depopulate_chunk(chunk, rs, re);
spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, rs, re);
@ -2084,8 +2082,7 @@ static void pcpu_balance_populated(void)
continue;
/* @chunk can't go away while pcpu_alloc_mutex is held */
bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
chunk->nr_pages) {
for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
int nr = min_t(int, re - rs, nr_to_pop);
spin_unlock_irq(&pcpu_lock);


@ -608,7 +608,7 @@ static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
bitmap = &ncf->bitmap;
spin_lock_irqsave(&nc->lock, flags);
index = find_next_bit(bitmap, ncf->n_vids, 0);
index = find_first_bit(bitmap, ncf->n_vids);
if (index >= ncf->n_vids) {
spin_unlock_irqrestore(&nc->lock, flags);
return -1;
@ -667,7 +667,7 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
return -1;
}
index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
index = find_first_zero_bit(bitmap, ncf->n_vids);
if (index < 0 || index >= ncf->n_vids) {
netdev_err(ndp->ndev.dev,
"Channel %u already has all VLAN filters set\n",


@ -18,7 +18,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#ifndef _TOOLS_LINUX_BITOPS_H_
#error only <linux/bitops.h> can be included directly


@ -1,145 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
extern unsigned long _find_next_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
unsigned long start, unsigned long invert, unsigned long le);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
}
#endif
#ifndef find_next_and_bit
/**
* find_next_and_bit - find the next set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr1 & *addr2 & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
}
#endif
#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
static inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr | ~GENMASK(size - 1, offset);
return val == ~0UL ? size : ffz(val);
}
return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
}
#endif
#ifndef find_first_bit
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
static inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_bit(addr, size);
}
#endif /* find_first_bit */
#ifndef find_first_zero_bit
/**
* find_first_zero_bit - find the first cleared bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
static inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr | ~GENMASK(size - 1, 0);
return val == ~0UL ? size : ffz(val);
}
return _find_first_zero_bit(addr, size);
}
#endif
#endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */


@ -1,9 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PERF_BITOPS_H
#define _PERF_BITOPS_H
#ifndef _TOOLS_LINUX_BITMAP_H
#define _TOOLS_LINUX_BITMAP_H
#include <string.h>
#include <linux/bitops.h>
#include <linux/find.h>
#include <stdlib.h>
#include <linux/kernel.h>
@ -181,4 +182,4 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
#endif /* _PERF_BITOPS_H */
#endif /* _TOOLS_LINUX_BITMAP_H */


@ -1,11 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
#ifndef _TOOLS_LINUX_FIND_H_
#define _TOOLS_LINUX_FIND_H_
#ifndef _TOOLS_LINUX_BITMAP_H
#error tools: only <linux/bitmap.h> can be included directly
#endif
#include <linux/bitops.h>
extern unsigned long _find_next_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
unsigned long start, unsigned long invert, unsigned long le);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
@ -95,8 +103,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
}
#endif
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
#ifndef find_first_bit
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
@ -116,7 +123,34 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
return _find_first_bit(addr, size);
}
#endif
#ifndef find_first_and_bit
/**
* find_first_and_bit - find the first set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_and_bit(addr1, addr2, size);
}
#endif
#ifndef find_first_zero_bit
/**
* find_first_zero_bit - find the first cleared bit in a memory region
* @addr: The address to start the search at
@ -136,16 +170,7 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
return _find_first_zero_bit(addr, size);
}
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
#ifndef find_first_bit
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#endif
#ifndef find_first_zero_bit
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
#ifndef find_last_bit
/**
@ -185,4 +210,5 @@ extern unsigned long find_next_clump8(unsigned long *clump,
#define find_first_clump8(clump, bits, size) \
find_next_clump8((clump), (bits), (size), 0)
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
#endif /*__LINUX_FIND_H_ */


@ -96,6 +96,26 @@ unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
}
#endif
#ifndef find_first_and_bit
/*
* Find the first set bit in two memory regions.
*/
unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
unsigned long idx, val;
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
val = addr1[idx] & addr2[idx];
if (val)
return min(idx * BITS_PER_LONG + __ffs(val), size);
}
return size;
}
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.