Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge tag 'mm-nonmm-stable-2024-07-21-15-07' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull non-MM updates from Andrew Morton:

 - In the series "treewide: Refactor heap related implementation",
   Kuan-Wei Chiu has significantly reworked the min_heap library code
   and has taught bcachefs to use the new more generic implementation.

 - Yury Norov's series "Cleanup cpumask.h inclusion in core headers"
   reworks the cpumask and nodemask headers to make things generally
   more rational.

 - Kuan-Wei Chiu has sent along some maintenance work against our
   sorting library code in the series "lib/sort: Optimizations and
   cleanups".

 - More library maintenance work from Christophe Jaillet in the series
   "Remove usage of the deprecated ida_simple_xx() API".

 - Ryusuke Konishi continues with the nilfs2 fixes and cleanups in the
   series "nilfs2: eliminate the call to inode_attach_wb()".

 - Kuan-Ying Lee has some fixes to the gdb scripts in the series "Fix
   GDB command error".

 - Plus the usual shower of singleton patches all over the place.
   Please see the relevant changelogs for details.

* tag 'mm-nonmm-stable-2024-07-21-15-07' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (98 commits)
  ia64: scrub ia64 from poison.h
  watchdog/perf: properly initialize the turbo mode timestamp and rearm counter
  tsacct: replace strncpy() with strscpy()
  lib/bch.c: use swap() to improve code
  test_bpf: convert comma to semicolon
  init/modpost: conditionally check section mismatch to __meminit*
  init: remove unused __MEMINIT* macros
  nilfs2: Constify struct kobj_type
  nilfs2: avoid undefined behavior in nilfs_cnt32_ge macro
  math: rational: add missing MODULE_DESCRIPTION() macro
  lib/zlib: add missing MODULE_DESCRIPTION() macro
  fs: ufs: add MODULE_DESCRIPTION()
  lib/rbtree.c: fix the example typo
  ocfs2: add bounds checking to ocfs2_check_dir_entry()
  fs: add kernel-doc comments to ocfs2_prepare_orphan_dir()
  coredump: simplify zap_process()
  selftests/fpu: add missing MODULE_DESCRIPTION() macro
  compiler.h: simplify data_race() macro
  build-id: require program headers to be right after ELF header
  resource: add missing MODULE_DESCRIPTION()
  ...
commit 527eff227d
@@ -3801,8 +3801,10 @@ F: include/linux/bitmap-str.h
 F: include/linux/bitmap.h
 F: include/linux/bits.h
 F: include/linux/cpumask.h
+F: include/linux/cpumask_types.h
 F: include/linux/find.h
 F: include/linux/nodemask.h
+F: include/linux/nodemask_types.h
 F: include/vdso/bits.h
 F: lib/bitmap-str.c
 F: lib/bitmap.c
@@ -123,8 +123,6 @@ SECTIONS
 */
 *(.sfpr);
 *(.text.asan.* .text.tsan.*)
-MEM_KEEP(init.text)
-MEM_KEEP(exit.text)
 } :text
 
 . = ALIGN(PAGE_SIZE);
@@ -656,17 +656,16 @@ static int occ_probe(struct platform_device *pdev)
 rc = of_property_read_u32(dev->of_node, "reg", &reg);
 if (!rc) {
 /* make sure we don't have a duplicate from dts */
-occ->idx = ida_simple_get(&occ_ida, reg, reg + 1,
+occ->idx = ida_alloc_range(&occ_ida, reg, reg,
 GFP_KERNEL);
 if (occ->idx < 0)
-occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
+occ->idx = ida_alloc_min(&occ_ida, 1,
 GFP_KERNEL);
 } else {
-occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
-GFP_KERNEL);
+occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL);
 }
 } else {
-occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, GFP_KERNEL);
+occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL);
 }
 
 platform_set_drvdata(pdev, occ);
@@ -680,7 +679,7 @@ static int occ_probe(struct platform_device *pdev)
 rc = misc_register(&occ->mdev);
 if (rc) {
 dev_err(dev, "failed to register miscdevice: %d\n", rc);
-ida_simple_remove(&occ_ida, occ->idx);
+ida_free(&occ_ida, occ->idx);
 kvfree(occ->buffer);
 return rc;
 }
@@ -719,7 +718,7 @@ static int occ_remove(struct platform_device *pdev)
 else
 device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);
 
-ida_simple_remove(&occ_ida, occ->idx);
+ida_free(&occ_ida, occ->idx);
 
 return 0;
 }
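The hunks above illustrate the conversion pattern from the "Remove usage of the deprecated ida_simple_xx() API" series mentioned in the pull summary. A minimal, illustrative sketch of the mapping follows; "example_ida" and the helper functions are hypothetical, not part of the patch. Note that ida_simple_get()'s end argument was exclusive, while ida_alloc_range()'s max is inclusive.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_get_id(unsigned int reg)
{
	int id;

	/* was: ida_simple_get(&example_ida, reg, reg + 1, GFP_KERNEL) */
	id = ida_alloc_range(&example_ida, reg, reg, GFP_KERNEL);
	if (id < 0)
		/* was: ida_simple_get(&example_ida, 1, INT_MAX, GFP_KERNEL) */
		id = ida_alloc_min(&example_ida, 1, GFP_KERNEL);

	return id;
}

static void example_put_id(int id)
{
	/* was: ida_simple_remove(&example_ida, id) */
	ida_free(&example_ida, id);
}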
@@ -340,7 +340,7 @@ static int eb_create(struct i915_execbuffer *eb)
 * Without a 1:1 association between relocation handles and
 * the execobject[] index, we instead create a hashtable.
 * We size it dynamically based on available memory, starting
-* first with 1:1 assocative hash and scaling back until
+* first with 1:1 associative hash and scaling back until
 * the allocation succeeds.
 *
 * Later on we use a positive lut_size to indicate we are
@@ -164,40 +164,68 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */
 
-#define bucket_prio(b) \
-({ \
-unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
-\
-(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
-})
+static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
+{
+unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;
 
-#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
-#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
+return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
+}
 
+static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
+{
+struct bucket **lhs = (struct bucket **)l;
+struct bucket **rhs = (struct bucket **)r;
+struct cache *ca = args;
+
+return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
+}
+
+static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
+{
+struct bucket **lhs = (struct bucket **)l;
+struct bucket **rhs = (struct bucket **)r;
+struct cache *ca = args;
+
+return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
+}
+
+static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
+{
+struct bucket **lhs = l, **rhs = r;
+
+swap(*lhs, *rhs);
+}
+
 static void invalidate_buckets_lru(struct cache *ca)
 {
 struct bucket *b;
-ssize_t i;
+const struct min_heap_callbacks bucket_max_cmp_callback = {
+.less = new_bucket_max_cmp,
+.swp = new_bucket_swap,
+};
+const struct min_heap_callbacks bucket_min_cmp_callback = {
+.less = new_bucket_min_cmp,
+.swp = new_bucket_swap,
+};
 
-ca->heap.used = 0;
+ca->heap.nr = 0;
 
 for_each_bucket(b, ca) {
 if (!bch_can_invalidate_bucket(ca, b))
 continue;
 
-if (!heap_full(&ca->heap))
-heap_add(&ca->heap, b, bucket_max_cmp);
-else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+if (!min_heap_full(&ca->heap))
+min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
+else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
 ca->heap.data[0] = b;
-heap_sift(&ca->heap, 0, bucket_max_cmp);
+min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
 }
 }
 
-for (i = ca->heap.used / 2 - 1; i >= 0; --i)
-heap_sift(&ca->heap, i, bucket_min_cmp);
+min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);
 
 while (!fifo_full(&ca->free_inc)) {
-if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+if (!ca->heap.nr) {
 /*
 * We don't want to be calling invalidate_buckets()
 * multiple times when it can't do anything
@@ -206,6 +234,8 @@ static void invalidate_buckets_lru(struct cache *ca)
 wake_up_gc(ca->set);
 return;
 }
+b = min_heap_peek(&ca->heap)[0];
+min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);
 
 bch_invalidate_one_bucket(ca, b);
 }
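The bcache hunks above follow the generic pattern of the reworked min_heap library: define a typed heap with DEFINE_MIN_HEAP(), supply .less/.swp callbacks that take an extra args pointer, and drive it with min_heap_push()/min_heap_peek()/min_heap_pop(). A minimal sketch of that pattern follows; the "int_heap" type, the backing array and the values are hypothetical and only meant to show the API shape used by the conversion.

#include <linux/min_heap.h>

DEFINE_MIN_HEAP(int, int_heap);

static bool int_less(const void *l, const void *r, void __always_unused *args)
{
	return *(const int *)l < *(const int *)r;
}

static void int_swap(void *l, void *r, void __always_unused *args)
{
	swap(*(int *)l, *(int *)r);
}

static const struct min_heap_callbacks int_heap_cb = {
	.less = int_less,
	.swp = int_swap,
};

static int example_min(void)
{
	static int backing[8];
	struct int_heap heap;
	int v = 3;

	min_heap_init(&heap, backing, ARRAY_SIZE(backing));
	min_heap_push(&heap, &v, &int_heap_cb, NULL);	/* insert one element */

	v = min_heap_peek(&heap)[0];			/* smallest element */
	min_heap_pop(&heap, &int_heap_cb, NULL);	/* remove it */

	return v;
}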
@@ -458,7 +458,7 @@ struct cache {
 /* Allocation stuff: */
 struct bucket *buckets;
 
-DECLARE_HEAP(struct bucket *, heap);
+DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap;
 
 /*
 * If nonzero, we know we aren't going to find any buckets to invalidate
@@ -54,9 +54,11 @@ void bch_dump_bucket(struct btree_keys *b)
 int __bch_count_data(struct btree_keys *b)
 {
 unsigned int ret = 0;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct bkey *k;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 if (b->ops->is_extents)
 for_each_key(b, k, &iter)
 ret += KEY_SIZE(k);
@@ -67,9 +69,11 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
 {
 va_list args;
 struct bkey *k, *p = NULL;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 const char *err;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 for_each_key(b, k, &iter) {
 if (b->ops->is_extents) {
 err = "Keys out of order";
@@ -110,9 +114,9 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
 
 static void bch_btree_iter_next_check(struct btree_iter *iter)
 {
-struct bkey *k = iter->data->k, *next = bkey_next(k);
+struct bkey *k = iter->heap.data->k, *next = bkey_next(k);
 
-if (next < iter->data->end &&
+if (next < iter->heap.data->end &&
 bkey_cmp(k, iter->b->ops->is_extents ?
 &START_KEY(next) : next) > 0) {
 bch_dump_bucket(iter->b);
@@ -879,12 +883,14 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
 unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
 struct bset *i = bset_tree_last(b)->data;
 struct bkey *m, *prev = NULL;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct bkey preceding_key_on_stack = ZERO_KEY;
 struct bkey *preceding_key_p = &preceding_key_on_stack;
 
 BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 /*
 * If k has preceding key, preceding_key_p will be set to address
 * of k's preceding key; otherwise preceding_key_p will be set
@@ -895,9 +901,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
 else
 preceding_key(k, &preceding_key_p);
 
-m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+m = bch_btree_iter_init(b, &iter, preceding_key_p);
 
-if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+if (b->ops->insert_fixup(b, k, &iter, replace_key))
 return status;
 
 status = BTREE_INSERT_STATUS_INSERT;
@@ -1077,79 +1083,102 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
 
 /* Btree iterator */
 
-typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
-struct btree_iter_set);
+typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);
 
-static inline bool btree_iter_cmp(struct btree_iter_set l,
-struct btree_iter_set r)
+static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
 {
-return bkey_cmp(l.k, r.k) > 0;
+const struct btree_iter_set *_l = l;
+const struct btree_iter_set *_r = r;
+
+return bkey_cmp(_l->k, _r->k) <= 0;
+}
+
+static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
+{
+struct btree_iter_set *_iter1 = iter1;
+struct btree_iter_set *_iter2 = iter2;
+
+swap(*_iter1, *_iter2);
 }
 
 static inline bool btree_iter_end(struct btree_iter *iter)
 {
-return !iter->used;
+return !iter->heap.nr;
 }
 
 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
 struct bkey *end)
 {
+const struct min_heap_callbacks callbacks = {
+.less = new_btree_iter_cmp,
+.swp = new_btree_iter_swap,
+};
+
 if (k != end)
-BUG_ON(!heap_add(iter,
-((struct btree_iter_set) { k, end }),
-btree_iter_cmp));
+BUG_ON(!min_heap_push(&iter->heap,
+&((struct btree_iter_set) { k, end }),
+&callbacks,
+NULL));
 }
 
-static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
-struct btree_iter_stack *iter,
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+struct btree_iter *iter,
 struct bkey *search,
 struct bset_tree *start)
 {
 struct bkey *ret = NULL;
 
-iter->iter.size = ARRAY_SIZE(iter->stack_data);
-iter->iter.used = 0;
+iter->heap.size = ARRAY_SIZE(iter->heap.preallocated);
+iter->heap.nr = 0;
 
 #ifdef CONFIG_BCACHE_DEBUG
-iter->iter.b = b;
+iter->b = b;
 #endif
 
 for (; start <= bset_tree_last(b); start++) {
 ret = bch_bset_search(b, start, search);
-bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
 }
 
 return ret;
 }
 
-struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
-struct btree_iter_stack *iter,
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+struct btree_iter *iter,
 struct bkey *search)
 {
-return __bch_btree_iter_stack_init(b, iter, search, b->set);
+return __bch_btree_iter_init(b, iter, search, b->set);
 }
 
 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
-btree_iter_cmp_fn *cmp)
+new_btree_iter_cmp_fn *cmp)
 {
 struct btree_iter_set b __maybe_unused;
 struct bkey *ret = NULL;
+const struct min_heap_callbacks callbacks = {
+.less = cmp,
+.swp = new_btree_iter_swap,
+};
 
 if (!btree_iter_end(iter)) {
 bch_btree_iter_next_check(iter);
 
-ret = iter->data->k;
-iter->data->k = bkey_next(iter->data->k);
+ret = iter->heap.data->k;
+iter->heap.data->k = bkey_next(iter->heap.data->k);
 
-if (iter->data->k > iter->data->end) {
+if (iter->heap.data->k > iter->heap.data->end) {
 WARN_ONCE(1, "bset was corrupt!\n");
-iter->data->k = iter->data->end;
+iter->heap.data->k = iter->heap.data->end;
 }
 
-if (iter->data->k == iter->data->end)
-heap_pop(iter, b, cmp);
+if (iter->heap.data->k == iter->heap.data->end) {
+if (iter->heap.nr) {
+b = min_heap_peek(&iter->heap)[0];
+min_heap_pop(&iter->heap, &callbacks, NULL);
+}
+}
 else
-heap_sift(iter, 0, cmp);
+min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
 }
 
 return ret;
@@ -1157,7 +1186,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
 
 struct bkey *bch_btree_iter_next(struct btree_iter *iter)
 {
-return __bch_btree_iter_next(iter, btree_iter_cmp);
+return __bch_btree_iter_next(iter, new_btree_iter_cmp);
 
 }
 
@@ -1195,16 +1224,18 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
 struct btree_iter *iter,
 bool fixup, bool remove_stale)
 {
-int i;
 struct bkey *k, *last = NULL;
 BKEY_PADDED(k) tmp;
 bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
 ? bch_ptr_bad
 : bch_ptr_invalid;
+const struct min_heap_callbacks callbacks = {
+.less = b->ops->sort_cmp,
+.swp = new_btree_iter_swap,
+};
 
 /* Heapify the iterator, using our comparison function */
-for (i = iter->used / 2 - 1; i >= 0; --i)
-heap_sift(iter, i, b->ops->sort_cmp);
+min_heapify_all(&iter->heap, &callbacks, NULL);
 
 while (!btree_iter_end(iter)) {
 if (b->ops->sort_fixup && fixup)
@@ -1293,10 +1324,11 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
 struct bset_sort_state *state)
 {
 size_t order = b->page_order, keys = 0;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 int oldsize = bch_count_data(b);
 
-__bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
 
 if (start) {
 unsigned int i;
@@ -1307,7 +1339,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
 order = get_order(__set_bytes(b->set->data, keys));
 }
 
-__btree_sort(b, &iter.iter, start, order, false, state);
+__btree_sort(b, &iter, start, order, false, state);
 
 EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
 }
@@ -1323,11 +1355,13 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
 struct bset_sort_state *state)
 {
 uint64_t start_time = local_clock();
-struct btree_iter_stack iter;
+struct btree_iter iter;
 
-bch_btree_iter_stack_init(b, &iter, NULL);
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
 
-btree_mergesort(b, new->set->data, &iter.iter, false, true);
+bch_btree_iter_init(b, &iter, NULL);
 
+btree_mergesort(b, new->set->data, &iter, false, true);
+
 bch_time_stats_update(&state->time, start_time);
 
@@ -187,8 +187,9 @@ struct bset_tree {
 };
 
 struct btree_keys_ops {
-bool (*sort_cmp)(struct btree_iter_set l,
-struct btree_iter_set r);
+bool (*sort_cmp)(const void *l,
+const void *r,
+void *args);
 struct bkey *(*sort_fixup)(struct btree_iter *iter,
 struct bkey *tmp);
 bool (*insert_fixup)(struct btree_keys *b,
@@ -312,23 +313,17 @@ enum {
 BTREE_INSERT_STATUS_FRONT_MERGE,
 };
 
+struct btree_iter_set {
+struct bkey *k, *end;
+};
+
 /* Btree key iteration */
 
 struct btree_iter {
-size_t size, used;
 #ifdef CONFIG_BCACHE_DEBUG
 struct btree_keys *b;
 #endif
-struct btree_iter_set {
-struct bkey *k, *end;
-} data[];
-};
-
-/* Fixed-size btree_iter that can be allocated on the stack */
-
-struct btree_iter_stack {
-struct btree_iter iter;
-struct btree_iter_set stack_data[MAX_BSETS];
+MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap;
 };
 
 typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
@@ -340,9 +335,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 
 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
 struct bkey *end);
-struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
-struct btree_iter_stack *iter,
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+struct btree_iter *iter,
 struct bkey *search);
 
 struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
 const struct bkey *search);
@@ -357,14 +352,13 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
 return search ? __bch_bset_search(b, t, search) : t->data->start;
 }
 
-#define for_each_key_filter(b, k, stack_iter, filter) \
-for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
-((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
-filter));)
+#define for_each_key_filter(b, k, iter, filter) \
+for (bch_btree_iter_init((b), (iter), NULL); \
+((k) = bch_btree_iter_next_filter((iter), (b), filter));)
 
-#define for_each_key(b, k, stack_iter) \
-for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
-((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+#define for_each_key(b, k, iter) \
+for (bch_btree_iter_init((b), (iter), NULL); \
+((k) = bch_btree_iter_next(iter));)
 
 /* Sorting */
 
@@ -149,19 +149,19 @@ void bch_btree_node_read_done(struct btree *b)
 {
 const char *err = "bad btree header";
 struct bset *i = btree_bset_first(b);
-struct btree_iter *iter;
+struct btree_iter iter;
 
 /*
 * c->fill_iter can allocate an iterator with more memory space
 * than static MAX_BSETS.
 * See the comment arount cache_set->fill_iter.
 */
-iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
-iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
-iter->used = 0;
+iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+iter.heap.nr = 0;
 
 #ifdef CONFIG_BCACHE_DEBUG
-iter->b = &b->keys;
+iter.b = &b->keys;
 #endif
 
 if (!i->seq)
@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b)
 if (i != b->keys.set[0].data && !i->keys)
 goto err;
 
-bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));
 
 b->written += set_blocks(i, block_bytes(b->c->cache));
 }
@@ -211,7 +211,7 @@ void bch_btree_node_read_done(struct btree *b)
 if (i->seq == b->keys.set[0].data->seq)
 goto err;
 
-bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
+bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);
 
 i = b->keys.set[0].data;
 err = "short btree key";
@@ -223,7 +223,7 @@ void bch_btree_node_read_done(struct btree *b)
 bch_bset_init_next(&b->keys, write_block(b),
 bset_magic(&b->c->cache->sb));
 out:
-mempool_free(iter, &b->c->fill_iter);
+mempool_free(iter.heap.data, &b->c->fill_iter);
 return;
 err:
 set_btree_node_io_error(b);
@@ -1309,9 +1309,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 uint8_t stale = 0;
 unsigned int keys = 0, good_keys = 0;
 struct bkey *k;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct bset_tree *t;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 gc->nodes++;
 
 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
@@ -1570,9 +1572,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
 static unsigned int btree_gc_count_keys(struct btree *b)
 {
 struct bkey *k;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 unsigned int ret = 0;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
 ret += bkey_u64s(k);
 
@@ -1611,18 +1615,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 int ret = 0;
 bool should_rewrite;
 struct bkey *k;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct gc_merge_info r[GC_MERGE_NODES];
 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
 
-bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
 
 for (i = r; i < r + ARRAY_SIZE(r); i++)
 i->b = ERR_PTR(-EINTR);
 
 while (1) {
-k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
-bch_ptr_bad);
+k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
 if (k) {
 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
 true, b);
@@ -1917,7 +1921,9 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
 {
 int ret = 0;
 struct bkey *k, *p = NULL;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
 bch_initial_mark_key(b->c, b->level, k);
@@ -1925,10 +1931,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
 bch_initial_mark_key(b->c, b->level + 1, &b->key);
 
 if (b->level) {
-bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+bch_btree_iter_init(&b->keys, &iter, NULL);
 
 do {
-k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+k = bch_btree_iter_next_filter(&iter, &b->keys,
 bch_ptr_bad);
 if (k) {
 btree_node_prefetch(b, k);
@@ -1956,7 +1962,7 @@ static int bch_btree_check_thread(void *arg)
 struct btree_check_info *info = arg;
 struct btree_check_state *check_state = info->state;
 struct cache_set *c = check_state->c;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct bkey *k, *p;
 int cur_idx, prev_idx, skip_nr;
 
@@ -1964,9 +1970,11 @@ static int bch_btree_check_thread(void *arg)
 cur_idx = prev_idx = 0;
 ret = 0;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 /* root node keys are checked before thread created */
-bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
-k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+bch_btree_iter_init(&c->root->keys, &iter, NULL);
+k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
 BUG_ON(!k);
 
 p = k;
@@ -1984,7 +1992,7 @@ static int bch_btree_check_thread(void *arg)
 skip_nr = cur_idx - prev_idx;
 
 while (skip_nr) {
-k = bch_btree_iter_next_filter(&iter.iter,
+k = bch_btree_iter_next_filter(&iter,
 &c->root->keys,
 bch_ptr_bad);
 if (k)
@@ -2057,9 +2065,11 @@ int bch_btree_check(struct cache_set *c)
 int ret = 0;
 int i;
 struct bkey *k = NULL;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct btree_check_state check_state;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 /* check and mark root node keys */
 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
 bch_initial_mark_key(c, c->root->level, k);
@@ -2553,11 +2563,12 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
 
 if (b->level) {
 struct bkey *k;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 
-bch_btree_iter_stack_init(&b->keys, &iter, from);
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+bch_btree_iter_init(&b->keys, &iter, from);
 
-while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
 bch_ptr_bad))) {
 ret = bcache_btree(map_nodes_recurse, k, b,
 op, from, fn, flags);
@@ -2586,12 +2597,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 {
 int ret = MAP_CONTINUE;
 struct bkey *k;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 
-bch_btree_iter_stack_init(&b->keys, &iter, from);
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+bch_btree_iter_init(&b->keys, &iter, from);
 
-while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
-bch_ptr_bad))) {
+while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
 ret = !b->level
 ? fn(op, b, k)
 : bcache_btree(map_keys_recurse, k,
@@ -33,15 +33,16 @@ static void sort_key_next(struct btree_iter *iter,
 i->k = bkey_next(i->k);
 
 if (i->k == i->end)
-*i = iter->data[--iter->used];
+*i = iter->heap.data[--iter->heap.nr];
 }
 
-static bool bch_key_sort_cmp(struct btree_iter_set l,
-struct btree_iter_set r)
+static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
 {
-int64_t c = bkey_cmp(l.k, r.k);
+struct btree_iter_set *_l = (struct btree_iter_set *)l;
+struct btree_iter_set *_r = (struct btree_iter_set *)r;
+int64_t c = bkey_cmp(_l->k, _r->k);
 
-return c ? c > 0 : l.k < r.k;
+return !(c ? c > 0 : _l->k < _r->k);
 }
 
 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
@@ -238,7 +239,7 @@ static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
 }
 
 const struct btree_keys_ops bch_btree_keys_ops = {
-.sort_cmp = bch_key_sort_cmp,
+.sort_cmp = new_bch_key_sort_cmp,
 .insert_fixup = bch_btree_ptr_insert_fixup,
 .key_invalid = bch_btree_ptr_invalid,
 .key_bad = bch_btree_ptr_bad,
@@ -255,22 +256,36 @@ const struct btree_keys_ops bch_btree_keys_ops = {
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
-static bool bch_extent_sort_cmp(struct btree_iter_set l,
-struct btree_iter_set r)
-{
-int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
 
-return c ? c > 0 : l.k < r.k;
+static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
+{
+struct btree_iter_set *_l = (struct btree_iter_set *)l;
+struct btree_iter_set *_r = (struct btree_iter_set *)r;
+int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));
+
+return !(c ? c > 0 : _l->k < _r->k);
+}
+
+static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
+{
+struct btree_iter_set *_iter1 = iter1;
+struct btree_iter_set *_iter2 = iter2;
+
+swap(*_iter1, *_iter2);
 }
 
 static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 struct bkey *tmp)
 {
-while (iter->used > 1) {
-struct btree_iter_set *top = iter->data, *i = top + 1;
+const struct min_heap_callbacks callbacks = {
+.less = new_bch_extent_sort_cmp,
+.swp = new_btree_iter_swap,
+};
+while (iter->heap.nr > 1) {
+struct btree_iter_set *top = iter->heap.data, *i = top + 1;
 
-if (iter->used > 2 &&
-bch_extent_sort_cmp(i[0], i[1]))
+if (iter->heap.nr > 2 &&
+!new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
 i++;
 
 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
@@ -278,7 +293,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 
 if (!KEY_SIZE(i->k)) {
 sort_key_next(iter, i);
-heap_sift(iter, i - top, bch_extent_sort_cmp);
+min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
 continue;
 }
 
@@ -288,7 +303,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 else
 bch_cut_front(top->k, i->k);
 
-heap_sift(iter, i - top, bch_extent_sort_cmp);
+min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
 } else {
 /* can't happen because of comparison func */
 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
@@ -298,7 +313,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 
 bch_cut_back(&START_KEY(i->k), tmp);
 bch_cut_front(i->k, top->k);
-heap_sift(iter, 0, bch_extent_sort_cmp);
+min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
 
 return tmp;
 } else {
@@ -618,7 +633,7 @@ static bool bch_extent_merge(struct btree_keys *bk,
 }
 
 const struct btree_keys_ops bch_extent_keys_ops = {
-.sort_cmp = bch_extent_sort_cmp,
+.sort_cmp = new_bch_extent_sort_cmp,
 .sort_fixup = bch_extent_sort_fixup,
 .insert_fixup = bch_extent_insert_fixup,
 .key_invalid = bch_extent_invalid,
@@ -182,16 +182,27 @@ err: if (!IS_ERR_OR_NULL(w->private))
 closure_sync(&cl);
 }
 
-static bool bucket_cmp(struct bucket *l, struct bucket *r)
+static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
 {
-return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+struct bucket **_l = (struct bucket **)l;
+struct bucket **_r = (struct bucket **)r;
+
+return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
+}
+
+static void new_bucket_swap(void *l, void *r, void __always_unused *args)
+{
+struct bucket **_l = l;
+struct bucket **_r = r;
+
+swap(*_l, *_r);
 }
 
 static unsigned int bucket_heap_top(struct cache *ca)
 {
 struct bucket *b;
 
-return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
 }
 
 void bch_moving_gc(struct cache_set *c)
@@ -199,6 +210,10 @@ void bch_moving_gc(struct cache_set *c)
 struct cache *ca = c->cache;
 struct bucket *b;
 unsigned long sectors_to_move, reserve_sectors;
+const struct min_heap_callbacks callbacks = {
+.less = new_bucket_cmp,
+.swp = new_bucket_swap,
+};
 
 if (!c->copy_gc_enabled)
 return;
@@ -209,7 +224,7 @@ void bch_moving_gc(struct cache_set *c)
 reserve_sectors = ca->sb.bucket_size *
 fifo_used(&ca->free[RESERVE_MOVINGGC]);
 
-ca->heap.used = 0;
+ca->heap.nr = 0;
 
 for_each_bucket(b, ca) {
 if (GC_MARK(b) == GC_MARK_METADATA ||
@@ -218,25 +233,31 @@ void bch_moving_gc(struct cache_set *c)
 atomic_read(&b->pin))
 continue;
 
-if (!heap_full(&ca->heap)) {
+if (!min_heap_full(&ca->heap)) {
 sectors_to_move += GC_SECTORS_USED(b);
-heap_add(&ca->heap, b, bucket_cmp);
-} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+min_heap_push(&ca->heap, &b, &callbacks, NULL);
+} else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
 sectors_to_move -= bucket_heap_top(ca);
 sectors_to_move += GC_SECTORS_USED(b);
 
 ca->heap.data[0] = b;
-heap_sift(&ca->heap, 0, bucket_cmp);
+min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
 }
 }
 
 while (sectors_to_move > reserve_sectors) {
-heap_pop(&ca->heap, b, bucket_cmp);
+if (ca->heap.nr) {
+b = min_heap_peek(&ca->heap)[0];
+min_heap_pop(&ca->heap, &callbacks, NULL);
+}
 sectors_to_move -= GC_SECTORS_USED(b);
 }
 
-while (heap_pop(&ca->heap, b, bucket_cmp))
+while (ca->heap.nr) {
+b = min_heap_peek(&ca->heap)[0];
+min_heap_pop(&ca->heap, &callbacks, NULL);
 SET_GC_MOVE(b, 1);
+}
 
 mutex_unlock(&c->bucket_lock);
 
@@ -1907,8 +1907,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 INIT_LIST_HEAD(&c->btree_cache_freed);
 INIT_LIST_HEAD(&c->data_buckets);
 
-iter_size = sizeof(struct btree_iter) +
-((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
 sizeof(struct btree_iter_set);
 
 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
@@ -660,7 +660,9 @@ static unsigned int bch_root_usage(struct cache_set *c)
 unsigned int bytes = 0;
 struct bkey *k;
 struct btree *b;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 goto lock_root;
 
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
-* random utiility code, for bcache but in theory not specific to bcache
+* random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/sched/clock.h>
 #include <linux/llist.h>
+#include <linux/min_heap.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
@@ -30,16 +31,10 @@ struct closure;
 
 #endif
 
-#define DECLARE_HEAP(type, name) \
-struct { \
-size_t size, used; \
-type *data; \
-} name
-
 #define init_heap(heap, _size, gfp) \
 ({ \
 size_t _bytes; \
-(heap)->used = 0; \
+(heap)->nr = 0; \
 (heap)->size = (_size); \
 _bytes = (heap)->size * sizeof(*(heap)->data); \
 (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
@@ -52,64 +47,6 @@ do { \
 (heap)->data = NULL; \
 } while (0)
 
-#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])
-
-#define heap_sift(h, i, cmp) \
-do { \
-size_t _r, _j = i; \
-\
-for (; _j * 2 + 1 < (h)->used; _j = _r) { \
-_r = _j * 2 + 1; \
-if (_r + 1 < (h)->used && \
-cmp((h)->data[_r], (h)->data[_r + 1])) \
-_r++; \
-\
-if (cmp((h)->data[_r], (h)->data[_j])) \
-break; \
-heap_swap(h, _r, _j); \
-} \
-} while (0)
-
-#define heap_sift_down(h, i, cmp) \
-do { \
-while (i) { \
-size_t p = (i - 1) / 2; \
-if (cmp((h)->data[i], (h)->data[p])) \
-break; \
-heap_swap(h, i, p); \
-i = p; \
-} \
-} while (0)
-
-#define heap_add(h, d, cmp) \
-({ \
-bool _r = !heap_full(h); \
-if (_r) { \
-size_t _i = (h)->used++; \
-(h)->data[_i] = d; \
-\
-heap_sift_down(h, _i, cmp); \
-heap_sift(h, _i, cmp); \
-} \
-_r; \
-})
-
-#define heap_pop(h, d, cmp) \
-({ \
-bool _r = (h)->used; \
-if (_r) { \
-(d) = (h)->data[0]; \
-(h)->used--; \
-heap_swap(h, 0, (h)->used); \
-heap_sift(h, 0, cmp); \
-} \
-_r; \
-})
-
-#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
-
-#define heap_full(h) ((h)->used == (h)->size)
-
 #define DECLARE_FIFO(type, name) \
 struct { \
 size_t front, back, size, mask; \
@@ -908,15 +908,16 @@ static int bch_dirty_init_thread(void *arg)
 struct dirty_init_thrd_info *info = arg;
 struct bch_dirty_init_state *state = info->state;
 struct cache_set *c = state->c;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct bkey *k, *p;
 int cur_idx, prev_idx, skip_nr;
 
 k = p = NULL;
 prev_idx = 0;
 
-bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
-k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+bch_btree_iter_init(&c->root->keys, &iter, NULL);
+k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
 BUG_ON(!k);
 
 p = k;
@@ -930,7 +931,7 @@ static int bch_dirty_init_thread(void *arg)
 skip_nr = cur_idx - prev_idx;
 
 while (skip_nr) {
-k = bch_btree_iter_next_filter(&iter.iter,
+k = bch_btree_iter_next_filter(&iter,
 &c->root->keys,
 bch_ptr_bad);
 if (k)
@@ -979,11 +980,13 @@ void bch_sectors_dirty_init(struct bcache_device *d)
 int i;
 struct btree *b = NULL;
 struct bkey *k = NULL;
-struct btree_iter_stack iter;
+struct btree_iter iter;
 struct sectors_dirty_init op;
 struct cache_set *c = d->c;
 struct bch_dirty_init_state state;
 
+min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
 retry_lock:
 b = c->root;
 rw_lock(0, b, b->level);
@@ -51,6 +51,8 @@ struct recovery_point {
 bool increment_applied;
 };
 
+DEFINE_MIN_HEAP(struct numbered_block_mapping, replay_heap);
+
 struct repair_completion {
 /* The completion header */
 struct vdo_completion completion;
@@ -97,7 +99,7 @@ struct repair_completion {
 * order, then original journal order. This permits efficient iteration over the journal
 * entries in order.
 */
-struct min_heap replay_heap;
+struct replay_heap replay_heap;
 /* Fields tracking progress through the journal entries. */
 struct numbered_block_mapping *current_entry;
 struct numbered_block_mapping *current_unfetched_entry;
@@ -135,7 +137,7 @@ struct repair_completion {
 * to sort by slot while still ensuring we replay all entries with the same slot in the exact order
 * as they appeared in the journal.
 */
-static bool mapping_is_less_than(const void *item1, const void *item2)
+static bool mapping_is_less_than(const void *item1, const void *item2, void __always_unused *args)
 {
 const struct numbered_block_mapping *mapping1 =
 (const struct numbered_block_mapping *) item1;
@@ -154,7 +156,7 @@ static bool mapping_is_less_than(const void *item1, const void *item2)
 return 0;
 }
 
-static void swap_mappings(void *item1, void *item2)
+static void swap_mappings(void *item1, void *item2, void __always_unused *args)
 {
 struct numbered_block_mapping *mapping1 = item1;
 struct numbered_block_mapping *mapping2 = item2;
@@ -163,14 +165,13 @@ static void swap_mappings(void *item1, void *item2)
 }
 
 static const struct min_heap_callbacks repair_min_heap = {
-.elem_size = sizeof(struct numbered_block_mapping),
 .less = mapping_is_less_than,
 .swp = swap_mappings,
 };
 
 static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair)
 {
-struct min_heap *heap = &repair->replay_heap;
+struct replay_heap *heap = &repair->replay_heap;
 struct numbered_block_mapping *last;
 
 if (heap->nr == 0)
@@ -181,8 +182,8 @@ static struct numbered_block_mapping *sort_next_heap_element(struct repair_compl
 * restore the heap invariant, and return a pointer to the popped element.
 */
 last = &repair->entries[--heap->nr];
-swap_mappings(heap->data, last);
-min_heapify(heap, 0, &repair_min_heap);
+swap_mappings(heap->data, last, NULL);
+min_heap_sift_down(heap, 0, &repair_min_heap, NULL);
 return last;
 }
 
@@ -1116,12 +1117,12 @@ static void recover_block_map(struct vdo_completion *completion)
 * Organize the journal entries into a binary heap so we can iterate over them in sorted
 * order incrementally, avoiding an expensive sort call.
 */
-repair->replay_heap = (struct min_heap) {
+repair->replay_heap = (struct replay_heap) {
 .data = repair->entries,
 .nr = repair->block_map_entry_count,
 .size = repair->block_map_entry_count,
 };
-min_heapify_all(&repair->replay_heap, &repair_min_heap);
+min_heapify_all(&repair->replay_heap, &repair_min_heap, NULL);
 
 vdo_log_info("Replaying %zu recovery entries into block map",
 repair->block_map_entry_count);
@@ -3288,7 +3288,8 @@ int vdo_release_block_reference(struct block_allocator *allocator,
  * Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
  * before larger ones.
  */
-static bool slab_status_is_less_than(const void *item1, const void *item2)
+static bool slab_status_is_less_than(const void *item1, const void *item2,
+                    void __always_unused *args)
 {
     const struct slab_status *info1 = item1;
     const struct slab_status *info2 = item2;
@@ -3300,7 +3301,7 @@ static bool slab_status_is_less_than(const void *item1, const void *item2)
     return info1->slab_number < info2->slab_number;
 }
 
-static void swap_slab_statuses(void *item1, void *item2)
+static void swap_slab_statuses(void *item1, void *item2, void __always_unused *args)
 {
     struct slab_status *info1 = item1;
     struct slab_status *info2 = item2;
@@ -3309,7 +3310,6 @@ static void swap_slab_statuses(void *item1, void *item2)
 }
 
 static const struct min_heap_callbacks slab_status_min_heap = {
-    .elem_size = sizeof(struct slab_status),
     .less = slab_status_is_less_than,
     .swp = swap_slab_statuses,
 };
@@ -3509,7 +3509,7 @@ static int get_slab_statuses(struct block_allocator *allocator,
 static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator *allocator)
 {
     struct slab_status current_slab_status;
-    struct min_heap heap;
+    DEFINE_MIN_HEAP(struct slab_status, heap) heap;
     int result;
     struct slab_status *slab_statuses;
     struct slab_depot *depot = allocator->depot;
@@ -3521,12 +3521,12 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator
         return result;
 
     /* Sort the slabs by cleanliness, then by emptiness hint. */
-    heap = (struct min_heap) {
+    heap = (struct heap) {
         .data = slab_statuses,
         .nr = allocator->slab_count,
         .size = allocator->slab_count,
     };
-    min_heapify_all(&heap, &slab_status_min_heap);
+    min_heapify_all(&heap, &slab_status_min_heap, NULL);
 
     while (heap.nr > 0) {
         bool high_priority;
@@ -3534,7 +3534,7 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator
         struct slab_journal *journal;
 
         current_slab_status = slab_statuses[0];
-        min_heap_pop(&heap, &slab_status_min_heap);
+        min_heap_pop(&heap, &slab_status_min_heap, NULL);
         slab = depot->slabs[current_slab_status.slab_number];
 
         if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) ||

@@ -1286,7 +1286,7 @@ int most_register_interface(struct most_interface *iface)
         !iface->poison_channel || (iface->num_channels > MAX_CHANNELS))
         return -EINVAL;
 
-    id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+    id = ida_alloc(&mdev_id, GFP_KERNEL);
     if (id < 0) {
         dev_err(iface->dev, "Failed to allocate device ID\n");
         return id;
@@ -1294,7 +1294,7 @@ int most_register_interface(struct most_interface *iface)
 
     iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
     if (!iface->p) {
-        ida_simple_remove(&mdev_id, id);
+        ida_free(&mdev_id, id);
         return -ENOMEM;
     }
 
@@ -1308,7 +1308,7 @@ int most_register_interface(struct most_interface *iface)
         dev_err(iface->dev, "Failed to register interface device\n");
         kfree(iface->p);
         put_device(iface->dev);
-        ida_simple_remove(&mdev_id, id);
+        ida_free(&mdev_id, id);
         return -ENOMEM;
     }
 
@@ -1366,7 +1366,7 @@ int most_register_interface(struct most_interface *iface)
         }
         kfree(iface->p);
         device_unregister(iface->dev);
-        ida_simple_remove(&mdev_id, id);
+        ida_free(&mdev_id, id);
         return -ENOMEM;
     }
 EXPORT_SYMBOL_GPL(most_register_interface);
@@ -1397,7 +1397,7 @@ void most_deregister_interface(struct most_interface *iface)
         device_unregister(&c->dev);
     }
 
-    ida_simple_remove(&mdev_id, iface->p->dev_id);
+    ida_free(&mdev_id, iface->p->dev_id);
     kfree(iface->p);
     device_unregister(iface->dev);
 }

@@ -100,7 +100,7 @@ static void destroy_cdev(struct comp_channel *c)
 
 static void destroy_channel(struct comp_channel *c)
 {
-    ida_simple_remove(&comp.minor_id, MINOR(c->devno));
+    ida_free(&comp.minor_id, MINOR(c->devno));
     kfifo_free(&c->fifo);
     kfree(c);
 }
@@ -425,7 +425,7 @@ static int comp_probe(struct most_interface *iface, int channel_id,
     if (c)
         return -EEXIST;
 
-    current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
+    current_minor = ida_alloc(&comp.minor_id, GFP_KERNEL);
     if (current_minor < 0)
         return current_minor;
 
@@ -472,7 +472,7 @@ static int comp_probe(struct most_interface *iface, int channel_id,
 err_free_c:
     kfree(c);
 err_remove_ida:
-    ida_simple_remove(&comp.minor_id, current_minor);
+    ida_free(&comp.minor_id, current_minor);
     return retval;
 }
 

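[ Note on the API used above: the ida_simple_get()/ida_simple_remove() calls in the two MOST hunks are replaced by the plain IDA interface, as part of the "Remove usage of the deprecated ida_simple_xx() API" series mentioned in the merge message; ida_simple_get(ida, 0, 0, gfp) is equivalent to ida_alloc(ida, gfp) and ida_simple_remove() to ida_free(). A minimal usage sketch, for reference only and not part of the patch (the "example_*" names are invented for illustration):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);    /* hypothetical allocator, for illustration */

static int example_get_id(void)
{
    /* Allocate the lowest free ID; a negative errno is returned on failure. */
    int id = ida_alloc(&example_ida, GFP_KERNEL);

    if (id < 0)
        return id;
    return id;    /* caller uses the ID, e.g. as a device/minor number */
}

static void example_put_id(int id)
{
    /* Release the ID so it can be reused. */
    ida_free(&example_ida, id);
}
]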
@@ -229,7 +229,7 @@ struct acx_rx_msdu_lifetime {
  * === ==========
  * 31:12 Reserved - Always equal to 0.
  * 11 Association - When set, the WiLink receives all association
- *    related frames (association request/response, reassocation
+ *    related frames (association request/response, reassociation
  *    request/response, and disassociation). When clear, these frames
  *    are discarded.
  * 10 Auth/De auth - When set, the WiLink receives all authentication

@@ -2286,7 +2286,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
          * on.
          */
         if (!io_req)
-            /* If there is not io_req assocated with this CQE
+            /* If there is not io_req associated with this CQE
              * just queue it on CPU 0
              */
             cpu = 0;

@@ -979,7 +979,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
     left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
     pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
 
-    /* check if this stat has been successfully authenticated/assocated */
+    /* check if this stat has been successfully authenticated/associated */
     if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
         if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
             status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;

@@ -452,7 +452,7 @@ void LPS_Enter(struct adapter *padapter, const char *msg)
     if (hal_btcoex_IsBtControlLps(padapter))
         return;
 
-    /* Skip lps enter request if number of assocated adapters is not 1 */
+    /* Skip lps enter request if number of associated adapters is not 1 */
     if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE))
         n_assoc_iface++;
     if (n_assoc_iface != 1)

@@ -6,15 +6,29 @@
 #include <linux/kthread.h>
 #include <linux/preempt.h>
 
-static inline long io_timer_cmp(io_timer_heap *h,
-                struct io_timer *l,
-                struct io_timer *r)
+static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args)
 {
-    return l->expire - r->expire;
+    struct io_timer **_l = (struct io_timer **)l;
+    struct io_timer **_r = (struct io_timer **)r;
+
+    return (*_l)->expire < (*_r)->expire;
+}
+
+static inline void io_timer_swp(void *l, void *r, void __always_unused *args)
+{
+    struct io_timer **_l = (struct io_timer **)l;
+    struct io_timer **_r = (struct io_timer **)r;
+
+    swap(*_l, *_r);
 }
 
 void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
 {
+    const struct min_heap_callbacks callbacks = {
+        .less = io_timer_cmp,
+        .swp = io_timer_swp,
+    };
+
     spin_lock(&clock->timer_lock);
 
     if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
@@ -23,22 +37,27 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
         return;
     }
 
-    for (size_t i = 0; i < clock->timers.used; i++)
+    for (size_t i = 0; i < clock->timers.nr; i++)
         if (clock->timers.data[i] == timer)
             goto out;
 
-    BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
+    BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
 out:
     spin_unlock(&clock->timer_lock);
 }
 
 void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
 {
+    const struct min_heap_callbacks callbacks = {
+        .less = io_timer_cmp,
+        .swp = io_timer_swp,
+    };
+
     spin_lock(&clock->timer_lock);
 
-    for (size_t i = 0; i < clock->timers.used; i++)
+    for (size_t i = 0; i < clock->timers.nr; i++)
         if (clock->timers.data[i] == timer) {
-            heap_del(&clock->timers, i, io_timer_cmp, NULL);
+            min_heap_del(&clock->timers, i, &callbacks, NULL);
             break;
         }
 
@@ -123,10 +142,17 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
 static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
 {
     struct io_timer *ret = NULL;
+    const struct min_heap_callbacks callbacks = {
+        .less = io_timer_cmp,
+        .swp = io_timer_swp,
+    };
 
+    if (clock->timers.nr &&
+        time_after_eq64(now, clock->timers.data[0]->expire)) {
+        ret = *min_heap_peek(&clock->timers);
+        min_heap_pop(&clock->timers, &callbacks, NULL);
+    }
 
-    if (clock->timers.used &&
-        time_after_eq64(now, clock->timers.data[0]->expire))
-        heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
-
     return ret;
 }
 
@@ -150,7 +176,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
     printbuf_tabstop_push(out, 40);
     prt_printf(out, "current time:\t%llu\n", now);
 
-    for (unsigned i = 0; i < clock->timers.used; i++)
+    for (unsigned i = 0; i < clock->timers.nr; i++)
         prt_printf(out, "%ps %ps:\t%llu\n",
                clock->timers.data[i]->fn,
                clock->timers.data[i]->fn2,

@@ -24,7 +24,7 @@ struct io_timer {
 /* Amount to buffer up on a percpu counter */
 #define IO_CLOCK_PCPU_SECTORS 128
 
-typedef HEAP(struct io_timer *) io_timer_heap;
+typedef DEFINE_MIN_HEAP(struct io_timer *, io_timer_heap) io_timer_heap;
 
 struct io_clock {
     atomic64_t now;

@@ -901,8 +901,8 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 
     mutex_lock(&c->ec_stripes_heap_lock);
     if (n.size > h->size) {
-        memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
-        n.used = h->used;
+        memcpy(n.data, h->data, h->nr * sizeof(h->data[0]));
+        n.nr = h->nr;
         swap(*h, n);
     }
     mutex_unlock(&c->ec_stripes_heap_lock);
@@ -993,7 +993,7 @@ static u64 stripe_idx_to_delete(struct bch_fs *c)
 
     lockdep_assert_held(&c->ec_stripes_heap_lock);
 
-    if (h->used &&
+    if (h->nr &&
         h->data[0].blocks_nonempty == 0 &&
         !bch2_stripe_is_open(c, h->data[0].idx))
         return h->data[0].idx;
@@ -1001,14 +1001,6 @@ static u64 stripe_idx_to_delete(struct bch_fs *c)
     return 0;
 }
 
-static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
-                      struct ec_stripe_heap_entry l,
-                      struct ec_stripe_heap_entry r)
-{
-    return ((l.blocks_nonempty > r.blocks_nonempty) -
-        (l.blocks_nonempty < r.blocks_nonempty));
-}
-
 static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
                            size_t i)
 {
@@ -1017,39 +1009,71 @@ static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
     genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
 }
 
+static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args)
+{
+    struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
+    struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
+
+    return ((_l->blocks_nonempty > _r->blocks_nonempty) <
+        (_l->blocks_nonempty < _r->blocks_nonempty));
+}
+
+static inline void ec_stripes_heap_swap(void *l, void *r, void *h)
+{
+    struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
+    struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
+    ec_stripes_heap *_h = (ec_stripes_heap *)h;
+    size_t i = _l - _h->data;
+    size_t j = _r - _h->data;
+
+    swap(*_l, *_r);
+
+    ec_stripes_heap_set_backpointer(_h, i);
+    ec_stripes_heap_set_backpointer(_h, j);
+}
+
 static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
 {
     ec_stripes_heap *h = &c->ec_stripes_heap;
     struct stripe *m = genradix_ptr(&c->stripes, idx);
 
-    BUG_ON(m->heap_idx >= h->used);
+    BUG_ON(m->heap_idx >= h->nr);
     BUG_ON(h->data[m->heap_idx].idx != idx);
 }
 
 void bch2_stripes_heap_del(struct bch_fs *c,
                struct stripe *m, size_t idx)
 {
+    const struct min_heap_callbacks callbacks = {
+        .less = ec_stripes_heap_cmp,
+        .swp = ec_stripes_heap_swap,
+    };
+
     mutex_lock(&c->ec_stripes_heap_lock);
     heap_verify_backpointer(c, idx);
 
-    heap_del(&c->ec_stripes_heap, m->heap_idx,
-         ec_stripes_heap_cmp,
-         ec_stripes_heap_set_backpointer);
+    min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap);
     mutex_unlock(&c->ec_stripes_heap_lock);
 }
 
 void bch2_stripes_heap_insert(struct bch_fs *c,
                   struct stripe *m, size_t idx)
 {
-    mutex_lock(&c->ec_stripes_heap_lock);
-    BUG_ON(heap_full(&c->ec_stripes_heap));
+    const struct min_heap_callbacks callbacks = {
+        .less = ec_stripes_heap_cmp,
+        .swp = ec_stripes_heap_swap,
+    };
+
+    mutex_lock(&c->ec_stripes_heap_lock);
+    BUG_ON(min_heap_full(&c->ec_stripes_heap));
 
-    heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
+    genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr;
+    min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) {
             .idx = idx,
             .blocks_nonempty = m->blocks_nonempty,
         }),
-        ec_stripes_heap_cmp,
-        ec_stripes_heap_set_backpointer);
+        &callbacks,
+        &c->ec_stripes_heap);
 
     heap_verify_backpointer(c, idx);
     mutex_unlock(&c->ec_stripes_heap_lock);
@@ -1058,6 +1082,10 @@ void bch2_stripes_heap_insert(struct bch_fs *c,
 void bch2_stripes_heap_update(struct bch_fs *c,
                   struct stripe *m, size_t idx)
 {
+    const struct min_heap_callbacks callbacks = {
+        .less = ec_stripes_heap_cmp,
+        .swp = ec_stripes_heap_swap,
+    };
     ec_stripes_heap *h = &c->ec_stripes_heap;
     bool do_deletes;
     size_t i;
@@ -1068,10 +1096,8 @@ void bch2_stripes_heap_update(struct bch_fs *c,
     h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
 
     i = m->heap_idx;
-    heap_sift_up(h, i, ec_stripes_heap_cmp,
-             ec_stripes_heap_set_backpointer);
-    heap_sift_down(h, i, ec_stripes_heap_cmp,
-               ec_stripes_heap_set_backpointer);
+    min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap);
+    min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap);
 
     heap_verify_backpointer(c, idx);
 
@@ -1864,7 +1890,7 @@ static s64 get_existing_stripe(struct bch_fs *c,
         return -1;
 
     mutex_lock(&c->ec_stripes_heap_lock);
-    for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
+    for (heap_idx = 0; heap_idx < h->nr; heap_idx++) {
         /* No blocks worth reusing, stripe will just be deleted: */
         if (!h->data[heap_idx].blocks_nonempty)
             continue;
@@ -2195,7 +2221,7 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
     size_t i;
 
     mutex_lock(&c->ec_stripes_heap_lock);
-    for (i = 0; i < min_t(size_t, h->used, 50); i++) {
+    for (i = 0; i < min_t(size_t, h->nr, 50); i++) {
         m = genradix_ptr(&c->stripes, h->data[i].idx);
 
         prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,

@@ -36,6 +36,6 @@ struct ec_stripe_heap_entry {
     unsigned blocks_nonempty;
 };
 
-typedef HEAP(struct ec_stripe_heap_entry) ec_stripes_heap;
+typedef DEFINE_MIN_HEAP(struct ec_stripe_heap_entry, ec_stripes_heap) ec_stripes_heap;
 
 #endif /* _BCACHEFS_EC_TYPES_H */

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * random utiility code, for bcache but in theory not specific to bcache
+ * random utility code, for bcache but in theory not specific to bcache
  *
  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
  * Copyright 2012 Google, Inc.

@@ -8,6 +8,7 @@
 #include <linux/errno.h>
 #include <linux/freezer.h>
 #include <linux/kernel.h>
+#include <linux/min_heap.h>
 #include <linux/sched/clock.h>
 #include <linux/llist.h>
 #include <linux/log2.h>
@@ -54,17 +55,9 @@ static inline size_t buf_pages(void *p, size_t len)
                 PAGE_SIZE);
 }
 
-#define HEAP(type) \
-struct { \
-    size_t size, used; \
-    type *data; \
-}
-
-#define DECLARE_HEAP(type, name) HEAP(type) name
-
 #define init_heap(heap, _size, gfp) \
 ({ \
-    (heap)->used = 0; \
+    (heap)->nr = 0; \
     (heap)->size = (_size); \
     (heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
                 (gfp)); \
@@ -76,113 +69,6 @@ do { \
     (heap)->data = NULL; \
 } while (0)
 
-#define heap_set_backpointer(h, i, _fn) \
-do { \
-    void (*fn)(typeof(h), size_t) = _fn; \
-    if (fn) \
-        fn(h, i); \
-} while (0)
-
-#define heap_swap(h, i, j, set_backpointer) \
-do { \
-    swap((h)->data[i], (h)->data[j]); \
-    heap_set_backpointer(h, i, set_backpointer); \
-    heap_set_backpointer(h, j, set_backpointer); \
-} while (0)
-
-#define heap_peek(h) \
-({ \
-    EBUG_ON(!(h)->used); \
-    (h)->data[0]; \
-})
-
-#define heap_full(h) ((h)->used == (h)->size)
-
-#define heap_sift_down(h, i, cmp, set_backpointer) \
-do { \
-    size_t _c, _j = i; \
- \
-    for (; _j * 2 + 1 < (h)->used; _j = _c) { \
-        _c = _j * 2 + 1; \
-        if (_c + 1 < (h)->used && \
-            cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0) \
-            _c++; \
- \
-        if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0) \
-            break; \
-        heap_swap(h, _c, _j, set_backpointer); \
-    } \
-} while (0)
-
-#define heap_sift_up(h, i, cmp, set_backpointer) \
-do { \
-    while (i) { \
-        size_t p = (i - 1) / 2; \
-        if (cmp(h, (h)->data[i], (h)->data[p]) >= 0) \
-            break; \
-        heap_swap(h, i, p, set_backpointer); \
-        i = p; \
-    } \
-} while (0)
-
-#define __heap_add(h, d, cmp, set_backpointer) \
-({ \
-    size_t _i = (h)->used++; \
-    (h)->data[_i] = d; \
-    heap_set_backpointer(h, _i, set_backpointer); \
- \
-    heap_sift_up(h, _i, cmp, set_backpointer); \
-    _i; \
-})
-
-#define heap_add(h, d, cmp, set_backpointer) \
-({ \
-    bool _r = !heap_full(h); \
-    if (_r) \
-        __heap_add(h, d, cmp, set_backpointer); \
-    _r; \
-})
-
-#define heap_add_or_replace(h, new, cmp, set_backpointer) \
-do { \
-    if (!heap_add(h, new, cmp, set_backpointer) && \
-        cmp(h, new, heap_peek(h)) >= 0) { \
-        (h)->data[0] = new; \
-        heap_set_backpointer(h, 0, set_backpointer); \
-        heap_sift_down(h, 0, cmp, set_backpointer); \
-    } \
-} while (0)
-
-#define heap_del(h, i, cmp, set_backpointer) \
-do { \
-    size_t _i = (i); \
- \
-    BUG_ON(_i >= (h)->used); \
-    (h)->used--; \
-    if ((_i) < (h)->used) { \
-        heap_swap(h, _i, (h)->used, set_backpointer); \
-        heap_sift_up(h, _i, cmp, set_backpointer); \
-        heap_sift_down(h, _i, cmp, set_backpointer); \
-    } \
-} while (0)
-
-#define heap_pop(h, d, cmp, set_backpointer) \
-({ \
-    bool _r = (h)->used; \
-    if (_r) { \
-        (d) = (h)->data[0]; \
-        heap_del(h, 0, cmp, set_backpointer); \
-    } \
-    _r; \
-})
-
-#define heap_resort(heap, cmp, set_backpointer) \
-do { \
-    ssize_t _i; \
-    for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i) \
-        heap_sift_down(heap, _i, cmp, set_backpointer); \
-} while (0)
-
 #define ANYSINT_MAX(t) \
     ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
 

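[ Note on the API used above: the bcache, dm-vdo and bcachefs hunks all move from the open-coded HEAP()/heap_*() macros removed here to the generic min_heap library, per the "treewide: Refactor heap related implementation" series in the merge message. For reference only, and not part of the patch (the "demo_*" names and element type are invented; only calls that appear in the hunks above are used, and the field layout is taken from those hunks), the new interface looks roughly like this:

#include <linux/min_heap.h>
#include <linux/types.h>

/* Hypothetical element type, for illustration. */
struct demo_item {
    u64 key;
};

/* Declares "struct demo_heap" with nr/size/data members. */
DEFINE_MIN_HEAP(struct demo_item, demo_heap);

static bool demo_less(const void *l, const void *r, void __always_unused *args)
{
    return ((const struct demo_item *)l)->key <
           ((const struct demo_item *)r)->key;
}

static void demo_swp(void *l, void *r, void __always_unused *args)
{
    struct demo_item tmp = *(struct demo_item *)l;

    *(struct demo_item *)l = *(struct demo_item *)r;
    *(struct demo_item *)r = tmp;
}

static const struct min_heap_callbacks demo_callbacks = {
    .less = demo_less,
    .swp  = demo_swp,
};

/* Pop the smallest element, the same pattern as the converted call sites. */
static bool demo_pop_min(struct demo_heap *heap, struct demo_item *out)
{
    if (!heap->nr)
        return false;

    *out = *min_heap_peek(heap);
    min_heap_pop(heap, &demo_callbacks, NULL);
    return true;
}
]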
@@ -361,17 +361,16 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
     return ispipe;
 }
 
-static int zap_process(struct task_struct *start, int exit_code)
+static int zap_process(struct signal_struct *signal, int exit_code)
 {
     struct task_struct *t;
     int nr = 0;
 
-    /* Allow SIGKILL, see prepare_signal() */
-    start->signal->flags = SIGNAL_GROUP_EXIT;
-    start->signal->group_exit_code = exit_code;
-    start->signal->group_stop_count = 0;
+    signal->flags = SIGNAL_GROUP_EXIT;
+    signal->group_exit_code = exit_code;
+    signal->group_stop_count = 0;
 
-    for_each_thread(start, t) {
+    __for_each_thread(signal, t) {
         task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
         if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
             sigaddset(&t->pending.signal, SIGKILL);
@@ -391,8 +390,9 @@ static int zap_threads(struct task_struct *tsk,
 
     spin_lock_irq(&tsk->sighand->siglock);
     if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
+        /* Allow SIGKILL, see prepare_signal() */
         signal->core_state = core_state;
-        nr = zap_process(tsk, exit_code);
+        nr = zap_process(signal, exit_code);
         clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
         tsk->flags |= PF_DUMPCORE;
         atomic_set(&core_state->nr_threads, nr);

@@ -136,7 +136,7 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
 
 #define nilfs_cnt32_ge(a, b)   \
     (typecheck(__u32, a) && typecheck(__u32, b) &&  \
-     ((__s32)(a) - (__s32)(b) >= 0))
+     ((__s32)((a) - (b)) >= 0))
 
 static int nilfs_prepare_segment_lock(struct super_block *sb,
                       struct nilfs_transaction_info *ti)
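[ Note on the change above: the original macro cast each operand to __s32 before subtracting, so the subtraction itself could overflow (undefined behaviour); the fixed form subtracts the __u32 values first, which wraps modulo 2^32, and only reinterprets the result as signed. An illustrative sketch of the same idea, not part of the patch:

/*
 * Wrap-safe "a >= b" for 32-bit sequence counters, same shape as the
 * fixed nilfs_cnt32_ge() above.
 */
static inline int demo_cnt32_ge(__u32 a, __u32 b)
{
    return (__s32)(a - b) >= 0;
}

/*
 * Example: with b = 0xfffffffe (just before the counter wraps) and
 * a = 0x00000001 (just after the wrap), a - b wraps to 3, so
 * demo_cnt32_ge(a, b) is true and demo_cnt32_ge(b, a) is false --
 * the ordering survives the wrap and no signed subtraction overflows.
 */
]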
@@ -1639,41 +1639,30 @@ static void nilfs_begin_folio_io(struct folio *folio)
     folio_unlock(folio);
 }
 
-static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+/**
+ * nilfs_prepare_write_logs - prepare to write logs
+ * @logs: logs to prepare for writing
+ * @seed: checksum seed value
+ *
+ * nilfs_prepare_write_logs() adds checksums and prepares the block
+ * buffers/folios for writing logs.  In order to stabilize folios of
+ * memory-mapped file blocks by putting them in writeback state before
+ * calculating the checksums, first prepare to write payload blocks other
+ * than segment summary and super root blocks in which the checksums will
+ * be embedded.
+ */
+static void nilfs_prepare_write_logs(struct list_head *logs, u32 seed)
 {
     struct nilfs_segment_buffer *segbuf;
     struct folio *bd_folio = NULL, *fs_folio = NULL;
+    struct buffer_head *bh;
 
-    list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-        struct buffer_head *bh;
-
-        list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
-                    b_assoc_buffers) {
-            if (bh->b_folio != bd_folio) {
-                if (bd_folio) {
-                    folio_lock(bd_folio);
-                    folio_wait_writeback(bd_folio);
-                    folio_clear_dirty_for_io(bd_folio);
-                    folio_start_writeback(bd_folio);
-                    folio_unlock(bd_folio);
-                }
-                bd_folio = bh->b_folio;
-            }
-        }
-
+    /* Prepare to write payload blocks */
+    list_for_each_entry(segbuf, logs, sb_list) {
         list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                     b_assoc_buffers) {
-            if (bh == segbuf->sb_super_root) {
-                if (bh->b_folio != bd_folio) {
-                    folio_lock(bd_folio);
-                    folio_wait_writeback(bd_folio);
-                    folio_clear_dirty_for_io(bd_folio);
-                    folio_start_writeback(bd_folio);
-                    folio_unlock(bd_folio);
-                    bd_folio = bh->b_folio;
-                }
+            if (bh == segbuf->sb_super_root)
                 break;
-            }
             set_buffer_async_write(bh);
             if (bh->b_folio != fs_folio) {
                 nilfs_begin_folio_io(fs_folio);
@@ -1681,6 +1670,42 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
             }
         }
     }
+    nilfs_begin_folio_io(fs_folio);
+
+    nilfs_add_checksums_on_logs(logs, seed);
+
+    /* Prepare to write segment summary blocks */
+    list_for_each_entry(segbuf, logs, sb_list) {
+        list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+                    b_assoc_buffers) {
+            mark_buffer_dirty(bh);
+            if (bh->b_folio == bd_folio)
+                continue;
+            if (bd_folio) {
+                folio_lock(bd_folio);
+                folio_wait_writeback(bd_folio);
+                folio_clear_dirty_for_io(bd_folio);
+                folio_start_writeback(bd_folio);
+                folio_unlock(bd_folio);
+            }
+            bd_folio = bh->b_folio;
+        }
+    }
+
+    /* Prepare to write super root block */
+    bh = NILFS_LAST_SEGBUF(logs)->sb_super_root;
+    if (bh) {
+        mark_buffer_dirty(bh);
+        if (bh->b_folio != bd_folio) {
+            folio_lock(bd_folio);
+            folio_wait_writeback(bd_folio);
+            folio_clear_dirty_for_io(bd_folio);
+            folio_start_writeback(bd_folio);
+            folio_unlock(bd_folio);
+            bd_folio = bh->b_folio;
+        }
+    }
+
     if (bd_folio) {
         folio_lock(bd_folio);
         folio_wait_writeback(bd_folio);
@@ -1688,7 +1713,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
         folio_start_writeback(bd_folio);
         folio_unlock(bd_folio);
     }
-    nilfs_begin_folio_io(fs_folio);
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
@@ -2070,10 +2094,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
     nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
 
     /* Write partial segments */
-    nilfs_segctor_prepare_write(sci);
-
-    nilfs_add_checksums_on_logs(&sci->sc_segbufs,
-                    nilfs->ns_crc_seed);
+    nilfs_prepare_write_logs(&sci->sc_segbufs, nilfs->ns_crc_seed);
 
     err = nilfs_segctor_write(sci, nilfs);
     if (unlikely(err))
@@ -2824,8 +2845,6 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
     if (!nilfs->ns_writer)
         return -ENOMEM;
 
-    inode_attach_wb(nilfs->ns_bdev->bd_mapping->host, NULL);
-
     err = nilfs_segctor_start_thread(nilfs->ns_writer);
     if (unlikely(err))
         nilfs_detach_log_writer(sb);

@@ -56,7 +56,7 @@ static void nilfs_##name##_attr_release(struct kobject *kobj) \
                      sg_##name##_kobj); \
     complete(&subgroups->sg_##name##_kobj_unregister); \
 } \
-static struct kobj_type nilfs_##name##_ktype = { \
+static const struct kobj_type nilfs_##name##_ktype = { \
     .default_groups = nilfs_##name##_groups, \
     .sysfs_ops = &nilfs_##name##_attr_ops, \
     .release = nilfs_##name##_attr_release, \
@@ -166,7 +166,7 @@ static const struct sysfs_ops nilfs_snapshot_attr_ops = {
     .store = nilfs_snapshot_attr_store,
 };
 
-static struct kobj_type nilfs_snapshot_ktype = {
+static const struct kobj_type nilfs_snapshot_ktype = {
     .default_groups = nilfs_snapshot_groups,
     .sysfs_ops = &nilfs_snapshot_attr_ops,
     .release = nilfs_snapshot_attr_release,
@@ -967,7 +967,7 @@ static const struct sysfs_ops nilfs_dev_attr_ops = {
     .store = nilfs_dev_attr_store,
 };
 
-static struct kobj_type nilfs_dev_ktype = {
+static const struct kobj_type nilfs_dev_ktype = {
     .default_groups = nilfs_dev_groups,
     .sysfs_ops = &nilfs_dev_attr_ops,
     .release = nilfs_dev_attr_release,

@@ -294,13 +294,16 @@ static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
  * bh passed here can be an inode block or a dir data block, depending
  * on the inode inline data flag.
  */
-static int ocfs2_check_dir_entry(struct inode * dir,
-                 struct ocfs2_dir_entry * de,
-                 struct buffer_head * bh,
+static int ocfs2_check_dir_entry(struct inode *dir,
+                 struct ocfs2_dir_entry *de,
+                 struct buffer_head *bh,
+                 char *buf,
+                 unsigned int size,
                  unsigned long offset)
 {
     const char *error_msg = NULL;
     const int rlen = le16_to_cpu(de->rec_len);
+    const unsigned long next_offset = ((char *) de - buf) + rlen;
 
     if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
         error_msg = "rec_len is smaller than minimal";
@@ -308,9 +311,11 @@ static int ocfs2_check_dir_entry(struct inode * dir,
         error_msg = "rec_len % 4 != 0";
     else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
         error_msg = "rec_len is too small for name_len";
-    else if (unlikely(
-         ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
-        error_msg = "directory entry across blocks";
+    else if (unlikely(next_offset > size))
+        error_msg = "directory entry overrun";
+    else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) &&
+         next_offset != size)
+        error_msg = "directory entry too close to end";
 
     if (unlikely(error_msg != NULL))
         mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
@@ -352,16 +357,17 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
     de_buf = first_de;
     dlimit = de_buf + bytes;
 
-    while (de_buf < dlimit) {
+    while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) {
         /* this code is executed quadratically often */
         /* do minimal checking `by hand' */
 
         de = (struct ocfs2_dir_entry *) de_buf;
 
-        if (de_buf + namelen <= dlimit &&
+        if (de->name + namelen <= dlimit &&
             ocfs2_match(namelen, name, de)) {
             /* found a match - just to be sure, do a full check */
-            if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
+            if (!ocfs2_check_dir_entry(dir, de, bh, first_de,
+                           bytes, offset)) {
                 ret = -1;
                 goto bail;
             }
@@ -1138,7 +1144,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
     pde = NULL;
     de = (struct ocfs2_dir_entry *) first_de;
     while (i < bytes) {
-        if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
+        if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) {
             status = -EIO;
             mlog_errno(status);
             goto bail;
@@ -1635,7 +1641,8 @@ int __ocfs2_add_entry(handle_t *handle,
         /* These checks should've already been passed by the
          * prepare function, but I guess we can leave them
          * here anyway. */
-        if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
+        if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start,
+                       size, offset)) {
             retval = -ENOENT;
             goto bail;
         }
@@ -1774,7 +1781,8 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
     }
 
     de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
-    if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
+    if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data,
+                   i_size_read(inode), ctx->pos)) {
         /* On error, skip the f_pos to the end. */
         ctx->pos = i_size_read(inode);
         break;
@@ -1867,7 +1875,8 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
     while (ctx->pos < i_size_read(inode)
            && offset < sb->s_blocksize) {
         de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
-        if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
+        if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data,
+                       sb->s_blocksize, offset)) {
             /* On error, skip the f_pos to the
                next block. */
             ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
@@ -3339,7 +3348,7 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
     struct super_block *sb = dir->i_sb;
     struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
     struct ocfs2_dir_entry *de, *last_de = NULL;
-    char *de_buf, *limit;
+    char *first_de, *de_buf, *limit;
     unsigned long offset = 0;
     unsigned int rec_len, new_rec_len, free_space;
 
@@ -3352,14 +3361,16 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
     else
         free_space = dir->i_sb->s_blocksize - i_size_read(dir);
 
-    de_buf = di->id2.i_data.id_data;
+    first_de = di->id2.i_data.id_data;
+    de_buf = first_de;
     limit = de_buf + i_size_read(dir);
     rec_len = OCFS2_DIR_REC_LEN(namelen);
 
     while (de_buf < limit) {
         de = (struct ocfs2_dir_entry *)de_buf;
 
-        if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
+        if (!ocfs2_check_dir_entry(dir, de, di_bh, first_de,
+                       i_size_read(dir), offset)) {
             ret = -ENOENT;
             goto out;
         }
@@ -3441,7 +3452,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
             /* move to next block */
             de = (struct ocfs2_dir_entry *) bh->b_data;
         }
-        if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
+        if (!ocfs2_check_dir_entry(dir, de, bh, bh->b_data, blocksize,
+                       offset)) {
             status = -ENOENT;
             goto bail;
         }

@@ -221,12 +221,12 @@ struct ocfs2_lock_res_ops {
  */
 #define LOCK_TYPE_USES_LVB 0x2
 
-static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
     .get_osb = ocfs2_get_inode_osb,
     .flags = 0,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
     .get_osb = ocfs2_get_inode_osb,
     .check_downconvert = ocfs2_check_meta_downconvert,
     .set_lvb = ocfs2_set_meta_lvb,
@@ -234,50 +234,50 @@ static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
     .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_super_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_super_lops = {
     .flags = LOCK_TYPE_REQUIRES_REFRESH,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_rename_lops = {
     .flags = 0,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
     .flags = 0,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
     .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
     .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
     .get_osb = ocfs2_get_dentry_osb,
     .post_unlock = ocfs2_dentry_post_unlock,
     .downconvert_worker = ocfs2_dentry_convert_worker,
     .flags = 0,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
     .get_osb = ocfs2_get_inode_osb,
     .flags = 0,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_flock_lops = {
     .get_osb = ocfs2_get_file_osb,
     .flags = 0,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
     .set_lvb = ocfs2_set_qinfo_lvb,
     .get_osb = ocfs2_get_qinfo_osb,
     .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
 };
 
-static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
+static const struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
     .check_downconvert = ocfs2_check_refcount_downconvert,
     .downconvert_worker = ocfs2_refcount_convert_worker,
     .flags = 0,
@@ -510,7 +510,7 @@ static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
                        struct ocfs2_lock_res *res,
                        enum ocfs2_lock_type type,
-                       struct ocfs2_lock_res_ops *ops,
+                       const struct ocfs2_lock_res_ops *ops,
                        void *priv)
 {
     res->l_type = type;
@@ -553,7 +553,7 @@ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
                    unsigned int generation,
                    struct inode *inode)
 {
-    struct ocfs2_lock_res_ops *ops;
+    const struct ocfs2_lock_res_ops *ops;
 
     switch(type) {
     case OCFS2_LOCK_TYPE_RW:

@@ -2189,8 +2189,10 @@ static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
  * @osb: ocfs2 file system
  * @ret_orphan_dir: Orphan dir inode - returned locked!
  * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @name: Buffer to store the name of the orphan.
  * @lookup: dir lookup result, to be passed back into functions like
  *          ocfs2_orphan_add
+ * @dio: Flag indicating if direct IO is being used or not.
  *
  * Returns zero on success and the ret_orphan_dir, name and lookup
  * fields will be populated.

@@ -154,7 +154,7 @@ struct ocfs2_lock_stats {
 
 struct ocfs2_lock_res {
     void *l_priv;
-    struct ocfs2_lock_res_ops *l_ops;
+    const struct ocfs2_lock_res_ops *l_ops;
 
 
     struct list_head l_blocked_list;

@@ -404,7 +404,7 @@ static int o2cb_cluster_this_node(struct ocfs2_cluster_connection *conn,
     return 0;
 }
 
-static struct ocfs2_stack_operations o2cb_stack_ops = {
+static const struct ocfs2_stack_operations o2cb_stack_ops = {
     .connect = o2cb_cluster_connect,
     .disconnect = o2cb_cluster_disconnect,
     .this_node = o2cb_cluster_this_node,

@@ -1065,7 +1065,7 @@ static int user_cluster_this_node(struct ocfs2_cluster_connection *conn,
     return 0;
 }
 
-static struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
+static const struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
     .connect = user_cluster_connect,
     .disconnect = user_cluster_disconnect,
     .this_node = user_cluster_this_node,

@ -223,7 +223,7 @@ struct ocfs2_stack_operations {
|
|||||||
*/
|
*/
|
||||||
struct ocfs2_stack_plugin {
|
struct ocfs2_stack_plugin {
|
||||||
char *sp_name;
|
char *sp_name;
|
||||||
struct ocfs2_stack_operations *sp_ops;
|
const struct ocfs2_stack_operations *sp_ops;
|
||||||
struct module *sp_owner;
|
struct module *sp_owner;
|
||||||
|
|
||||||
/* These are managed by the stackglue code. */
|
/* These are managed by the stackglue code. */
|
||||||
@@ -1062,13 +1062,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
 return i_ret + b_ret;
 }

-static int ocfs2_xattr_find_entry(int name_index,
+static int ocfs2_xattr_find_entry(struct inode *inode, int name_index,
 const char *name,
 struct ocfs2_xattr_search *xs)
 {
 struct ocfs2_xattr_entry *entry;
 size_t name_len;
-int i, cmp = 1;
+int i, name_offset, cmp = 1;

 if (name == NULL)
 return -EINVAL;
@@ -1076,13 +1076,22 @@ static int ocfs2_xattr_find_entry(int name_index,
 name_len = strlen(name);
 entry = xs->here;
 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
+if ((void *)entry >= xs->end) {
+ocfs2_error(inode->i_sb, "corrupted xattr entries");
+return -EFSCORRUPTED;
+}
 cmp = name_index - ocfs2_xattr_get_type(entry);
 if (!cmp)
 cmp = name_len - entry->xe_name_len;
-if (!cmp)
-cmp = memcmp(name, (xs->base +
-le16_to_cpu(entry->xe_name_offset)),
-name_len);
+if (!cmp) {
+name_offset = le16_to_cpu(entry->xe_name_offset);
+if ((xs->base + name_offset + name_len) > xs->end) {
+ocfs2_error(inode->i_sb,
+"corrupted xattr entries");
+return -EFSCORRUPTED;
+}
+cmp = memcmp(name, (xs->base + name_offset), name_len);
+}
 if (cmp == 0)
 break;
 entry += 1;
@@ -1166,7 +1175,7 @@ static int ocfs2_xattr_ibody_get(struct inode *inode,
 xs->base = (void *)xs->header;
 xs->here = xs->header->xh_entries;

-ret = ocfs2_xattr_find_entry(name_index, name, xs);
+ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
 if (ret)
 return ret;
 size = le64_to_cpu(xs->here->xe_value_size);
@@ -2698,7 +2707,7 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,

 /* Find the named attribute. */
 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
-ret = ocfs2_xattr_find_entry(name_index, name, xs);
+ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
 if (ret && ret != -ENODATA)
 return ret;
 xs->not_found = ret;
@@ -2833,7 +2842,7 @@ static int ocfs2_xattr_block_find(struct inode *inode,
 xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
 xs->here = xs->header->xh_entries;

-ret = ocfs2_xattr_find_entry(name_index, name, xs);
+ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
 } else
 ret = ocfs2_xattr_index_block_find(inode, blk_bh,
 name_index,
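For reference, the checks added above follow a common pattern when walking an on-disk entry table: validate that the entry descriptor itself and the name it points at still lie inside the buffer that was read before calling memcmp(). A minimal stand-alone sketch of that pattern follows; the struct and function names are made up for illustration and are not ocfs2 code.

#include <stddef.h>
#include <string.h>

struct xentry { unsigned short name_off; unsigned char name_len; };

static int find_entry(const char *base, const char *end,
                      const struct xentry *tbl, int count,
                      const char *name, size_t name_len)
{
        int i;

        for (i = 0; i < count; i++) {
                const struct xentry *e = &tbl[i];

                if ((const char *)(e + 1) > end)
                        return -1;      /* entry table runs past the buffer */
                if (base + e->name_off + name_len > end)
                        return -1;      /* name would be read out of bounds */
                if (e->name_len == name_len &&
                    !memcmp(name, base + e->name_off, name_len))
                        return i;       /* found a matching, in-bounds entry */
        }
        return -1;
}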
@@ -1540,4 +1540,5 @@ static void __exit exit_ufs_fs(void)

 module_init(init_ufs_fs)
 module_exit(exit_ufs_fs)
+MODULE_DESCRIPTION("UFS Filesystem");
 MODULE_LICENSE("GPL");
|
@ -141,14 +141,6 @@
|
|||||||
* often happens at runtime)
|
* often happens at runtime)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#if defined(CONFIG_MEMORY_HOTPLUG)
|
|
||||||
#define MEM_KEEP(sec) *(.mem##sec)
|
|
||||||
#define MEM_DISCARD(sec)
|
|
||||||
#else
|
|
||||||
#define MEM_KEEP(sec)
|
|
||||||
#define MEM_DISCARD(sec) *(.mem##sec)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
|
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
|
||||||
#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
|
#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
|
||||||
#define PATCHABLE_DISCARDS
|
#define PATCHABLE_DISCARDS
|
||||||
@ -357,7 +349,6 @@
|
|||||||
*(.data..decrypted) \
|
*(.data..decrypted) \
|
||||||
*(.ref.data) \
|
*(.ref.data) \
|
||||||
*(.data..shared_aligned) /* percpu related */ \
|
*(.data..shared_aligned) /* percpu related */ \
|
||||||
MEM_KEEP(init.data*) \
|
|
||||||
*(.data.unlikely) \
|
*(.data.unlikely) \
|
||||||
__start_once = .; \
|
__start_once = .; \
|
||||||
*(.data.once) \
|
*(.data.once) \
|
||||||
@ -542,7 +533,6 @@
|
|||||||
/* __*init sections */ \
|
/* __*init sections */ \
|
||||||
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
|
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
|
||||||
*(.ref.rodata) \
|
*(.ref.rodata) \
|
||||||
MEM_KEEP(init.rodata) \
|
|
||||||
} \
|
} \
|
||||||
\
|
\
|
||||||
/* Built-in module parameters. */ \
|
/* Built-in module parameters. */ \
|
||||||
@ -593,8 +583,7 @@
|
|||||||
*(.text.unknown .text.unknown.*) \
|
*(.text.unknown .text.unknown.*) \
|
||||||
NOINSTR_TEXT \
|
NOINSTR_TEXT \
|
||||||
*(.ref.text) \
|
*(.ref.text) \
|
||||||
*(.text.asan.* .text.tsan.*) \
|
*(.text.asan.* .text.tsan.*)
|
||||||
MEM_KEEP(init.text*) \
|
|
||||||
|
|
||||||
|
|
||||||
/* sched.text is aling to function alignment to secure we have same
|
/* sched.text is aling to function alignment to secure we have same
|
||||||
@ -701,7 +690,6 @@
|
|||||||
#define INIT_DATA \
|
#define INIT_DATA \
|
||||||
KEEP(*(SORT(___kentry+*))) \
|
KEEP(*(SORT(___kentry+*))) \
|
||||||
*(.init.data .init.data.*) \
|
*(.init.data .init.data.*) \
|
||||||
MEM_DISCARD(init.data*) \
|
|
||||||
KERNEL_CTORS() \
|
KERNEL_CTORS() \
|
||||||
MCOUNT_REC() \
|
MCOUNT_REC() \
|
||||||
*(.init.rodata .init.rodata.*) \
|
*(.init.rodata .init.rodata.*) \
|
||||||
@ -709,7 +697,6 @@
|
|||||||
TRACE_SYSCALLS() \
|
TRACE_SYSCALLS() \
|
||||||
KPROBE_BLACKLIST() \
|
KPROBE_BLACKLIST() \
|
||||||
ERROR_INJECT_WHITELIST() \
|
ERROR_INJECT_WHITELIST() \
|
||||||
MEM_DISCARD(init.rodata) \
|
|
||||||
CLK_OF_TABLES() \
|
CLK_OF_TABLES() \
|
||||||
RESERVEDMEM_OF_TABLES() \
|
RESERVEDMEM_OF_TABLES() \
|
||||||
TIMER_OF_TABLES() \
|
TIMER_OF_TABLES() \
|
||||||
@ -727,8 +714,7 @@
|
|||||||
|
|
||||||
#define INIT_TEXT \
|
#define INIT_TEXT \
|
||||||
*(.init.text .init.text.*) \
|
*(.init.text .init.text.*) \
|
||||||
*(.text.startup) \
|
*(.text.startup)
|
||||||
MEM_DISCARD(init.text*)
|
|
||||||
|
|
||||||
#define EXIT_DATA \
|
#define EXIT_DATA \
|
||||||
*(.exit.data .exit.data.*) \
|
*(.exit.data .exit.data.*) \
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
#include <linux/cpuhplock.h>
|
#include <linux/cpuhplock.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/smp.h>
|
#include <linux/smp.h>
|
||||||
|
|
||||||
struct device_node;
|
struct device_node;
|
||||||
|
@ -10,7 +10,6 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/nodemask.h>
|
#include <linux/nodemask.h>
|
||||||
#include <linux/rculist.h>
|
#include <linux/rculist.h>
|
||||||
#include <linux/cgroupstats.h>
|
#include <linux/cgroupstats.h>
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
#ifdef CONFIG_GENERIC_CLOCKEVENTS
|
#ifdef CONFIG_GENERIC_CLOCKEVENTS
|
||||||
|
|
||||||
# include <linux/clocksource.h>
|
# include <linux/clocksource.h>
|
||||||
# include <linux/cpumask.h>
|
# include <linux/cpumask_types.h>
|
||||||
# include <linux/ktime.h>
|
# include <linux/ktime.h>
|
||||||
# include <linux/notifier.h>
|
# include <linux/notifier.h>
|
||||||
|
|
||||||
@@ -208,10 +208,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  */
 #define data_race(expr) \
 ({ \
-__unqual_scalar_typeof(({ expr; })) __v = ({ \
-__kcsan_disable_current(); \
-expr; \
-}); \
+__kcsan_disable_current(); \
+__auto_type __v = (expr); \
 __kcsan_enable_current(); \
 __v; \
 })
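The simplified macro above evaluates the expression exactly once into an __auto_type temporary while KCSAN instrumentation is suppressed around it, instead of nesting statement expressions. A stand-alone sketch of the same shape, with the KCSAN hooks stubbed out so it compiles outside the kernel (names are illustrative only):

#define kcsan_off()  do { } while (0)   /* stands in for __kcsan_disable_current() */
#define kcsan_on()   do { } while (0)   /* stands in for __kcsan_enable_current() */

#define data_race_sketch(expr)          \
({                                      \
        kcsan_off();                    \
        __auto_type __v = (expr);       \
        kcsan_on();                     \
        __v;                            \
})

static int shared_counter;

static int read_counter_racy(void)
{
        /* evaluate once, hide the plain racy read from the race detector */
        return data_race_sketch(shared_counter);
}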
|
@ -16,7 +16,6 @@
|
|||||||
|
|
||||||
#include <linux/node.h>
|
#include <linux/node.h>
|
||||||
#include <linux/compiler.h>
|
#include <linux/compiler.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/cpuhotplug.h>
|
#include <linux/cpuhotplug.h>
|
||||||
#include <linux/cpuhplock.h>
|
#include <linux/cpuhplock.h>
|
||||||
#include <linux/cpu_smt.h>
|
#include <linux/cpu_smt.h>
|
||||||
|
@ -15,7 +15,6 @@
|
|||||||
|
|
||||||
#include <linux/of.h>
|
#include <linux/of.h>
|
||||||
#include <linux/thermal.h>
|
#include <linux/thermal.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
|
|
||||||
struct cpufreq_policy;
|
struct cpufreq_policy;
|
||||||
|
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
* Copyright 2011 Solarflare Communications Inc.
|
* Copyright 2011 Solarflare Communications Inc.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/gfp.h>
|
#include <linux/gfp.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/kref.h>
|
#include <linux/kref.h>
|
||||||
|
@ -9,25 +9,13 @@
|
|||||||
*/
|
*/
|
||||||
#include <linux/cleanup.h>
|
#include <linux/cleanup.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/threads.h>
|
|
||||||
#include <linux/bitmap.h>
|
#include <linux/bitmap.h>
|
||||||
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/atomic.h>
|
#include <linux/atomic.h>
|
||||||
#include <linux/bug.h>
|
#include <linux/bug.h>
|
||||||
#include <linux/gfp_types.h>
|
#include <linux/gfp_types.h>
|
||||||
#include <linux/numa.h>
|
#include <linux/numa.h>
|
||||||
|
|
||||||
/* Don't assign or return these: may not be this big! */
|
|
||||||
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* cpumask_bits - get the bits in a cpumask
|
|
||||||
* @maskp: the struct cpumask *
|
|
||||||
*
|
|
||||||
* You should only assume nr_cpu_ids bits of this mask are valid. This is
|
|
||||||
* a macro so it's const-correct.
|
|
||||||
*/
|
|
||||||
#define cpumask_bits(maskp) ((maskp)->bits)
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cpumask_pr_args - printf args to output a cpumask
|
* cpumask_pr_args - printf args to output a cpumask
|
||||||
* @maskp: cpumask to be printed
|
* @maskp: cpumask to be printed
|
||||||
@ -925,48 +913,7 @@ static inline unsigned int cpumask_size(void)
|
|||||||
return bitmap_size(large_cpumask_bits);
|
return bitmap_size(large_cpumask_bits);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* cpumask_var_t: struct cpumask for stack usage.
|
|
||||||
*
|
|
||||||
* Oh, the wicked games we play! In order to make kernel coding a
|
|
||||||
* little more difficult, we typedef cpumask_var_t to an array or a
|
|
||||||
* pointer: doing &mask on an array is a noop, so it still works.
|
|
||||||
*
|
|
||||||
* i.e.
|
|
||||||
* cpumask_var_t tmpmask;
|
|
||||||
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
|
|
||||||
* return -ENOMEM;
|
|
||||||
*
|
|
||||||
* ... use 'tmpmask' like a normal struct cpumask * ...
|
|
||||||
*
|
|
||||||
* free_cpumask_var(tmpmask);
|
|
||||||
*
|
|
||||||
*
|
|
||||||
* However, one notable exception is there. alloc_cpumask_var() allocates
|
|
||||||
* only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
|
|
||||||
* NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
|
|
||||||
*
|
|
||||||
* cpumask_var_t tmpmask;
|
|
||||||
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
|
|
||||||
* return -ENOMEM;
|
|
||||||
*
|
|
||||||
* var = *tmpmask;
|
|
||||||
*
|
|
||||||
* This code makes NR_CPUS length memcopy and brings to a memory corruption.
|
|
||||||
* cpumask_copy() provide safe copy functionality.
|
|
||||||
*
|
|
||||||
* Note that there is another evil here: If you define a cpumask_var_t
|
|
||||||
* as a percpu variable then the way to obtain the address of the cpumask
|
|
||||||
* structure differently influences what this_cpu_* operation needs to be
|
|
||||||
* used. Please use this_cpu_cpumask_var_t in those cases. The direct use
|
|
||||||
* of this_cpu_ptr() or this_cpu_read() will lead to failures when the
|
|
||||||
* other type of cpumask_var_t implementation is configured.
|
|
||||||
*
|
|
||||||
* Please also note that __cpumask_var_read_mostly can be used to declare
|
|
||||||
* a cpumask_var_t variable itself (not its content) as read mostly.
|
|
||||||
*/
|
|
||||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||||
typedef struct cpumask *cpumask_var_t;
|
|
||||||
|
|
||||||
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
|
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
|
||||||
#define __cpumask_var_read_mostly __read_mostly
|
#define __cpumask_var_read_mostly __read_mostly
|
||||||
@ -1013,7 +960,6 @@ static inline bool cpumask_available(cpumask_var_t mask)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
typedef struct cpumask cpumask_var_t[1];
|
|
||||||
|
|
||||||
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
|
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
|
||||||
#define __cpumask_var_read_mostly
|
#define __cpumask_var_read_mostly
|
||||||
include/linux/cpumask_types.h (new file, 66 lines)
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_CPUMASK_TYPES_H
+#define __LINUX_CPUMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/threads.h>
+
+/* Don't assign or return these: may not be this big! */
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+
+/**
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
+ *
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
+ */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * i.e.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ *
+ *
+ * However, one notable exception is there. alloc_cpumask_var() allocates
+ * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
+ * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
+ *
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * var = *tmpmask;
+ *
+ * This code makes NR_CPUS length memcopy and brings to a memory corruption.
+ * cpumask_copy() provide safe copy functionality.
+ *
+ * Note that there is another evil here: If you define a cpumask_var_t
+ * as a percpu variable then the way to obtain the address of the cpumask
+ * structure differently influences what this_cpu_* operation needs to be
+ * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
+ * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
+ * other type of cpumask_var_t implementation is configured.
+ *
+ * Please also note that __cpumask_var_read_mostly can be used to declare
+ * a cpumask_var_t variable itself (not its content) as read mostly.
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+#else
+typedef struct cpumask cpumask_var_t[1];
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+#endif /* __LINUX_CPUMASK_TYPES_H */
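The point of the split is that headers which only need the cpumask types can now include cpumask_types.h instead of dragging in the full cpumask API. A sketch of the intended usage under that assumption (the struct, file and function names below are hypothetical, not from this series):

/* foo.h - only needs the type, so the lightweight header is enough */
#include <linux/cpumask_types.h>

struct foo_dev {
        cpumask_var_t allowed;          /* embedded type, no API calls here */
};

/* foo.c - actually manipulates the mask, so it pulls the full API */
#include <linux/cpumask.h>              /* alloc_cpumask_var(), cpumask_copy(), ... */
#include <linux/errno.h>

static int foo_set_allowed(struct foo_dev *dev, const struct cpumask *src)
{
        if (!alloc_cpumask_var(&dev->allowed, GFP_KERNEL))
                return -ENOMEM;
        /* never "*dst = *src" on cpumask_var_t; use the copy helper */
        cpumask_copy(dev->allowed, src);
        return 0;
}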
@@ -84,11 +84,15 @@

 #define __exit __section(".exit.text") __exitused __cold notrace

-/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(".meminit.text") __cold notrace \
-__latent_entropy
-#define __meminitdata __section(".meminit.data")
-#define __meminitconst __section(".meminit.rodata")
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __meminitconst
+#else
+#define __meminit __init
+#define __meminitdata __initdata
+#define __meminitconst __initconst
+#endif

 /* For assembly routines */
 #define __HEAD .section ".head.text","ax"
@@ -99,10 +103,6 @@
 #define __INITRODATA .section ".init.rodata","a",%progbits
 #define __FINITDATA .previous

-#define __MEMINIT .section ".meminit.text", "ax"
-#define __MEMINITDATA .section ".meminit.data", "aw"
-#define __MEMINITRODATA .section ".meminit.rodata", "a"
-
 /* silence warnings when references are OK */
 #define __REF .section ".ref.text", "ax"
 #define __REFDATA .section ".ref.data", "aw"
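With this change the dedicated .meminit sections go away: when CONFIG_MEMORY_HOTPLUG=y the annotation is empty and the code stays in regular .text, and when it is disabled __meminit simply becomes __init and is freed after boot. A tiny sketch of how the annotation is consumed by callers (the function below is hypothetical):

#include <linux/init.h>

static int __meminit my_node_setup(int nid)
{
        /*
         * Runs at boot; with memory hotplug enabled it must stay resident
         * because it can run again when memory is added later.
         */
        return 0;
}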
@ -6,13 +6,13 @@
|
|||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
#include <linux/cleanup.h>
|
#include <linux/cleanup.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/irqreturn.h>
|
#include <linux/irqreturn.h>
|
||||||
#include <linux/irqnr.h>
|
#include <linux/irqnr.h>
|
||||||
#include <linux/hardirq.h>
|
#include <linux/hardirq.h>
|
||||||
#include <linux/irqflags.h>
|
#include <linux/irqflags.h>
|
||||||
#include <linux/hrtimer.h>
|
#include <linux/hrtimer.h>
|
||||||
#include <linux/kref.h>
|
#include <linux/kref.h>
|
||||||
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/workqueue.h>
|
#include <linux/workqueue.h>
|
||||||
#include <linux/jump_label.h>
|
#include <linux/jump_label.h>
|
||||||
|
|
||||||
|
@ -8,7 +8,7 @@
|
|||||||
#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
|
#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
|
||||||
|
|
||||||
#include <linux/fwnode.h>
|
#include <linux/fwnode.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/irqdomain.h>
|
#include <linux/irqdomain.h>
|
||||||
|
|
||||||
struct partition_affinity {
|
struct partition_affinity {
|
||||||
|
@ -31,7 +31,7 @@
|
|||||||
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
|
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
|
||||||
#define jhash_mask(n) (jhash_size(n)-1)
|
#define jhash_mask(n) (jhash_size(n)-1)
|
||||||
|
|
||||||
/* __jhash_mix -- mix 3 32-bit values reversibly. */
|
/* __jhash_mix - mix 3 32-bit values reversibly. */
|
||||||
#define __jhash_mix(a, b, c) \
|
#define __jhash_mix(a, b, c) \
|
||||||
{ \
|
{ \
|
||||||
a -= c; a ^= rol32(c, 4); c += b; \
|
a -= c; a ^= rol32(c, 4); c += b; \
|
||||||
@ -60,7 +60,7 @@
|
|||||||
/* jhash - hash an arbitrary key
|
/* jhash - hash an arbitrary key
|
||||||
* @k: sequence of bytes as key
|
* @k: sequence of bytes as key
|
||||||
* @length: the length of the key
|
* @length: the length of the key
|
||||||
* @initval: the previous hash, or an arbitray value
|
* @initval: the previous hash, or an arbitrary value
|
||||||
*
|
*
|
||||||
* The generic version, hashes an arbitrary sequence of bytes.
|
* The generic version, hashes an arbitrary sequence of bytes.
|
||||||
* No alignment or length assumptions are made about the input key.
|
* No alignment or length assumptions are made about the input key.
|
||||||
@ -110,7 +110,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
|
|||||||
/* jhash2 - hash an array of u32's
|
/* jhash2 - hash an array of u32's
|
||||||
* @k: the key which must be an array of u32's
|
* @k: the key which must be an array of u32's
|
||||||
* @length: the number of u32's in the key
|
* @length: the number of u32's in the key
|
||||||
* @initval: the previous hash, or an arbitray value
|
* @initval: the previous hash, or an arbitrary value
|
||||||
*
|
*
|
||||||
* Returns the hash value of the key.
|
* Returns the hash value of the key.
|
||||||
*/
|
*/
|
||||||
|
@ -5,7 +5,6 @@
|
|||||||
#include <linux/smp.h>
|
#include <linux/smp.h>
|
||||||
#include <linux/threads.h>
|
#include <linux/threads.h>
|
||||||
#include <linux/percpu.h>
|
#include <linux/percpu.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/interrupt.h>
|
#include <linux/interrupt.h>
|
||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
#include <linux/vtime.h>
|
#include <linux/vtime.h>
|
||||||
@@ -7,46 +7,89 @@
 #include <linux/types.h>

 /**
- * struct min_heap - Data structure to hold a min-heap.
- * @data: Start of array holding the heap elements.
+ * Data structure to hold a min-heap.
  * @nr: Number of elements currently in the heap.
  * @size: Maximum number of elements that can be held in current storage.
+ * @data: Pointer to the start of array holding the heap elements.
+ * @preallocated: Start of the static preallocated array holding the heap elements.
  */
-struct min_heap {
-void *data;
-int nr;
-int size;
-};
+#define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \
+struct _name { \
+int nr; \
+int size; \
+_type *data; \
+_type preallocated[_nr]; \
+}
+
+#define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0)
+
+typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char;
+
+#define __minheap_cast(_heap) (typeof((_heap)->data[0]) *)
+#define __minheap_obj_size(_heap) sizeof((_heap)->data[0])

 /**
  * struct min_heap_callbacks - Data/functions to customise the min_heap.
- * @elem_size: The nr of each element in bytes.
  * @less: Partial order function for this heap.
  * @swp: Swap elements function.
  */
 struct min_heap_callbacks {
-int elem_size;
-bool (*less)(const void *lhs, const void *rhs);
-void (*swp)(void *lhs, void *rhs);
+bool (*less)(const void *lhs, const void *rhs, void *args);
+void (*swp)(void *lhs, void *rhs, void *args);
 };

+/* Initialize a min-heap. */
+static __always_inline
+void __min_heap_init(min_heap_char *heap, void *data, int size)
+{
+heap->nr = 0;
+heap->size = size;
+if (data)
+heap->data = data;
+else
+heap->data = heap->preallocated;
+}
+
+#define min_heap_init(_heap, _data, _size) \
+__min_heap_init((min_heap_char *)_heap, _data, _size)
+
+/* Get the minimum element from the heap. */
+static __always_inline
+void *__min_heap_peek(struct min_heap_char *heap)
+{
+return heap->nr ? heap->data : NULL;
+}
+
+#define min_heap_peek(_heap) \
+(__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap))
+
+/* Check if the heap is full. */
+static __always_inline
+bool __min_heap_full(min_heap_char *heap)
+{
+return heap->nr == heap->size;
+}
+
+#define min_heap_full(_heap) \
+__min_heap_full((min_heap_char *)_heap)
+
 /* Sift the element at pos down the heap. */
 static __always_inline
-void min_heapify(struct min_heap *heap, int pos,
-const struct min_heap_callbacks *func)
+void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+const struct min_heap_callbacks *func, void *args)
 {
 void *left, *right;
 void *data = heap->data;
-void *root = data + pos * func->elem_size;
+void *root = data + pos * elem_size;
 int i = pos, j;

 /* Find the sift-down path all the way to the leaves. */
 for (;;) {
 if (i * 2 + 2 >= heap->nr)
 break;
-left = data + (i * 2 + 1) * func->elem_size;
-right = data + (i * 2 + 2) * func->elem_size;
-i = func->less(left, right) ? i * 2 + 1 : i * 2 + 2;
+left = data + (i * 2 + 1) * elem_size;
+right = data + (i * 2 + 2) * elem_size;
+i = func->less(left, right, args) ? i * 2 + 1 : i * 2 + 2;
 }

 /* Special case for the last leaf with no sibling. */
@@ -54,83 +97,140 @@ void min_heapify(struct min_heap *heap, int pos,
 i = i * 2 + 1;

 /* Backtrack to the correct location. */
-while (i != pos && func->less(root, data + i * func->elem_size))
+while (i != pos && func->less(root, data + i * elem_size, args))
 i = (i - 1) / 2;

 /* Shift the element into its correct place. */
 j = i;
 while (i != pos) {
 i = (i - 1) / 2;
-func->swp(data + i * func->elem_size, data + j * func->elem_size);
+func->swp(data + i * elem_size, data + j * elem_size, args);
 }
 }

+#define min_heap_sift_down(_heap, _pos, _func, _args) \
+__min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args)
+
+/* Sift up ith element from the heap, O(log2(nr)). */
+static __always_inline
+void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
+const struct min_heap_callbacks *func, void *args)
+{
+void *data = heap->data;
+size_t parent;
+
+while (idx) {
+parent = (idx - 1) / 2;
+if (func->less(data + parent * elem_size, data + idx * elem_size, args))
+break;
+func->swp(data + parent * elem_size, data + idx * elem_size, args);
+idx = parent;
+}
+}
+
+#define min_heap_sift_up(_heap, _idx, _func, _args) \
+__min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
+
 /* Floyd's approach to heapification that is O(nr). */
 static __always_inline
-void min_heapify_all(struct min_heap *heap,
-const struct min_heap_callbacks *func)
+void __min_heapify_all(min_heap_char *heap, size_t elem_size,
+const struct min_heap_callbacks *func, void *args)
 {
 int i;

 for (i = heap->nr / 2 - 1; i >= 0; i--)
-min_heapify(heap, i, func);
+__min_heap_sift_down(heap, i, elem_size, func, args);
 }

+#define min_heapify_all(_heap, _func, _args) \
+__min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+
 /* Remove minimum element from the heap, O(log2(nr)). */
 static __always_inline
-void min_heap_pop(struct min_heap *heap,
-const struct min_heap_callbacks *func)
+bool __min_heap_pop(min_heap_char *heap, size_t elem_size,
+const struct min_heap_callbacks *func, void *args)
 {
 void *data = heap->data;

 if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
-return;
+return false;

 /* Place last element at the root (position 0) and then sift down. */
 heap->nr--;
-memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
-min_heapify(heap, 0, func);
+memcpy(data, data + (heap->nr * elem_size), elem_size);
+__min_heap_sift_down(heap, 0, elem_size, func, args);
+
+return true;
 }

+#define min_heap_pop(_heap, _func, _args) \
+__min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+
 /*
  * Remove the minimum element and then push the given element. The
  * implementation performs 1 sift (O(log2(nr))) and is therefore more
  * efficient than a pop followed by a push that does 2.
  */
 static __always_inline
-void min_heap_pop_push(struct min_heap *heap,
-const void *element,
-const struct min_heap_callbacks *func)
+void __min_heap_pop_push(min_heap_char *heap,
+const void *element, size_t elem_size,
+const struct min_heap_callbacks *func,
+void *args)
 {
-memcpy(heap->data, element, func->elem_size);
-min_heapify(heap, 0, func);
+memcpy(heap->data, element, elem_size);
+__min_heap_sift_down(heap, 0, elem_size, func, args);
 }

+#define min_heap_pop_push(_heap, _element, _func, _args) \
+__min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args)
+
 /* Push an element on to the heap, O(log2(nr)). */
 static __always_inline
-void min_heap_push(struct min_heap *heap, const void *element,
-const struct min_heap_callbacks *func)
+bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size,
+const struct min_heap_callbacks *func, void *args)
 {
 void *data = heap->data;
-void *child, *parent;
 int pos;

 if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
-return;
+return false;

 /* Place at the end of data. */
 pos = heap->nr;
-memcpy(data + (pos * func->elem_size), element, func->elem_size);
+memcpy(data + (pos * elem_size), element, elem_size);
 heap->nr++;

 /* Sift child at pos up. */
-for (; pos > 0; pos = (pos - 1) / 2) {
-child = data + (pos * func->elem_size);
-parent = data + ((pos - 1) / 2) * func->elem_size;
-if (func->less(parent, child))
-break;
-func->swp(parent, child);
-}
+__min_heap_sift_up(heap, elem_size, pos, func, args);
+
+return true;
 }

+#define min_heap_push(_heap, _element, _func, _args) \
+__min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args)
+
+/* Remove ith element from the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
+const struct min_heap_callbacks *func, void *args)
+{
+void *data = heap->data;
+
+if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+return false;
+
+/* Place last element at the root (position 0) and then sift down. */
+heap->nr--;
+if (idx == heap->nr)
+return true;
+func->swp(data + (idx * elem_size), data + (heap->nr * elem_size), args);
+__min_heap_sift_up(heap, elem_size, idx, func, args);
+__min_heap_sift_down(heap, idx, elem_size, func, args);
+
+return true;
+}
+
+#define min_heap_del(_heap, _idx, _func, _args) \
+__min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
+
 #endif /* _LINUX_MIN_HEAP_H */
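For reference, a short usage sketch of the reworked interface above: the heap is now declared with a concrete element type, the callbacks no longer carry an element size, and each operation takes an extra opaque args pointer. Everything except the min_heap macros and helpers (the "u64_heap8" type, the callbacks and smallest_of()) is made up for illustration.

#include <linux/min_heap.h>

MIN_HEAP_PREALLOCATED(u64, u64_heap8, 8);

static bool u64_less(const void *lhs, const void *rhs, void *args)
{
        return *(const u64 *)lhs < *(const u64 *)rhs;
}

static void u64_swp(void *lhs, void *rhs, void *args)
{
        u64 tmp = *(u64 *)lhs;

        *(u64 *)lhs = *(u64 *)rhs;
        *(u64 *)rhs = tmp;
}

static const struct min_heap_callbacks u64_cb = {
        .less = u64_less,       /* no .elem_size; it comes from the heap type */
        .swp = u64_swp,
};

static u64 smallest_of(const u64 *vals, int n)  /* assumes 1 <= n <= 8 */
{
        struct u64_heap8 heap;
        int i;

        min_heap_init(&heap, NULL, 8);  /* NULL: fall back to the preallocated array */
        for (i = 0; i < n; i++)
                min_heap_push(&heap, &vals[i], &u64_cb, NULL);

        return *min_heap_peek(&heap);   /* typed pointer, thanks to __minheap_cast() */
}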
@ -19,7 +19,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/irqdomain_defs.h>
|
#include <linux/irqdomain_defs.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/msi_api.h>
|
#include <linux/msi_api.h>
|
||||||
#include <linux/xarray.h>
|
#include <linux/xarray.h>
|
||||||
#include <linux/mutex.h>
|
#include <linux/mutex.h>
|
||||||
|
@ -16,7 +16,6 @@
|
|||||||
#define _LINUX_NODE_H_
|
#define _LINUX_NODE_H_
|
||||||
|
|
||||||
#include <linux/device.h>
|
#include <linux/device.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/list.h>
|
#include <linux/list.h>
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -620,7 +620,7 @@ enum {
|
|||||||
*
|
*
|
||||||
* Structure used between LLDD and nvmet-fc layer to represent the exchange
|
* Structure used between LLDD and nvmet-fc layer to represent the exchange
|
||||||
* context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
|
* context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
|
||||||
* memory transfers, and its assocated cqe transfer).
|
* memory transfers, and its associated cqe transfer).
|
||||||
*
|
*
|
||||||
* The structure is allocated by the LLDD whenever a FCP CMD IU is received
|
* The structure is allocated by the LLDD whenever a FCP CMD IU is received
|
||||||
* from the FC link. The address of the structure is passed to the nvmet-fc
|
* from the FC link. The address of the structure is passed to the nvmet-fc
|
||||||
|
@ -77,9 +77,10 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
|
|||||||
#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
|
#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
|
||||||
|
|
||||||
struct taint_flag {
|
struct taint_flag {
|
||||||
char c_true; /* character printed when tainted */
|
char c_true; /* character printed when tainted */
|
||||||
char c_false; /* character printed when not tainted */
|
char c_false; /* character printed when not tainted */
|
||||||
bool module; /* also show as a per-module taint flag */
|
bool module; /* also show as a per-module taint flag */
|
||||||
|
const char *desc; /* verbose description of the set taint flag */
|
||||||
};
|
};
|
||||||
|
|
||||||
extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
|
extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
|
||||||
@ -90,6 +91,7 @@ enum lockdep_ok {
|
|||||||
};
|
};
|
||||||
|
|
||||||
extern const char *print_tainted(void);
|
extern const char *print_tainted(void);
|
||||||
|
extern const char *print_tainted_verbose(void);
|
||||||
extern void add_taint(unsigned flag, enum lockdep_ok);
|
extern void add_taint(unsigned flag, enum lockdep_ok);
|
||||||
extern int test_taint(unsigned flag);
|
extern int test_taint(unsigned flag);
|
||||||
extern unsigned long get_taint(void);
|
extern unsigned long get_taint(void);
|
||||||
|
@ -6,7 +6,6 @@
|
|||||||
#include <linux/mmdebug.h>
|
#include <linux/mmdebug.h>
|
||||||
#include <linux/preempt.h>
|
#include <linux/preempt.h>
|
||||||
#include <linux/smp.h>
|
#include <linux/smp.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/pfn.h>
|
#include <linux/pfn.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/cleanup.h>
|
#include <linux/cleanup.h>
|
||||||
|
@ -16,7 +16,7 @@
|
|||||||
#include <linux/of.h>
|
#include <linux/of.h>
|
||||||
#include <linux/notifier.h>
|
#include <linux/notifier.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/time64.h>
|
#include <linux/time64.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -49,12 +49,6 @@
|
|||||||
/********** arch/$ARCH/mm/init.c **********/
|
/********** arch/$ARCH/mm/init.c **********/
|
||||||
#define POISON_FREE_INITMEM 0xcc
|
#define POISON_FREE_INITMEM 0xcc
|
||||||
|
|
||||||
/********** arch/ia64/hp/common/sba_iommu.c **********/
|
|
||||||
/*
|
|
||||||
* arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
|
|
||||||
* value of "SBAIOMMU POISON\0" for spill-over poisoning.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/********** fs/jbd/journal.c **********/
|
/********** fs/jbd/journal.c **********/
|
||||||
#define JBD_POISON_FREE 0x5b
|
#define JBD_POISON_FREE 0x5b
|
||||||
#define JBD2_POISON_FREE 0x5c
|
#define JBD2_POISON_FREE 0x5c
|
||||||
|
@ -4,7 +4,6 @@
|
|||||||
|
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/cache.h>
|
#include <linux/cache.h>
|
||||||
|
|
||||||
#include <asm/errno.h>
|
#include <asm/errno.h>
|
||||||
|
@ -29,7 +29,6 @@
|
|||||||
#include <linux/lockdep.h>
|
#include <linux/lockdep.h>
|
||||||
#include <linux/cleanup.h>
|
#include <linux/cleanup.h>
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/context_tracking_irq.h>
|
#include <linux/context_tracking_irq.h>
|
||||||
|
|
||||||
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
|
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
|
||||||
|
@ -13,7 +13,7 @@
|
|||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
#include <linux/thread_info.h>
|
#include <linux/thread_info.h>
|
||||||
#include <linux/preempt.h>
|
#include <linux/preempt.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
|
|
||||||
#include <linux/cache.h>
|
#include <linux/cache.h>
|
||||||
#include <linux/irqflags_types.h>
|
#include <linux/irqflags_types.h>
|
||||||
@ -1618,7 +1618,7 @@ static inline char task_index_to_char(unsigned int state)
|
|||||||
{
|
{
|
||||||
static const char state_char[] = "RSDTtXZPI";
|
static const char state_char[] = "RSDTtXZPI";
|
||||||
|
|
||||||
BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
|
BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
|
||||||
|
|
||||||
return state_char[state];
|
return state_char[state];
|
||||||
}
|
}
|
||||||
@ -1792,7 +1792,8 @@ static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpuma
|
|||||||
}
|
}
|
||||||
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
|
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
|
||||||
{
|
{
|
||||||
if (!cpumask_test_cpu(0, new_mask))
|
/* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */
|
||||||
|
if ((*cpumask_bits(new_mask) & 1) == 0)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,6 @@
|
|||||||
#include <linux/string_helpers.h>
|
#include <linux/string_helpers.h>
|
||||||
#include <linux/bug.h>
|
#include <linux/bug.h>
|
||||||
#include <linux/mutex.h>
|
#include <linux/mutex.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/nodemask.h>
|
#include <linux/nodemask.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/cred.h>
|
#include <linux/cred.h>
|
||||||
|
@ -69,7 +69,7 @@ struct apple_rtkit;
|
|||||||
* Initializes the internal state required to handle RTKit. This
|
* Initializes the internal state required to handle RTKit. This
|
||||||
* should usually be called within _probe.
|
* should usually be called within _probe.
|
||||||
*
|
*
|
||||||
* @dev: Pointer to the device node this coprocessor is assocated with
|
* @dev: Pointer to the device node this coprocessor is associated with
|
||||||
* @cookie: opaque cookie passed to all functions defined in rtkit_ops
|
* @cookie: opaque cookie passed to all functions defined in rtkit_ops
|
||||||
* @mbox_name: mailbox name used to communicate with the co-processor
|
* @mbox_name: mailbox name used to communicate with the co-processor
|
||||||
* @mbox_idx: mailbox index to be used if mbox_name is NULL
|
* @mbox_idx: mailbox index to be used if mbox_name is NULL
|
||||||
@ -83,7 +83,7 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
|
|||||||
* Non-devm version of devm_apple_rtkit_init. Must be freed with
|
* Non-devm version of devm_apple_rtkit_init. Must be freed with
|
||||||
* apple_rtkit_free.
|
* apple_rtkit_free.
|
||||||
*
|
*
|
||||||
* @dev: Pointer to the device node this coprocessor is assocated with
|
* @dev: Pointer to the device node this coprocessor is associated with
|
||||||
* @cookie: opaque cookie passed to all functions defined in rtkit_ops
|
* @cookie: opaque cookie passed to all functions defined in rtkit_ops
|
||||||
* @mbox_name: mailbox name used to communicate with the co-processor
|
* @mbox_name: mailbox name used to communicate with the co-processor
|
||||||
* @mbox_idx: mailbox index to be used if mbox_name is NULL
|
* @mbox_idx: mailbox index to be used if mbox_name is NULL
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
#define _LINUX_STOP_MACHINE
|
#define _LINUX_STOP_MACHINE
|
||||||
|
|
||||||
#include <linux/cpu.h>
|
#include <linux/cpu.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/smp.h>
|
#include <linux/smp.h>
|
||||||
#include <linux/list.h>
|
#include <linux/list.h>
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
#include <linux/cache.h>
|
#include <linux/cache.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
#include <linux/threads.h>
|
#include <linux/threads.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/seqlock.h>
|
#include <linux/seqlock.h>
|
||||||
#include <linux/lockdep.h>
|
#include <linux/lockdep.h>
|
||||||
#include <linux/completion.h>
|
#include <linux/completion.h>
|
||||||
|
@ -16,7 +16,6 @@
|
|||||||
#include <linux/srcu.h>
|
#include <linux/srcu.h>
|
||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/cpumask.h>
|
|
||||||
#include <linux/rcupdate.h>
|
#include <linux/rcupdate.h>
|
||||||
#include <linux/tracepoint-defs.h>
|
#include <linux/tracepoint-defs.h>
|
||||||
#include <linux/static_call.h>
|
#include <linux/static_call.h>
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
#include <linux/lockdep.h>
|
#include <linux/lockdep.h>
|
||||||
#include <linux/threads.h>
|
#include <linux/threads.h>
|
||||||
#include <linux/atomic.h>
|
#include <linux/atomic.h>
|
||||||
#include <linux/cpumask.h>
|
#include <linux/cpumask_types.h>
|
||||||
#include <linux/rcupdate.h>
|
#include <linux/rcupdate.h>
|
||||||
#include <linux/workqueue_types.h>
|
#include <linux/workqueue_types.h>
|
||||||
|
|
||||||
|
@ -74,5 +74,6 @@ static void exitf(void)
|
|||||||
|
|
||||||
module_init(backtrace_regression_test);
|
module_init(backtrace_regression_test);
|
||||||
module_exit(exitf);
|
module_exit(exitf);
|
||||||
|
MODULE_DESCRIPTION("Simple stack backtrace regression test module");
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
|
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
|
||||||
|
@ -13,7 +13,6 @@
|
|||||||
#include <linux/memory.h>
|
#include <linux/memory.h>
|
||||||
#include <linux/cpuhotplug.h>
|
#include <linux/cpuhotplug.h>
|
||||||
#include <linux/memblock.h>
|
#include <linux/memblock.h>
|
||||||
#include <linux/kexec.h>
|
|
||||||
#include <linux/kmemleak.h>
|
#include <linux/kmemleak.h>
|
||||||
|
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
|
@ -534,7 +534,7 @@ void perf_sample_event_took(u64 sample_len_ns)
|
|||||||
__this_cpu_write(running_sample_length, running_len);
|
__this_cpu_write(running_sample_length, running_len);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Note: this will be biased artifically low until we have
|
* Note: this will be biased artificially low until we have
|
||||||
* seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
|
* seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
|
||||||
* from having to maintain a count.
|
* from having to maintain a count.
|
||||||
*/
|
*/
|
||||||
@ -596,10 +596,10 @@ static inline u64 perf_event_clock(struct perf_event *event)
|
|||||||
*
|
*
|
||||||
* Event groups make things a little more complicated, but not terribly so. The
|
* Event groups make things a little more complicated, but not terribly so. The
|
||||||
* rules for a group are that if the group leader is OFF the entire group is
|
* rules for a group are that if the group leader is OFF the entire group is
|
||||||
* OFF, irrespecive of what the group member states are. This results in
|
* OFF, irrespective of what the group member states are. This results in
|
||||||
* __perf_effective_state().
|
* __perf_effective_state().
|
||||||
*
|
*
|
||||||
* A futher ramification is that when a group leader flips between OFF and
|
* A further ramification is that when a group leader flips between OFF and
|
||||||
* !OFF, we need to update all group member times.
|
* !OFF, we need to update all group member times.
|
||||||
*
|
*
|
||||||
*
|
*
|
||||||
@ -891,7 +891,7 @@ static int perf_cgroup_ensure_storage(struct perf_event *event,
|
|||||||
int cpu, heap_size, ret = 0;
|
int cpu, heap_size, ret = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Allow storage to have sufficent space for an iterator for each
|
* Allow storage to have sufficient space for an iterator for each
|
||||||
* possibly nested cgroup plus an iterator for events with no cgroup.
|
* possibly nested cgroup plus an iterator for events with no cgroup.
|
||||||
*/
|
*/
|
||||||
for (heap_size = 1; css; css = css->parent)
|
for (heap_size = 1; css; css = css->parent)
|
||||||
@@ -3671,7 +3671,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 perf_cgroup_switch(next);
 }

-static bool perf_less_group_idx(const void *l, const void *r)
+static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args)
 {
 const struct perf_event *le = *(const struct perf_event **)l;
 const struct perf_event *re = *(const struct perf_event **)r;
@@ -3679,20 +3679,21 @@ static bool perf_less_group_idx(const void *l, const void *r)
 return le->group_index < re->group_index;
 }

-static void swap_ptr(void *l, void *r)
+static void swap_ptr(void *l, void *r, void __always_unused *args)
 {
 void **lp = l, **rp = r;

 swap(*lp, *rp);
 }

+DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);
+
 static const struct min_heap_callbacks perf_min_heap = {
-.elem_size = sizeof(struct perf_event *),
 .less = perf_less_group_idx,
 .swp = swap_ptr,
 };

-static void __heap_add(struct min_heap *heap, struct perf_event *event)
+static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
 {
 struct perf_event **itrs = heap->data;

@@ -3726,7 +3727,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 struct perf_cpu_context *cpuctx = NULL;
 /* Space for per CPU and/or any CPU event iterators. */
 struct perf_event *itrs[2];
-struct min_heap event_heap;
+struct perf_event_min_heap event_heap;
 struct perf_event **evt;
 int ret;

@@ -3735,7 +3736,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,

 if (!ctx->task) {
 cpuctx = this_cpu_ptr(&perf_cpu_context);
-event_heap = (struct min_heap){
+event_heap = (struct perf_event_min_heap){
 .data = cpuctx->heap,
 .nr = 0,
 .size = cpuctx->heap_size,
@@ -3748,7 +3749,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 css = &cpuctx->cgrp->css;
 #endif
 } else {
-event_heap = (struct min_heap){
+event_heap = (struct perf_event_min_heap){
 .data = itrs,
 .nr = 0,
 .size = ARRAY_SIZE(itrs),
@@ -3770,7 +3771,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
 }

-min_heapify_all(&event_heap, &perf_min_heap);
+min_heapify_all(&event_heap, &perf_min_heap, NULL);

 while (event_heap.nr) {
 ret = func(*evt, data);
@@ -3779,9 +3780,9 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,

 *evt = perf_event_groups_next(*evt, pmu);
 if (*evt)
-min_heapify(&event_heap, 0, &perf_min_heap);
+min_heap_sift_down(&event_heap, 0, &perf_min_heap, NULL);
 else
-min_heap_pop(&event_heap, &perf_min_heap);
+min_heap_pop(&event_heap, &perf_min_heap, NULL);
 }

 return 0;
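The merge loop above is the classic k-way-merge use of a min-heap, just ported to the typed API: build the heap once, then either re-sift the root after advancing its iterator or pop it when that iterator is exhausted. A reduced sketch of the same pattern with made-up iterator types (only the min_heap calls come from the new API):

#include <linux/min_heap.h>

struct my_iter;
struct my_iter *my_iter_next(struct my_iter *it);     /* hypothetical helper */

DEFINE_MIN_HEAP(struct my_iter *, my_iter_heap);

static void merge_all(struct my_iter_heap *heap,
                      const struct min_heap_callbacks *cbs,
                      void (*visit)(struct my_iter *))
{
        struct my_iter **top;

        min_heapify_all(heap, cbs, NULL);              /* O(n) Floyd build */

        while (heap->nr) {
                top = min_heap_peek(heap);
                visit(*top);

                *top = my_iter_next(*top);             /* advance the smallest stream */
                if (*top)
                        min_heap_sift_down(heap, 0, cbs, NULL);
                else
                        min_heap_pop(heap, cbs, NULL);
        }
}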
@@ -208,9 +208,10 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 unsigned int i;

 for (i = 0; i < NR_CACHED_STACKS; i++) {
-if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
-continue;
-return true;
+struct vm_struct *tmp = NULL;
+
+if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+return true;
 }
 return false;
 }
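The try_cmpxchg() family takes the expected value by reference and writes back what was actually observed when the exchange fails, which usually generates better code and removes the explicit comparison. A generic sketch of the idiom on a plain variable (not the per-cpu stack cache above; names are illustrative):

#include <linux/atomic.h>

static unsigned long owner;

static bool claim(unsigned long me)
{
        unsigned long expected = 0;

        /* succeeds only if 'owner' was 0; on failure 'expected' holds the current owner */
        return try_cmpxchg(&owner, &expected, me);
}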
@@ -127,7 +127,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * Ok, the task did not get scheduled for more than 2 minutes,
  * complain:
  */
-if (sysctl_hung_task_warnings) {
+if (sysctl_hung_task_warnings || hung_task_call_panic) {
 if (sysctl_hung_task_warnings > 0)
 sysctl_hung_task_warnings--;
 pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
|
kernel/panic.c (116 changed lines)

@@ -35,6 +35,7 @@
 #include <linux/debugfs.h>
 #include <linux/sysfs.h>
 #include <linux/context_tracking.h>
+#include <linux/seq_buf.h>
 #include <trace/events/error_report.h>
 #include <asm/sections.h>
 
@@ -470,32 +471,83 @@ void panic(const char *fmt, ...)
 
 EXPORT_SYMBOL(panic);
 
+#define TAINT_FLAG(taint, _c_true, _c_false, _module) \
+	[ TAINT_##taint ] = { \
+		.c_true = _c_true, .c_false = _c_false, \
+		.module = _module, \
+		.desc = #taint, \
+	}
+
 /*
  * TAINT_FORCED_RMMOD could be a per-module flag but the module
  * is being removed anyway.
  */
 const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
-	[ TAINT_PROPRIETARY_MODULE ] = { 'P', 'G', true },
+	TAINT_FLAG(PROPRIETARY_MODULE, 'P', 'G', true),
-	[ TAINT_FORCED_MODULE ] = { 'F', ' ', true },
+	TAINT_FLAG(FORCED_MODULE, 'F', ' ', true),
-	[ TAINT_CPU_OUT_OF_SPEC ] = { 'S', ' ', false },
+	TAINT_FLAG(CPU_OUT_OF_SPEC, 'S', ' ', false),
-	[ TAINT_FORCED_RMMOD ] = { 'R', ' ', false },
+	TAINT_FLAG(FORCED_RMMOD, 'R', ' ', false),
-	[ TAINT_MACHINE_CHECK ] = { 'M', ' ', false },
+	TAINT_FLAG(MACHINE_CHECK, 'M', ' ', false),
-	[ TAINT_BAD_PAGE ] = { 'B', ' ', false },
+	TAINT_FLAG(BAD_PAGE, 'B', ' ', false),
-	[ TAINT_USER ] = { 'U', ' ', false },
+	TAINT_FLAG(USER, 'U', ' ', false),
-	[ TAINT_DIE ] = { 'D', ' ', false },
+	TAINT_FLAG(DIE, 'D', ' ', false),
-	[ TAINT_OVERRIDDEN_ACPI_TABLE ] = { 'A', ' ', false },
+	TAINT_FLAG(OVERRIDDEN_ACPI_TABLE, 'A', ' ', false),
-	[ TAINT_WARN ] = { 'W', ' ', false },
+	TAINT_FLAG(WARN, 'W', ' ', false),
-	[ TAINT_CRAP ] = { 'C', ' ', true },
+	TAINT_FLAG(CRAP, 'C', ' ', true),
-	[ TAINT_FIRMWARE_WORKAROUND ] = { 'I', ' ', false },
+	TAINT_FLAG(FIRMWARE_WORKAROUND, 'I', ' ', false),
-	[ TAINT_OOT_MODULE ] = { 'O', ' ', true },
+	TAINT_FLAG(OOT_MODULE, 'O', ' ', true),
-	[ TAINT_UNSIGNED_MODULE ] = { 'E', ' ', true },
+	TAINT_FLAG(UNSIGNED_MODULE, 'E', ' ', true),
-	[ TAINT_SOFTLOCKUP ] = { 'L', ' ', false },
+	TAINT_FLAG(SOFTLOCKUP, 'L', ' ', false),
-	[ TAINT_LIVEPATCH ] = { 'K', ' ', true },
+	TAINT_FLAG(LIVEPATCH, 'K', ' ', true),
-	[ TAINT_AUX ] = { 'X', ' ', true },
+	TAINT_FLAG(AUX, 'X', ' ', true),
-	[ TAINT_RANDSTRUCT ] = { 'T', ' ', true },
+	TAINT_FLAG(RANDSTRUCT, 'T', ' ', true),
-	[ TAINT_TEST ] = { 'N', ' ', true },
+	TAINT_FLAG(TEST, 'N', ' ', true),
 };
 
+#undef TAINT_FLAG
+
+static void print_tainted_seq(struct seq_buf *s, bool verbose)
+{
+	const char *sep = "";
+	int i;
+
+	if (!tainted_mask) {
+		seq_buf_puts(s, "Not tainted");
+		return;
+	}
+
+	seq_buf_printf(s, "Tainted: ");
+	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
+		const struct taint_flag *t = &taint_flags[i];
+		bool is_set = test_bit(i, &tainted_mask);
+		char c = is_set ? t->c_true : t->c_false;
+
+		if (verbose) {
+			if (is_set) {
+				seq_buf_printf(s, "%s[%c]=%s", sep, c, t->desc);
+				sep = ", ";
+			}
+		} else {
+			seq_buf_putc(s, c);
+		}
+	}
+}
+
+static const char *_print_tainted(bool verbose)
+{
+	/* FIXME: what should the size be? */
+	static char buf[sizeof(taint_flags)];
+	struct seq_buf s;
+
+	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);
+
+	seq_buf_init(&s, buf, sizeof(buf));
+
+	print_tainted_seq(&s, verbose);
+
+	return seq_buf_str(&s);
+}
+
 /**
  * print_tainted - return a string to represent the kernel taint state.
  *
@@ -506,25 +558,15 @@ const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
  */
 const char *print_tainted(void)
 {
-	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];
-
-	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);
-
-	if (tainted_mask) {
-		char *s;
-		int i;
-
-		s = buf + sprintf(buf, "Tainted: ");
-		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
-			const struct taint_flag *t = &taint_flags[i];
-			*s++ = test_bit(i, &tainted_mask) ?
-				t->c_true : t->c_false;
-		}
-		*s = 0;
-	} else
-		snprintf(buf, sizeof(buf), "Not tainted");
-
-	return buf;
+	return _print_tainted(false);
+}
+
+/**
+ * print_tainted_verbose - A more verbose version of print_tainted()
+ */
+const char *print_tainted_verbose(void)
+{
+	return _print_tainted(true);
 }
 
 int test_taint(unsigned flag)
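The TAINT_FLAG() macro above combines a designated array initializer with preprocessor stringification, so each flag's name only has to be spelled once and its textual description comes along for free. A small standalone C sketch of the same pattern (struct, enum, and table names invented for illustration, not the kernel's):

#include <stdio.h>

struct flag_desc {
	char c_true;
	char c_false;
	const char *desc;	/* filled in by #name via the macro */
};

enum { FLAG_PROPRIETARY, FLAG_WARN, FLAG_COUNT };

/* Designated initializer keyed by the enum value, name stringified once. */
#define FLAG(name, _t, _f) \
	[FLAG_##name] = { .c_true = _t, .c_false = _f, .desc = #name }

static const struct flag_desc flags[FLAG_COUNT] = {
	FLAG(PROPRIETARY, 'P', 'G'),
	FLAG(WARN,        'W', ' '),
};

#undef FLAG

int main(void)
{
	for (int i = 0; i < FLAG_COUNT; i++)
		printf("[%c]=%s\n", flags[i].c_true, flags[i].desc);
	return 0;	/* prints [P]=PROPRIETARY and [W]=WARN */
}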
@@ -149,4 +149,5 @@ static struct kunit_suite resource_test_suite = {
 };
 kunit_test_suite(resource_test_suite);
 
+MODULE_DESCRIPTION("I/O Port & Memory Resource manager unit tests");
 MODULE_LICENSE("GPL");
@@ -76,7 +76,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 	stats->ac_minflt = tsk->min_flt;
 	stats->ac_majflt = tsk->maj_flt;
 
-	strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
+	strscpy_pad(stats->ac_comm, tsk->comm);
 }
 
 
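The replacement matters because strncpy() does not guarantee NUL termination when the source fills the buffer, while the kernel's strscpy_pad() always terminates, truncates as needed, and zero-fills the rest of the destination (the two-argument form used above derives the size from the fixed-size destination field). A userspace approximation of that behaviour, with the helper name invented and not the kernel implementation:

#include <stdio.h>
#include <string.h>

/*
 * Copy roughly the way strscpy_pad() is documented to behave: always
 * NUL-terminate, truncate if needed, and zero the remainder of dst.
 * Illustrative stand-in only; the real helper also reports truncation.
 */
static size_t copy_pad(char *dst, const char *src, size_t dstsize)
{
	size_t len = strnlen(src, dstsize - 1);

	memcpy(dst, src, len);
	memset(dst + len, 0, dstsize - len);	/* terminator + padding */
	return len;
}

int main(void)
{
	char comm[8];

	copy_pad(comm, "very_long_task_name", sizeof(comm));
	printf("%s\n", comm);	/* "very_lo", always terminated */
	return 0;
}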
@@ -75,11 +75,15 @@ static bool watchdog_check_timestamp(void)
 	__this_cpu_write(last_timestamp, now);
 	return true;
 }
-#else
-static inline bool watchdog_check_timestamp(void)
+
+static void watchdog_init_timestamp(void)
 {
-	return true;
+	__this_cpu_write(nmi_rearmed, 0);
+	__this_cpu_write(last_timestamp, ktime_get_mono_fast_ns());
 }
+#else
+static inline bool watchdog_check_timestamp(void) { return true; }
+static inline void watchdog_init_timestamp(void) { }
 #endif
 
 static struct perf_event_attr wd_hw_attr = {
@@ -161,6 +165,7 @@ void watchdog_hardlockup_enable(unsigned int cpu)
 	if (!atomic_fetch_inc(&watchdog_cpus))
 		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
+	watchdog_init_timestamp();
 	perf_event_enable(this_cpu_read(watchdog_ev));
 }
 
@@ -1043,7 +1043,9 @@ config PANIC_TIMEOUT
	  Set the timeout value (in seconds) until a reboot occurs when
	  the kernel panics. If n = 0, then we wait forever. A timeout
	  value n > 0 will wait n seconds before rebooting, while a timeout
-	  value n < 0 will reboot immediately.
+	  value n < 0 will reboot immediately. This setting can be overridden
+	  with the kernel command line option panic=, and from userspace via
+	  /proc/sys/kernel/panic.
 
 config LOCKUP_DETECTOR
	bool
@@ -449,4 +449,5 @@ asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
 }
 EXPORT_SYMBOL_GPL(asn1_encode_boolean);
 
+MODULE_DESCRIPTION("Simple encoder primitives for ASN.1 BER/DER/CER");
 MODULE_LICENSE("GPL");
@@ -273,4 +273,5 @@ static __exit void test_atomics_exit(void) {}
 module_init(test_atomics_init);
 module_exit(test_atomics_exit);
 
+MODULE_DESCRIPTION("Testsuite for atomic64_t functions");
 MODULE_LICENSE("GPL");
lib/bch.c (20 changed lines)

@@ -479,11 +479,8 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
 		/* find suitable row for elimination */
 		for (r = p; r < m; r++) {
 			if (rows[r] & mask) {
-				if (r != p) {
-					tmp = rows[r];
-					rows[r] = rows[p];
-					rows[p] = tmp;
-				}
+				if (r != p)
+					swap(rows[r], rows[p]);
 				rem = r+1;
 				break;
 			}
@@ -799,21 +796,14 @@ static void gf_poly_div(struct bch_control *bch, struct gf_poly *a,
 static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a,
 				   struct gf_poly *b)
 {
-	struct gf_poly *tmp;
-
 	dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b));
 
-	if (a->deg < b->deg) {
-		tmp = b;
-		b = a;
-		a = tmp;
-	}
+	if (a->deg < b->deg)
+		swap(a, b);
 
 	while (b->deg > 0) {
 		gf_poly_mod(bch, a, b, NULL);
-		tmp = b;
-		b = a;
-		a = tmp;
+		swap(a, b);
 	}
 
 	dbg("%s\n", gf_poly_str(a));
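The conversion leans on the kernel's type-generic swap() macro, which removes the need for a hand-rolled temporary of the right type. A standalone C sketch of the same idea using GNU __typeof__, written in the spirit of the kernel header (the exact kernel definition may differ slightly):

#include <stdio.h>

/* Type-generic swap via a do/while block, as the kernel macro does. */
#define swap(a, b)				\
	do {					\
		__typeof__(a) __tmp = (a);	\
		(a) = (b);			\
		(b) = __tmp;			\
	} while (0)

int main(void)
{
	unsigned int rows[2] = { 7, 42 };
	const char *x = "first", *y = "second";

	swap(rows[0], rows[1]);	/* works for array elements */
	swap(x, y);		/* and for pointers, no cast needed */

	printf("%u %u %s %s\n", rows[0], rows[1], x, y);	/* 42 7 second first */
	return 0;
}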
@@ -151,4 +151,5 @@ static struct kunit_suite bitfields_test_suite = {
 kunit_test_suites(&bitfields_test_suite);
 
 MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_DESCRIPTION("Test cases for bitfield helpers");
 MODULE_LICENSE("GPL");
@@ -73,6 +73,13 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
 	Elf32_Phdr *phdr;
 	int i;
 
+	/*
+	 * FIXME
+	 * Neither ELF spec nor ELF loader require that program headers
+	 * start immediately after ELF header.
+	 */
+	if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
+		return -EINVAL;
 	/* only supports phdr that fits in one page */
 	if (ehdr->e_phnum >
 	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
@@ -98,6 +105,13 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
 	Elf64_Phdr *phdr;
 	int i;
 
+	/*
+	 * FIXME
+	 * Neither ELF spec nor ELF loader require that program headers
+	 * start immediately after ELF header.
+	 */
+	if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
+		return -EINVAL;
 	/* only supports phdr that fits in one page */
 	if (ehdr->e_phnum >
 	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
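For context, e_phoff in the ELF header records where the program header table starts; the new check rejects layouts where the table is not immediately after the ELF header, since only that first page is examined here. A small userspace sketch of the same check using the standard <elf.h> definitions (the check itself, not the kernel's build-id parser):

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Return 0 if the program headers start right after the ELF header. */
static int check_phoff(const unsigned char *buf, size_t len)
{
	Elf64_Ehdr ehdr;

	if (len < sizeof(ehdr))
		return -1;
	memcpy(&ehdr, buf, sizeof(ehdr));

	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
		return -1;		/* not an ELF image */
	if (ehdr.e_phoff != sizeof(Elf64_Ehdr))
		return -1;		/* phdrs not adjacent to the header */
	return 0;
}

int main(void)
{
	/* Fake header for illustration: phdrs placed right after the Ehdr. */
	Elf64_Ehdr fake = { 0 };

	memcpy(fake.e_ident, ELFMAG, SELFMAG);
	fake.e_phoff = sizeof(Elf64_Ehdr);

	printf("check: %d\n",
	       check_phoff((const unsigned char *)&fake, sizeof(fake)));
	return 0;	/* prints 0 */
}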
@@ -639,4 +639,5 @@ static struct kunit_suite checksum_test_suite = {
 kunit_test_suites(&checksum_test_suite);
 
 MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>");
+MODULE_DESCRIPTION("Test cases csum_* APIs");
 MODULE_LICENSE("GPL");
@@ -153,4 +153,5 @@ static struct kunit_suite cmdline_test_suite = {
 };
 kunit_test_suite(cmdline_test_suite);
 
+MODULE_DESCRIPTION("Test cases for API provided by cmdline.c");
 MODULE_LICENSE("GPL");
@@ -83,4 +83,5 @@ static int __init dhry_init(void)
 module_init(dhry_init);
 
 MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("Dhrystone benchmark test module");
 MODULE_LICENSE("GPL");
@@ -54,14 +54,19 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
  */
 void dump_stack_print_info(const char *log_lvl)
 {
-	printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
-	       log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+	printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
+	       log_lvl, raw_smp_processor_id(),
+	       __kuid_val(current_real_cred()->euid),
+	       current->pid, current->comm,
 	       kexec_crash_loaded() ? "Kdump: loaded " : "",
 	       print_tainted(),
 	       init_utsname()->release,
 	       (int)strcspn(init_utsname()->version, " "),
 	       init_utsname()->version, BUILD_ID_VAL);
 
+	if (get_taint())
+		printk("%s%s\n", log_lvl, print_tainted_verbose());
+
 	if (dump_stack_arch_desc_str[0] != '\0')
 		printk("%sHardware name: %s\n",
 		       log_lvl, dump_stack_arch_desc_str);
@@ -1093,4 +1093,5 @@ static struct kunit_suite fortify_test_suite = {
 
 kunit_test_suite(fortify_test_suite);
 
+MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
 MODULE_LICENSE("GPL");
@@ -314,4 +314,5 @@ static struct kunit_suite hashtable_test_module = {
 
 kunit_test_suites(&hashtable_test_module);
 
+MODULE_DESCRIPTION("KUnit test for the Kernel Hashtable structures");
 MODULE_LICENSE("GPL");
@@ -46,4 +46,5 @@ static struct kunit_suite is_signed_type_test_suite = {
 
 kunit_test_suite(is_signed_type_test_suite);
 
+MODULE_DESCRIPTION("is_signed_type() KUnit test suite");
 MODULE_LICENSE("Dual MIT/GPL");
@@ -108,4 +108,5 @@ void rational_best_approximation(
 
 EXPORT_SYMBOL(rational_best_approximation);
 
+MODULE_DESCRIPTION("Rational fraction support library");
 MODULE_LICENSE("GPL v2");
@@ -510,4 +510,5 @@ static struct kunit_suite memcpy_test_suite = {
 
 kunit_test_suite(memcpy_test_suite);
 
+MODULE_DESCRIPTION("test cases for memcpy(), memmove(), and memset()");
 MODULE_LICENSE("GPL");
@@ -1237,4 +1237,5 @@ static struct kunit_suite overflow_test_suite = {
 
 kunit_test_suite(overflow_test_suite);
 
+MODULE_DESCRIPTION("Test cases for arithmetic overflow checks");
 MODULE_LICENSE("Dual MIT/GPL");
@@ -73,17 +73,50 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 EXPORT_SYMBOL(percpu_counter_set);
 
 /*
- * local_irq_save() is needed to make the function irq safe:
- * - The slow path would be ok as protected by an irq-safe spinlock.
- * - this_cpu_add would be ok as it is irq-safe by definition.
- * But:
- * The decision slow path/fast path and the actual update must be atomic, too.
+ * Add to a counter while respecting batch size.
+ *
+ * There are 2 implementations, both dealing with the following problem:
+ *
+ * The decision slow path/fast path and the actual update must be atomic.
  * Otherwise a call in process context could check the current values and
  * decide that the fast path can be used. If now an interrupt occurs before
  * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
  * then the this_cpu_add() that is executed after the interrupt has completed
  * can produce values larger than "batch" or even overflows.
  */
+#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
+/*
+ * Safety against interrupts is achieved in 2 ways:
+ * 1. the fast path uses local cmpxchg (note: no lock prefix)
+ * 2. the slow path operates with interrupts disabled
+ */
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+	s64 count;
+	unsigned long flags;
+
+	count = this_cpu_read(*fbc->counters);
+	do {
+		if (unlikely(abs(count + amount) >= batch)) {
+			raw_spin_lock_irqsave(&fbc->lock, flags);
+			/*
+			 * Note: by now we might have migrated to another CPU
+			 * or the value might have changed.
+			 */
+			count = __this_cpu_read(*fbc->counters);
+			fbc->count += count + amount;
+			__this_cpu_sub(*fbc->counters, count);
+			raw_spin_unlock_irqrestore(&fbc->lock, flags);
+			return;
+		}
+	} while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount));
+}
+#else
+/*
+ * local_irq_save() is used to make the function irq safe:
+ * - The slow path would be ok as protected by an irq-safe spinlock.
+ * - this_cpu_add would be ok as it is irq-safe by definition.
+ */
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
@@ -101,6 +134,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	local_irq_restore(flags);
 }
+#endif
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
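The new fast path above is a compare-and-exchange retry loop: read the local counter, and only drop into the locked slow path when the batch threshold would be crossed. A single-threaded userspace sketch of that control flow using C11 atomics and a plain mutex instead of the kernel's per-CPU machinery (all names invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct batch_counter {
	atomic_long fast;	/* stands in for the per-CPU counter */
	long total;		/* global count, updated under the lock */
	pthread_mutex_t lock;
};

static void counter_add_batch(struct batch_counter *c, long amount, long batch)
{
	long cur = atomic_load(&c->fast);

	do {
		if (labs(cur + amount) >= batch) {
			/* Slow path: fold the local value into the total. */
			pthread_mutex_lock(&c->lock);
			cur = atomic_load(&c->fast);
			c->total += cur + amount;
			atomic_fetch_sub(&c->fast, cur);
			pthread_mutex_unlock(&c->lock);
			return;
		}
		/* Fast path: retry if 'fast' changed under us. */
	} while (!atomic_compare_exchange_weak(&c->fast, &cur, cur + amount));
}

int main(void)
{
	struct batch_counter c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 10; i++)
		counter_add_batch(&c, 5, 32);	/* spills into 'total' at +/-32 */

	printf("fast=%ld total=%ld\n", atomic_load(&c.fast), c.total);
	return 0;
}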
Some files were not shown because too many files have changed in this diff.