linux-next/lib/plist.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * lib/plist.c
 *
 * Descending-priority-sorted double-linked list
 *
 * (C) 2002-2003 Intel Corp
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
 *
 * 2001-2005 (c) MontaVista Software, Inc.
 * Daniel Walker <dwalker@mvista.com>
 *
 * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
 *
 * Simplifications of the original code by
 * Oleg Nesterov <oleg@tv-sign.ru>
 *
 * Based on simple lists (include/linux/list.h).
 *
 * This file contains the add / del functions which are considered to
 * be too large to inline. See include/linux/plist.h for further
 * information.
 */
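
/*
 * Layout summary (see the ASCII diagram in include/linux/plist.h):
 * node_list links every node in ascending ->prio order (the lowest
 * numerical value, i.e. the highest priority, comes first), while
 * prio_list links only the first node of each distinct priority, so
 * insertion only has to walk the distinct priorities.
 */
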
#include <linux/bug.h>
#include <linux/plist.h>
#ifdef CONFIG_DEBUG_PLIST
static struct plist_head test_head;
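
/*
 * With CONFIG_DEBUG_PLIST every plist operation re-walks both lists and
 * verifies that the prev/next linkage of each entry is consistent.
 */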

static void plist_check_prev_next(struct list_head *t, struct list_head *p,
				  struct list_head *n)
{
	WARN(n->prev != p || p->next != n,
			"top: %p, n: %p, p: %p\n"
			"prev: %p, n: %p, p: %p\n"
			"next: %p, n: %p, p: %p\n",
			t, t->next, t->prev,
			p, p->next, p->prev,
			n, n->next, n->prev);
}

static void plist_check_list(struct list_head *top)
{
	struct list_head *prev = top, *next = top->next;

	plist_check_prev_next(top, prev, next);
	while (next != top) {
		WRITE_ONCE(prev, next);
		WRITE_ONCE(next, prev->next);
		plist_check_prev_next(top, prev, next);
	}
}

static void plist_check_head(struct plist_head *head)
{
	if (!plist_head_empty(head))
		plist_check_list(&plist_first(head)->prio_list);
	plist_check_list(&head->node_list);
}
#else
# define plist_check_head(h) do { } while (0)
#endif

/**
 * plist_add - add @node to @head
 *
 * @node:	&struct plist_node pointer
 * @head:	&struct plist_head pointer
 */
void plist_add(struct plist_node *node, struct plist_head *head)
{
	struct plist_node *first, *iter, *prev = NULL, *last, *reverse_iter;
	struct list_head *node_next = &head->node_list;

	plist_check_head(head);
	WARN_ON(!plist_node_empty(node));
	WARN_ON(!list_empty(&node->prio_list));

	if (plist_head_empty(head))
		goto ins_node;

	first = iter = plist_first(head);
	last = reverse_iter = list_entry(first->prio_list.prev, struct plist_node, prio_list);
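
	/*
	 * Scan the prio_list from both ends: @iter moves forward from the
	 * highest priority, @reverse_iter moves backward from the lowest.
	 * Whichever side matches @node's priority first gives the insertion
	 * point, so at most about half of the distinct priorities are
	 * visited.
	 */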
	do {
		if (node->prio < iter->prio) {
			node_next = &iter->node_list;
			break;
		} else if (node->prio >= reverse_iter->prio) {
			prev = reverse_iter;
			iter = list_entry(reverse_iter->prio_list.next,
					struct plist_node, prio_list);
			if (likely(reverse_iter != last))
				node_next = &iter->node_list;
			break;
		}

		prev = iter;
		iter = list_entry(iter->prio_list.next,
				struct plist_node, prio_list);
		reverse_iter = list_entry(reverse_iter->prio_list.prev,
				struct plist_node, prio_list);
	} while (iter != first);

	if (!prev || prev->prio != node->prio)
		list_add_tail(&node->prio_list, &iter->prio_list);
ins_node:
	list_add_tail(&node->node_list, node_next);

	plist_check_head(head);
}

/**
 * plist_del - Remove a @node from plist.
 *
 * @node:	&struct plist_node pointer - entry to be removed
 * @head:	&struct plist_head pointer - list head
 */
void plist_del(struct plist_node *node, struct plist_head *head)
{
	plist_check_head(head);

	if (!list_empty(&node->prio_list)) {
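		/*
		 * @node is the first entry of its priority group; before it
		 * is unlinked, promote its successor on node_list into
		 * prio_list if that successor has the same priority.
		 */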
		if (node->node_list.next != &head->node_list) {
			struct plist_node *next;

			next = list_entry(node->node_list.next,
					struct plist_node, node_list);

			/* add the next plist_node into prio_list */
			if (list_empty(&next->prio_list))
				list_add(&next->prio_list, &node->prio_list);
		}
		list_del_init(&node->prio_list);
	}

	list_del_init(&node->node_list);

	plist_check_head(head);
}

/**
 * plist_requeue - Requeue @node at end of same-prio entries.
 *
 * This is essentially an optimized plist_del() followed by
 * plist_add().  It moves an entry already in the plist to
 * after any other same-priority entries.
 *
 * @node:	&struct plist_node pointer - entry to be moved
 * @head:	&struct plist_head pointer - list head
 */
void plist_requeue(struct plist_node *node, struct plist_head *head)
{
	struct plist_node *iter;
	struct list_head *node_next = &head->node_list;

	plist_check_head(head);
	BUG_ON(plist_head_empty(head));
	BUG_ON(plist_node_empty(node));

	if (node == plist_last(head))
		return;

	iter = plist_next(node);

	if (node->prio != iter->prio)
		return;

	plist_del(node, head);
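
	/*
	 * Walk the nodes that followed @node until a different priority is
	 * found and reinsert @node just before it (or at the list tail),
	 * i.e. after every other entry of its own priority.
	 */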
	plist_for_each_continue(iter, head) {
		if (node->prio != iter->prio) {
			node_next = &iter->node_list;
			break;
		}
	}
	list_add_tail(&node->node_list, node_next);

	plist_check_head(head);
}
#ifdef CONFIG_DEBUG_PLIST
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/module.h>
#include <linux/init.h>
static struct plist_node __initdata test_node[241];
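
/*
 * Verify the plist invariants: node_list is in non-decreasing ->prio
 * order, only the first node of each priority group sits on prio_list,
 * and prio_list links those group leaders in order and wraps back to
 * the first one.
 */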
static void __init plist_test_check(int nr_expect)
{
	struct plist_node *first, *prio_pos, *node_pos;

	if (plist_head_empty(&test_head)) {
		BUG_ON(nr_expect != 0);
		return;
	}

	prio_pos = first = plist_first(&test_head);
	plist_for_each(node_pos, &test_head) {
		if (nr_expect-- < 0)
			break;
		if (node_pos == first)
			continue;
		if (node_pos->prio == prio_pos->prio) {
			BUG_ON(!list_empty(&node_pos->prio_list));
			continue;
		}
		BUG_ON(prio_pos->prio > node_pos->prio);
		BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
		prio_pos = node_pos;
	}

	BUG_ON(nr_expect != 0);
	BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}

static void __init plist_test_requeue(struct plist_node *node)
{
	plist_requeue(node, &test_head);

	if (node != plist_last(&test_head))
		BUG_ON(node->prio == plist_next(node)->prio);
}

static int __init plist_test(void)
{
	int nr_expect = 0, i, loop;
	unsigned int r = local_clock();

	printk(KERN_DEBUG "start plist test\n");
	plist_head_init(&test_head);
	for (i = 0; i < ARRAY_SIZE(test_node); i++)
		plist_node_init(test_node + i, 0);
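
	/*
	 * Pseudo-randomly add, delete and requeue nodes for 1000 rounds,
	 * checking the list invariants after every operation.
	 */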
	for (loop = 0; loop < 1000; loop++) {
		r = r * 193939 % 47629;
		i = r % ARRAY_SIZE(test_node);
		if (plist_node_empty(test_node + i)) {
			r = r * 193939 % 47629;
			test_node[i].prio = r % 99;
			plist_add(test_node + i, &test_head);
			nr_expect++;
		} else {
			plist_del(test_node + i, &test_head);
			nr_expect--;
		}
		plist_test_check(nr_expect);
		if (!plist_node_empty(test_node + i)) {
			plist_test_requeue(test_node + i);
			plist_test_check(nr_expect);
		}
	}

	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
		if (plist_node_empty(test_node + i))
			continue;
		plist_del(test_node + i, &test_head);
		nr_expect--;
		plist_test_check(nr_expect);
	}

	printk(KERN_DEBUG "end plist test\n");

	/* Worst case test for plist_add() */
	unsigned int test_data[241];

	for (i = 0; i < ARRAY_SIZE(test_data); i++)
		test_data[i] = i;

	ktime_t start, end, time_elapsed = 0;

	plist_head_init(&test_head);
	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
		plist_node_init(test_node + i, 0);
		test_node[i].prio = test_data[i];
	}
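
	/*
	 * Priorities were assigned in increasing order, so every plist_add()
	 * below inserts at the tail of prio_list: the former worst case for
	 * a purely forward scan.
	 */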
	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
		if (plist_node_empty(test_node + i)) {
			start = ktime_get();
			plist_add(test_node + i, &test_head);
			end = ktime_get();
			time_elapsed += (end - start);
		}
	}

	pr_debug("plist_add worst case test time elapsed %lld\n", time_elapsed);

	return 0;
}
module_init(plist_test);

#endif