#define pr_fmt(fmt) "list_sort_test: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/list.h>

lib: more scalable list_sort()
XFS and UBIFS can pass long lists to list_sort(); this alternative
implementation scales better, reaching ~3x performance gain when list
length exceeds the L2 cache size.
Stand-alone program timings were run on a Core 2 duo L1=32KB L2=4MB,
gcc-4.4, with flags extracted from an Ubuntu kernel build. Object size is
581 bytes compared to 455 for Mark J. Roberts' code.
Worst case for either implementation is a list length just over a power of
two, and to roughly the same degree, so here are timing results for a
range of 2^N+1 lengths. List elements were 16 bytes each including malloc
overhead; initial order was random.
                        time (msec)
                    Tatham-Roberts
                    |     generic-Mullis-v2
  loop_count  length    |      |    ratio
     4000000       2   206    294   1.427
     2000000       3   176    227   1.289
     1000000       5   199    172   0.864
      500000       9   235    178   0.757
      250000      17   243    182   0.748
      125000      33   261    196   0.750
       62500      65   277    209   0.754
       31250     129   292    219    0.75
       15625     257   317    235   0.741
        7812     513   340    252   0.741
        3906    1025   362    267   0.737
        1953    2049   388    283   0.729  ~ L1 size
         976    4097   556    323   0.580
         488    8193   678    361   0.532
         244   16385   773    395   0.510
         122   32769   844    418   0.495
          61   65537   917    454   0.495
          30  131073  1128    543   0.481
          15  262145  2355    869   0.369  ~ L2 size
           7  524289  5597   1714   0.306
           3 1048577  6218   2022   0.325
Mark's code does not actually implement the usual or generic mergesort,
but rather a variant from Simon Tatham described here:
http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html
Simon's algorithm performs O(log N) passes over the entire input list,
doing merges of sublists that double in size on each pass. The generic
algorithm instead merges pairs of equal length lists as early as possible,
in recursive order. For either algorithm, the elements that extend the
list beyond power-of-two length are a special case, handled as nearly as
possible as a "rounding-up" to a full POT.
Some intuition for the locality of reference implications of merge order
may be gotten by watching this animation:
http://www.sorting-algorithms.com/merge-sort
Simon's algorithm requires only O(1) extra space rather than the generic
algorithm's O(log N), but in my non-recursive implementation the actual
O(log N) data is merely a vector of ~20 pointers, which I've put on the
stack.
Long-running list_sort() calls: If the list passed in may be long, or the
client's cmp() callback function is slow, the client's cmp() may
periodically invoke cond_resched() to voluntarily yield the CPU. All
inner loops of list_sort() call back to cmp().
Stability of the sort: distinct elements that compare equal emerge from
the sort in the same order as with Mark's code, for simple test cases. A
boot-time test is provided to verify this and other correctness
requirements.
A kernel that uses drm.ko appears to run normally with this change; I have
no suitable hardware to similarly test the use by UBIFS.
[akpm@linux-foundation.org: style tweaks, fix comment, make list_sort_test __init]
Signed-off-by: Don Mullis <don.mullis@gmail.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Artem Bityutskiy <dedekind@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
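As an illustration of the calling convention described above (not part of this file), here is a minimal sketch of a client, assuming a hypothetical element type struct foo and wrapper foo_sort(); the cond_resched() call in the comparison callback is the voluntary-yield pattern mentioned for long lists:

/* Illustrative sketch only -- "struct foo", foo_cmp() and foo_sort() are made up. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/sched.h>

struct foo {
	struct list_head list;
	int key;
};

static int foo_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct foo *fa = container_of(a, struct foo, list);
	struct foo *fb = container_of(b, struct foo, list);

	cond_resched();			/* allow voluntary preemption on long lists */
	return fa->key - fb->key;	/* returning 0 on ties preserves input order */
}

static void foo_sort(struct list_head *foo_list)
{
	list_sort(NULL, foo_list, foo_cmp);
}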
#define MAX_LIST_LENGTH_BITS 20
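/*
 * Lists longer than about 2^MAX_LIST_LENGTH_BITS elements are still
 * sorted correctly, but the final merges become less balanced;
 * list_sort() warns once ("list too long for efficiency") when that
 * happens.
 */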

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a?:b;
	return head.next;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
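		/* count is a u8, so the callback fires once every 256 nodes */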
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(nlog(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;

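	/*
	 * part[lev] holds a sorted sub-list of 2^lev elements (NULL when
	 * the slot is empty); each node taken from the input is merged
	 * upward, binary-counter style, until a free slot is found.  Once
	 * the length cap is hit, the top slot simply accumulates the rest.
	 */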
	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
#ifdef CONFIG_TEST_LIST_SORT
#include <linux/random.h>
/*
 * The pattern of set bits in the list length determines which cases
 * are hit in list_sort().
 */
#define TEST_LIST_LEN (512+128+2) /* not including head */

#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C
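/*
 * Each test element brackets its list_head with poison words so that
 * check() can catch the sort scribbling over neighbouring fields.
 */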
struct debug_el {
	unsigned int poison1;
	struct list_head list;
	unsigned int poison2;
	int value;
	unsigned serial;
};

/* Array, containing pointers to all elements in the test list */
static struct debug_el **elts __initdata;

static int __init check(struct debug_el *ela, struct debug_el *elb)
{
	if (ela->serial >= TEST_LIST_LEN) {
		pr_err("error: incorrect serial %d\n", ela->serial);
		return -EINVAL;
	}
	if (elb->serial >= TEST_LIST_LEN) {
		pr_err("error: incorrect serial %d\n", elb->serial);
		return -EINVAL;
	}
	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
		pr_err("error: phantom element\n");
		return -EINVAL;
	}
	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
		pr_err("error: bad poison: %#x/%#x\n",
			ela->poison1, ela->poison2);
		return -EINVAL;
	}
	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
		pr_err("error: bad poison: %#x/%#x\n",
			elb->poison1, elb->poison2);
		return -EINVAL;
	}
	return 0;
}
static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct debug_el *ela, *elb;

	ela = container_of(a, struct debug_el, list);
	elb = container_of(b, struct debug_el, list);

	check(ela, elb);
	return ela->value - elb->value;
}

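/*
 * Boot-time self test: build a list of TEST_LIST_LEN elements with random,
 * deliberately duplicated values, sort it, then walk the result verifying
 * link integrity, ascending order, stability of equal values (by ->serial),
 * per-element identity/poison via check(), and the total element count.
 */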
static int __init list_sort_test(void)
{
        int i, count = 1, err = -ENOMEM;
        struct debug_el *el;
        struct list_head *cur;
        LIST_HEAD(head);

        pr_debug("start testing list_sort()\n");

        elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
        if (!elts) {
                pr_err("error: cannot allocate memory\n");
                return err;
        }

        for (i = 0; i < TEST_LIST_LEN; i++) {
                el = kmalloc(sizeof(*el), GFP_KERNEL);
                if (!el) {
                        pr_err("error: cannot allocate memory\n");
                        goto exit;
                }
                /* force some equivalencies */
                el->value = prandom_u32() % (TEST_LIST_LEN / 3);
                el->serial = i;
                el->poison1 = TEST_POISON1;
                el->poison2 = TEST_POISON2;
                elts[i] = el;
                list_add_tail(&el->list, &head);
        }

        list_sort(NULL, &head, cmp);

        /*
         * Walk the sorted result, verifying linkage, ordering, stability
         * and per-element integrity as we go.
         */
        err = -EINVAL;
        for (cur = head.next; cur->next != &head; cur = cur->next) {
                struct debug_el *el1;
                int cmp_result;

                if (cur->next->prev != cur) {
                        pr_err("error: list is corrupted\n");
                        goto exit;
                }

                cmp_result = cmp(NULL, cur, cur->next);
                if (cmp_result > 0) {
                        pr_err("error: list is not sorted\n");
                        goto exit;
                }

                el = container_of(cur, struct debug_el, list);
                el1 = container_of(cur->next, struct debug_el, list);
                if (cmp_result == 0 && el->serial >= el1->serial) {
                        pr_err("error: order of equivalent elements not preserved\n");
                        goto exit;
                }

                if (check(el, el1)) {
                        pr_err("error: element check failed\n");
                        goto exit;
                }
                count++;
        }
        if (head.prev != cur) {
                pr_err("error: list is corrupted\n");
                goto exit;
        }

        if (count != TEST_LIST_LEN) {
                pr_err("error: bad list length %d\n", count);
                goto exit;
        }

        err = 0;
exit:
        for (i = 0; i < TEST_LIST_LEN; i++)
                kfree(elts[i]);
        kfree(elts);
        return err;
}

module_init(list_sort_test);
#endif /* CONFIG_TEST_LIST_SORT */
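
/*
 * Illustrative usage sketch (added for documentation, not code from the
 * kernel tree): a list_sort() caller embeds a struct list_head in its own
 * element type and supplies a cmp() callback, just as the test above does
 * with struct debug_el.  The names my_item, my_cmp and my_list below are
 * hypothetical; <linux/list.h> and <linux/list_sort.h> are assumed.
 *
 *	struct my_item {
 *		struct list_head list;
 *		int key;
 *	};
 *
 *	static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct my_item *ia = container_of(a, struct my_item, list);
 *		struct my_item *ib = container_of(b, struct my_item, list);
 *
 *		return ia->key - ib->key;
 *	}
 *
 *	LIST_HEAD(my_list);
 *	...
 *	list_sort(NULL, &my_list, my_cmp);
 */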