/*
 * Flexible array managed in PAGE_SIZE parts
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2009
 *
 * Author: Dave Hansen <dave@linux.vnet.ibm.com>
 */

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/reciprocal_div.h>

struct flex_array_part {
	char elements[FLEX_ARRAY_PART_SIZE];
};

/*
 * If a user requests an allocation which is small
 * enough, we may simply use the space in the
 * flex_array->parts[] array to store the user
 * data.
 */
static inline int elements_fit_in_base(struct flex_array *fa)
{
	int data_size = fa->element_size * fa->total_nr_elements;
	if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
		return 1;
	return 0;
}
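
/*
 * Illustrative sketch (not part of the original file; the element type,
 * count and 4 KiB PAGE_SIZE are assumptions): an array created with
 *
 *	fa = flex_array_alloc(sizeof(u32), 100, GFP_KERNEL);
 *
 * needs only 400 bytes of element data, which is well under
 * FLEX_ARRAY_BASE_BYTES_LEFT, so the elements live directly in the
 * base structure's parts[] space and no second-level parts are ever
 * allocated.
 */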

/**
 * flex_array_alloc - allocate a new flexible array
 * @element_size:	the size of individual elements in the array
 * @total:		total number of elements that this should hold
 * @flags:		page allocation flags to use for base array
 *
 * Note: all locking must be provided by the caller.
 *
 * @total is used to size internal structures.  If the user ever
 * accesses any array indexes >=@total, it will produce errors.
 *
 * The maximum number of elements is defined as: the number of
 * elements that can be stored in a page times the number of
 * page pointers that we can fit in the base structure or (using
 * integer math):
 *
 *	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
 *
 * Here's a table showing example capacities.  Note that the maximum
 * index that the get/put() functions allow is just nr_objects-1,
 * which basically means that you get 4MB of storage on 32-bit and
 * 2MB on 64-bit.
 *
 *
 *  Element size | Objects | Objects |
 *  PAGE_SIZE=4k |  32-bit |  64-bit |
 * ---------------------------------|
 *       1 bytes | 4177920 | 2088960 |
 *       2 bytes | 2088960 | 1044480 |
 *       3 bytes | 1392300 |  696150 |
 *       4 bytes | 1044480 |  522240 |
 *      32 bytes |  130560 |   65408 |
 *      33 bytes |  126480 |   63240 |
 *    2048 bytes |    2040 |    1020 |
 *    2049 bytes |    1020 |     510 |
 *        void * | 1044480 |  261120 |
 *
 * Since 64-bit pointers are twice the size, we lose half the
 * capacity in the base structure.  Also note that no effort is made
 * to efficiently pack objects across page boundaries.
 */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
					gfp_t flags)
{
	struct flex_array *ret;
	int elems_per_part = 0;
	int max_size = 0;
	struct reciprocal_value reciprocal_elems = { 0 };

	if (element_size) {
		elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
		reciprocal_elems = reciprocal_value(elems_per_part);
		max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
	}

	/* max_size will end up 0 if element_size > PAGE_SIZE */
	if (total > max_size)
		return NULL;
	ret = kzalloc(sizeof(struct flex_array), flags);
	if (!ret)
		return NULL;
	ret->element_size = element_size;
	ret->total_nr_elements = total;
	ret->elems_per_part = elems_per_part;
	ret->reciprocal_elems = reciprocal_elems;
	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
		memset(&ret->parts[0], FLEX_ARRAY_FREE,
						FLEX_ARRAY_BASE_BYTES_LEFT);
	return ret;
}
EXPORT_SYMBOL(flex_array_alloc);
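
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * element type and counts are assumptions):
 *
 *	struct flex_array *fa;
 *
 *	fa = flex_array_alloc(sizeof(struct my_record), 1000, GFP_KERNEL);
 *	if (!fa)
 *		return -ENOMEM;
 *	...
 *	flex_array_free(fa);
 *
 * Only the base structure is allocated up front; the second-level
 * parts are allocated lazily by flex_array_put() or explicitly by
 * flex_array_prealloc().
 */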

static int fa_element_to_part_nr(struct flex_array *fa,
					unsigned int element_nr)
{
	/*
	 * if element_size == 0 we don't get here, so we never touch
	 * the zeroed fa->reciprocal_elems, which would yield invalid
	 * results
	 */
	return reciprocal_divide(element_nr, fa->reciprocal_elems);
}
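
/*
 * Illustrative note (not part of the original file): the call above is
 * a multiply-and-shift equivalent of the plain division
 *
 *	part_nr = element_nr / fa->elems_per_part;
 *
 * using the reciprocal value precomputed in flex_array_alloc(), which
 * avoids a hardware divide on every lookup.
 */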

/**
 * flex_array_free_parts - just free the second-level pages
 * @fa:		the flex array from which to free parts
 *
 * This is to be used in cases where the base 'struct flex_array'
 * has been statically allocated and should not be freed.
 */
void flex_array_free_parts(struct flex_array *fa)
{
	int part_nr;

	if (elements_fit_in_base(fa))
		return;
	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
		kfree(fa->parts[part_nr]);
}
EXPORT_SYMBOL(flex_array_free_parts);

void flex_array_free(struct flex_array *fa)
{
	flex_array_free_parts(fa);
	kfree(fa);
}
EXPORT_SYMBOL(flex_array_free);

static unsigned int index_inside_part(struct flex_array *fa,
					unsigned int element_nr,
					unsigned int part_nr)
{
	unsigned int part_offset;

	part_offset = element_nr - part_nr * fa->elems_per_part;
	return part_offset * fa->element_size;
}
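
/*
 * Worked example (illustrative numbers, not from the original file):
 * with element_size == 8 and therefore elems_per_part == 512 on 4 KiB
 * pages, element_nr == 1300 maps to
 *
 *	part_nr = 1300 / 512 = 2
 *	byte offset inside that part = (1300 - 2 * 512) * 8 = 2208
 *
 * i.e. the element lives 2208 bytes into fa->parts[2]->elements[].
 */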

static struct flex_array_part *
__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
	struct flex_array_part *part = fa->parts[part_nr];
	if (!part) {
		part = kmalloc(sizeof(struct flex_array_part), flags);
		if (!part)
			return NULL;
		if (!(flags & __GFP_ZERO))
			memset(part, FLEX_ARRAY_FREE,
				sizeof(struct flex_array_part));
		fa->parts[part_nr] = part;
	}
	return part;
}

/**
 * flex_array_put - copy data into the array at @element_nr
 * @fa:		the flex array to copy data into
 * @element_nr:	index of the position in which to insert
 *		the new element.
 * @src:	address of data to copy into the array
 * @flags:	page allocation flags to use for array expansion
 *
 *
 * Note that this *copies* the contents of @src into
 * the array.  If you are trying to store an array of
 * pointers, make sure to pass in &ptr instead of ptr.
 * You may instead wish to use the flex_array_put_ptr()
 * helper function.
 *
 * Locking must be provided by the caller.
 */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
			gfp_t flags)
{
	int part_nr = 0;
	struct flex_array_part *part;
	void *dst;

	if (element_nr >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = __fa_get_part(fa, part_nr, flags);
		if (!part)
			return -ENOMEM;
	}
	dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
	memcpy(dst, src, fa->element_size);
	return 0;
}
EXPORT_SYMBOL(flex_array_put);
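
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * struct and index are assumptions):
 *
 *	struct my_record rec = { .id = 42 };
 *	int err;
 *
 *	err = flex_array_put(fa, 7, &rec, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 * The element is copied, so &rec is passed rather than rec; for
 * storing raw pointers the flex_array_put_ptr() helper wraps up the
 * extra level of indirection.
 */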

/**
 * flex_array_clear - clear element in array at @element_nr
 * @fa:		the flex array of the element.
 * @element_nr:	index of the position to clear.
 *
 * Locking must be provided by the caller.
 */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
	int part_nr = 0;
	struct flex_array_part *part;
	void *dst;

	if (element_nr >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = fa->parts[part_nr];
		if (!part)
			return -EINVAL;
	}
	dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
	return 0;
}
EXPORT_SYMBOL(flex_array_clear);

/**
 * flex_array_prealloc - guarantee that array space exists
 * @fa:		the flex array for which to preallocate parts
 * @start:	index of first array element for which space is allocated
 * @nr_elements: number of elements for which space is allocated
 * @flags:	page allocation flags
 *
 * This will guarantee that no future calls to flex_array_put()
 * will allocate memory.  It can be used if you are expecting to
 * be holding a lock or in some atomic context while writing
 * data into the array.
 *
 * Locking must be provided by the caller.
 */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
			unsigned int nr_elements, gfp_t flags)
{
	int start_part;
	int end_part;
	int part_nr;
	unsigned int end;
	struct flex_array_part *part;

	if (!start && !nr_elements)
		return 0;
	if (start >= fa->total_nr_elements)
		return -ENOSPC;
	if (!nr_elements)
		return 0;

	end = start + nr_elements - 1;

	if (end >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		return 0;
	start_part = fa_element_to_part_nr(fa, start);
	end_part = fa_element_to_part_nr(fa, end);
	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
		part = __fa_get_part(fa, part_nr, flags);
		if (!part)
			return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(flex_array_prealloc);
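
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * lock, index and range are assumptions): preallocate while sleeping
 * is still allowed, then write under a spinlock without risking an
 * allocation in atomic context:
 *
 *	if (flex_array_prealloc(fa, 0, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	spin_lock(&my_lock);
 *	flex_array_put(fa, idx, &rec, GFP_ATOMIC);
 *	spin_unlock(&my_lock);
 *
 * Because the parts covering elements 0..127 already exist, the put
 * cannot fail with -ENOMEM for indexes in that range.
 */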

/**
 * flex_array_get - pull data back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns a pointer to the data at index @element_nr.  Note
 * that this is a copy of the data that was passed in.  If you
 * are using this to store pointers, you'll get back &ptr.  You
 * may instead wish to use the flex_array_get_ptr helper.
 *
 * Locking must be provided by the caller.
 */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
	int part_nr = 0;
	struct flex_array_part *part;

	if (!fa->element_size)
		return NULL;
	if (element_nr >= fa->total_nr_elements)
		return NULL;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = fa->parts[part_nr];
		if (!part)
			return NULL;
	}
	return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
EXPORT_SYMBOL(flex_array_get);
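
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * struct and index are assumptions):
 *
 *	struct my_record *rec;
 *
 *	rec = flex_array_get(fa, 7);
 *	if (rec)
 *		pr_debug("id=%d\n", rec->id);
 *
 * The returned pointer refers to the copy stored inside the array, so
 * writing through it modifies the element in place.
 */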

/**
 * flex_array_get_ptr - pull a ptr back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns the pointer placed in the flex array at element_nr using
 * flex_array_put_ptr().  This function should not be called if the
 * element in question was not set using the _put_ptr() helper.
 */
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
	void **tmp;

	tmp = flex_array_get(fa, element_nr);
	if (!tmp)
		return NULL;

	return *tmp;
}
EXPORT_SYMBOL(flex_array_get_ptr);
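
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * struct, count and index are assumptions): an array of pointers uses
 * the size of a pointer as the element size and the _ptr helpers:
 *
 *	struct my_record *rec = kmalloc(sizeof(*rec), GFP_KERNEL);
 *	struct flex_array *fa;
 *
 *	fa = flex_array_alloc(sizeof(void *), 64, GFP_KERNEL);
 *	flex_array_put_ptr(fa, 3, rec, GFP_KERNEL);
 *	...
 *	rec = flex_array_get_ptr(fa, 3);
 *
 * and flex_array_get_ptr() hands back the same pointer that was
 * stored.
 */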

static int part_is_free(struct flex_array_part *part)
{
	int i;

	for (i = 0; i < sizeof(struct flex_array_part); i++)
		if (part->elements[i] != FLEX_ARRAY_FREE)
			return 0;
	return 1;
}

/**
 * flex_array_shrink - free unused second-level pages
 * @fa:		the flex array to shrink
 *
 * Frees all second-level pages that consist solely of unused
 * elements.  Returns the number of pages freed.
 *
 * Locking must be provided by the caller.
 */
int flex_array_shrink(struct flex_array *fa)
{
	struct flex_array_part *part;
	int part_nr;
	int ret = 0;

	if (!fa->total_nr_elements || !fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		return ret;
	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
		part = fa->parts[part_nr];
		if (!part)
			continue;
		if (part_is_free(part)) {
			fa->parts[part_nr] = NULL;
			kfree(part);
			ret++;
		}
	}
	return ret;
}
EXPORT_SYMBOL(flex_array_shrink);
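
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * index range is an assumption): elements are marked unused with
 * flex_array_clear(), and once every element backed by a given part
 * has been cleared, flex_array_shrink() returns that part's page to
 * the allocator:
 *
 *	unsigned int i;
 *	int freed;
 *
 *	for (i = 0; i < 1024; i++)
 *		flex_array_clear(fa, i);
 *	freed = flex_array_shrink(fa);
 *
 * Shrinking relies on the FLEX_ARRAY_FREE poison pattern, so parts
 * allocated with __GFP_ZERO (which skips the poisoning) will not be
 * detected as free.
 */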