Merge branch 'akpm' (fixes from Andrew)
Merge misc fixes from Andrew Morton:

 - A bunch of fixes

 - Finish off the idr API conversions before someone starts to use the
   old interfaces again.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  idr: idr_alloc() shouldn't trigger lowmem warning when preloaded
  UAPI: fix endianness conditionals in M32R's asm/stat.h
  UAPI: fix endianness conditionals in linux/raid/md_p.h
  UAPI: fix endianness conditionals in linux/acct.h
  UAPI: fix endianness conditionals in linux/aio_abi.h
  decompressors: fix typo "POWERPC"
  mm/fremap.c: fix oops on error path
  idr: deprecate idr_pre_get() and idr_get_new[_above]()
  tidspbridge: convert to idr_alloc()
  zcache: convert to idr_alloc()
  mlx4: remove leftover idr_pre_get() call
  workqueue: convert to idr_alloc()
  nfsd: convert to idr_alloc()
  nfsd: remove unused get_new_stid()
  kernel/signal.c: use __ARCH_HAS_SA_RESTORER instead of SA_RESTORER
  signal: always clear sa_restorer on execve
  mm: remove_memory(): fix end_pfn setting
  include/linux/res_counter.h needs errno.h
commit 842d223f28
16 changed files with 144 additions and 174 deletions
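Most of the conversions in this merge replace the old two-step idr interface, where idr_pre_get() stocked a free list and idr_get_new()/idr_get_new_above() had to be retried on -EAGAIN, with the single-call idr_alloc(). A minimal sketch of the new calling convention, with every identifier other than the idr functions invented for illustration:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_idr);		/* illustrative instance */

	/*
	 * idr_alloc() hands back the allocated id (>= 0) on success or a
	 * negative errno (-ENOMEM, -ENOSPC) on failure, so the old
	 * pre_get-then-retry dance disappears.
	 */
	static int example_assign_id(void *obj)
	{
		int id = idr_alloc(&example_idr, obj, 0, 0, GFP_KERNEL);

		if (id < 0)
			return id;

		/* a real user would record id in obj here */
		return 0;
	}

The start/end pair (0, 0 above) bounds the id range; the nfsd hunk further down shows a non-zero start.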
@@ -63,10 +63,10 @@ struct stat64 {
 	long long st_size;
 	unsigned long st_blksize;
 
-#if defined(__BIG_ENDIAN)
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
 	unsigned long __pad4;	/* future possible st_blocks high bits */
 	unsigned long st_blocks;	/* Number 512-byte blocks allocated. */
-#elif defined(__LITTLE_ENDIAN)
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
 	unsigned long st_blocks;	/* Number 512-byte blocks allocated. */
 	unsigned long __pad4;	/* future possible st_blocks high bits */
 #else
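The hunk above, like the acct.h, aio_abi.h and raid/md_p.h hunks later in this diff, fixes the same problem: a userspace build sees glibc's <endian.h>, where __BIG_ENDIAN and __LITTLE_ENDIAN are both always defined and only __BYTE_ORDER distinguishes them, so "#if defined(__BIG_ENDIAN)" is always true there; the kernel, by contrast, defines exactly one of the two. The ternary inside #if copes with both worlds. A stand-alone sketch of the pattern, with a hypothetical header and macro name:

	/* example_abi.h -- hypothetical exported header */
	#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
	#define EXAMPLE_BYTEORDER	0x80	/* records are big endian */
	#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
	#define EXAMPLE_BYTEORDER	0x00	/* records are little endian */
	#else
	#error unspecified endianness
	#endif

defined(__BYTE_ORDER) selects the userspace test when <endian.h> is in play and falls back to the kernel-style single-macro test otherwise.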
@@ -362,7 +362,6 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
 	INIT_LIST_HEAD(&dev->sriov.cm_list);
 	dev->sriov.sl_id_map = RB_ROOT;
 	idr_init(&dev->sriov.pv_id_table);
-	idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
 }
 
 /* slave = -1 ==> all slaves */
@@ -76,37 +76,28 @@ int drv_insert_node_res_element(void *hnode, void *node_resource,
 	struct node_res_object **node_res_obj =
 	    (struct node_res_object **)node_resource;
 	struct process_context *ctxt = (struct process_context *)process_ctxt;
-	int status = 0;
 	int retval;
 
 	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
-	if (!*node_res_obj) {
-		status = -ENOMEM;
-		goto func_end;
-	}
+	if (!*node_res_obj)
+		return -ENOMEM;
 
 	(*node_res_obj)->node = hnode;
-	retval = idr_get_new(ctxt->node_id, *node_res_obj,
-			     &(*node_res_obj)->id);
-	if (retval == -EAGAIN) {
-		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
-			pr_err("%s: OUT OF MEMORY\n", __func__);
-			status = -ENOMEM;
-			goto func_end;
-		}
-
-		retval = idr_get_new(ctxt->node_id, *node_res_obj,
-				     &(*node_res_obj)->id);
+	retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
+	if (retval >= 0) {
+		(*node_res_obj)->id = retval;
+		return 0;
 	}
-	if (retval) {
+
+	kfree(*node_res_obj);
+
+	if (retval == -ENOSPC) {
 		pr_err("%s: FAILED, IDR is FULL\n", __func__);
-		status = -EFAULT;
+		return -EFAULT;
+	} else {
+		pr_err("%s: OUT OF MEMORY\n", __func__);
+		return -ENOMEM;
 	}
-func_end:
-	if (status)
-		kfree(*node_res_obj);
-
-	return status;
 }
 
 /* Release all Node resources and its context
@@ -201,35 +192,26 @@ int drv_proc_insert_strm_res_element(void *stream_obj,
 	struct strm_res_object **pstrm_res =
 	    (struct strm_res_object **)strm_res;
 	struct process_context *ctxt = (struct process_context *)process_ctxt;
-	int status = 0;
 	int retval;
 
 	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
-	if (*pstrm_res == NULL) {
-		status = -EFAULT;
-		goto func_end;
-	}
+	if (*pstrm_res == NULL)
+		return -EFAULT;
 
 	(*pstrm_res)->stream = stream_obj;
-	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
-			     &(*pstrm_res)->id);
-	if (retval == -EAGAIN) {
-		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
-			pr_err("%s: OUT OF MEMORY\n", __func__);
-			status = -ENOMEM;
-			goto func_end;
-		}
-
-		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
-				     &(*pstrm_res)->id);
+	retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL);
+	if (retval >= 0) {
+		(*pstrm_res)->id = retval;
+		return 0;
 	}
-	if (retval) {
+
+	if (retval == -ENOSPC) {
 		pr_err("%s: FAILED, IDR is FULL\n", __func__);
-		status = -EPERM;
+		return -EPERM;
+	} else {
+		pr_err("%s: OUT OF MEMORY\n", __func__);
+		return -ENOMEM;
 	}
-
-func_end:
-	return status;
 }
 
 static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
@@ -300,27 +300,22 @@ static u8 r2net_num_from_nn(struct r2net_node *nn)
 
 static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
 {
-	int ret = 0;
+	int ret;
 
-	do {
-		if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
-			ret = -EAGAIN;
-			break;
-		}
-		spin_lock(&nn->nn_lock);
-		ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
-		if (ret == 0)
-			list_add_tail(&nsw->ns_node_item,
-				      &nn->nn_status_list);
-		spin_unlock(&nn->nn_lock);
-	} while (ret == -EAGAIN);
+	spin_lock(&nn->nn_lock);
+	ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
+	if (ret >= 0) {
+		nsw->ns_id = ret;
+		list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
+	}
+	spin_unlock(&nn->nn_lock);
 
-	if (ret == 0) {
+	if (ret >= 0) {
 		init_waitqueue_head(&nsw->ns_wq);
 		nsw->ns_sys_status = R2NET_ERR_NONE;
 		nsw->ns_status = 0;
+		return 0;
 	}
 
 	return ret;
 }
 
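The function above has to allocate while holding nn->nn_lock, so the conversion passes GFP_ATOMIC straight to idr_alloc(). The alternative the new API offers, and the case the lib/idr.c fix at the bottom of this diff is concerned with, is to preload outside the lock and allocate with GFP_NOWAIT inside it. A hedged sketch of that variant, with made-up names around the real idr and spinlock calls:

	#include <linux/idr.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	static int example_prep(struct idr *idr, spinlock_t *lock, void *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; fills a per-cpu buffer */
		spin_lock(lock);
		id = idr_alloc(idr, obj, 0, 0, GFP_NOWAIT);
		spin_unlock(lock);
		idr_preload_end();		/* re-enables preemption */

		return id < 0 ? id : 0;
	}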
@@ -230,37 +230,6 @@ static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
 		__nfs4_file_put_access(fp, oflag);
 }
 
-static inline int get_new_stid(struct nfs4_stid *stid)
-{
-	static int min_stateid = 0;
-	struct idr *stateids = &stid->sc_client->cl_stateids;
-	int new_stid;
-	int error;
-
-	error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
-	/*
-	 * Note: the necessary preallocation was done in
-	 * nfs4_alloc_stateid().  The idr code caps the number of
-	 * preallocations that can exist at a time, but the state lock
-	 * prevents anyone from using ours before we get here:
-	 */
-	WARN_ON_ONCE(error);
-	/*
-	 * It shouldn't be a problem to reuse an opaque stateid value.
-	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
-	 * example, a stray write retransmission could be accepted by
-	 * the server when it should have been rejected.  Therefore,
-	 * adopt a trick from the sctp code to attempt to maximize the
-	 * amount of time until an id is reused, by ensuring they always
-	 * "increase" (mod INT_MAX):
-	 */
-
-	min_stateid = new_stid+1;
-	if (min_stateid == INT_MAX)
-		min_stateid = 0;
-	return new_stid;
-}
-
 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
 kmem_cache *slab)
 {
@@ -273,9 +242,8 @@ kmem_cache *slab)
 	if (!stid)
 		return NULL;
 
-	if (!idr_pre_get(stateids, GFP_KERNEL))
-		goto out_free;
-	if (idr_get_new_above(stateids, stid, min_stateid, &new_id))
+	new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL);
+	if (new_id < 0)
 		goto out_free;
 	stid->sc_client = cl;
 	stid->sc_type = 0;
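The third argument to idr_alloc() replaces idr_get_new_above()'s starting_id: the call returns the lowest free id that is at least start and strictly below end, with end <= 0 meaning no upper bound, which is how the hunk above keeps handing out stateids at or above the existing min_stateid cursor. A small illustrative helper (not nfsd code) showing the same "ids never go backwards" use of the start parameter:

	#include <linux/idr.h>
	#include <linux/gfp.h>
	#include <linux/kernel.h>

	/* Illustrative only: allocate monotonically increasing ids,
	 * wrapping to 0 near INT_MAX the way the old nfsd helper did.
	 */
	static int example_alloc_increasing(struct idr *idr, void *ptr, int *cursor)
	{
		int id = idr_alloc(idr, ptr, *cursor, 0, GFP_KERNEL);

		if (id < 0)
			return id;

		*cursor = (id + 1 == INT_MAX) ? 0 : id + 1;
		return id;
	}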
@@ -73,8 +73,6 @@ struct idr {
  */
 
 void *idr_find_slowpath(struct idr *idp, int id);
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
 void idr_preload(gfp_t gfp_mask);
 int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
 int idr_for_each(struct idr *idp,
@@ -119,19 +117,6 @@ static inline void *idr_find(struct idr *idr, int id)
 	return idr_find_slowpath(idr, id);
 }
 
-/**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
- *
- * Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
- */
-static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
-{
-	return idr_get_new_above(idp, ptr, 0, id);
-}
-
 /**
  * idr_for_each_entry - iterate over an idr's elements of a given type
  * @idp: idr handle
@@ -143,7 +128,56 @@ static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
 	     entry != NULL;						\
 	     ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
 
-void __idr_remove_all(struct idr *idp);	/* don't use */
+/*
+ * Don't use the following functions.  These exist only to suppress
+ * deprecated warnings on EXPORT_SYMBOL()s.
+ */
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
+void __idr_remove_all(struct idr *idp);
+
+/**
+ * idr_pre_get - reserve resources for idr allocation
+ * @idp: idr handle
+ * @gfp_mask: memory allocation flags
+ *
+ * Part of old alloc interface.  This is going away.  Use
+ * idr_preload[_end]() and idr_alloc() instead.
+ */
+static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+{
+	return __idr_pre_get(idp, gfp_mask);
+}
+
+/**
+ * idr_get_new_above - allocate new idr entry above or equal to a start id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ *
+ * Part of old alloc interface.  This is going away.  Use
+ * idr_preload[_end]() and idr_alloc() instead.
+ */
+static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
+						 int starting_id, int *id)
+{
+	return __idr_get_new_above(idp, ptr, starting_id, id);
+}
+
+/**
+ * idr_get_new - allocate new idr entry
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: pointer to the allocated handle
+ *
+ * Part of old alloc interface.  This is going away.  Use
+ * idr_preload[_end]() and idr_alloc() instead.
+ */
+static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
+{
+	return __idr_get_new_above(idp, ptr, 0, id);
+}
 
 /**
  * idr_remove_all - remove all ids from the given idr tree
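The wrappers added above are pure deprecation plumbing: if the exported functions themselves carried __deprecated, the kernel's own definitions and EXPORT_SYMBOL() lines would trigger the warning too, so the real symbols are renamed with a __ prefix and the old names survive only as warned-about inlines. A generic sketch of the idiom with invented function names:

	/* old_api.h -- illustrative header */
	#include <linux/compiler.h>

	int __do_old_thing(int arg);		/* real, exported implementation */

	/* Kept only for existing callers; new code should not use it. */
	static inline int __deprecated do_old_thing(int arg)
	{
		return __do_old_thing(arg);	/* warning fires only at call sites */
	}

	/* old_api.c -- illustrative implementation */
	#include <linux/export.h>
	#include "old_api.h"

	int __do_old_thing(int arg)
	{
		return arg;			/* stand-in body */
	}
	EXPORT_SYMBOL(__do_old_thing);		/* no deprecation warning here */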
@@ -14,6 +14,7 @@
  */
 
 #include <linux/cgroup.h>
+#include <linux/errno.h>
 
 /*
  * The core object. the cgroup that wishes to account for some
@@ -107,10 +107,12 @@ struct acct_v3
 #define ACORE 0x08	/* ... dumped core */
 #define AXSIG 0x10	/* ... was killed by a signal */
 
-#ifdef __BIG_ENDIAN
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
 #define ACCT_BYTEORDER 0x80	/* accounting file is big endian */
-#else
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
 #define ACCT_BYTEORDER 0x00	/* accounting file is little endian */
+#else
+#error unspecified endianness
 #endif
 
 #ifndef __KERNEL__
|
@ -62,9 +62,9 @@ struct io_event {
|
|||
__s64 res2; /* secondary result */
|
||||
};
|
||||
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
|
||||
#define PADDED(x,y) x, y
|
||||
#elif defined(__BIG_ENDIAN)
|
||||
#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
|
||||
#define PADDED(x,y) y, x
|
||||
#else
|
||||
#error edit for your odd byteorder.
|
||||
|
|
|
@@ -145,16 +145,18 @@ typedef struct mdp_superblock_s {
 	__u32 failed_disks;	/* 4 Number of failed disks */
 	__u32 spare_disks;	/* 5 Number of spare disks */
 	__u32 sb_csum;		/* 6 checksum of the whole superblock */
-#ifdef __BIG_ENDIAN
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
 	__u32 events_hi;	/* 7 high-order of superblock update count */
 	__u32 events_lo;	/* 8 low-order of superblock update count */
 	__u32 cp_events_hi;	/* 9 high-order of checkpoint update count */
 	__u32 cp_events_lo;	/* 10 low-order of checkpoint update count */
-#else
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
 	__u32 events_lo;	/* 7 low-order of superblock update count */
 	__u32 events_hi;	/* 8 high-order of superblock update count */
 	__u32 cp_events_lo;	/* 9 low-order of checkpoint update count */
 	__u32 cp_events_hi;	/* 10 high-order of checkpoint update count */
+#else
+#error unspecified endianness
 #endif
 	__u32 recovery_cp;	/* 11 recovery checkpoint sector count */
 	/* There are only valid for minor_version > 90 */
@@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 		if (force_default || ka->sa.sa_handler != SIG_IGN)
 			ka->sa.sa_handler = SIG_DFL;
 		ka->sa.sa_flags = 0;
+#ifdef __ARCH_HAS_SA_RESTORER
+		ka->sa.sa_restorer = NULL;
+#endif
 		sigemptyset(&ka->sa.sa_mask);
 		ka++;
 	}
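This hunk pairs with the "signal: always clear sa_restorer on execve" patch in the same series: flush_signal_handlers() runs during exec, and a stale sa_restorer pointer surviving into the new program leaks an address from the pre-exec image, which helps defeat ASLR. The field only exists on architectures that declare it, hence the guard is the kernel-internal __ARCH_HAS_SA_RESTORER rather than the userspace-visible SA_RESTORER flag. A hedged sketch of the per-entry scrub, mirroring the hunk with an invented helper name:

	#include <linux/signal.h>

	static inline void example_scrub_sigaction(struct k_sigaction *ka)
	{
		ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
	#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;	/* compiled away where the field is absent */
	#endif
		sigemptyset(&ka->sa.sa_mask);
	}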
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;
 
 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /*
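A small convention mismatch shows up in this conversion: worker_pool_assign_id()'s callers expect 0 on success, but idr_alloc() returns the id itself, hence the final "return ret < 0 ? ret : 0;". The same wrapping outside workqueue code, with invented names:

	#include <linux/idr.h>
	#include <linux/gfp.h>
	#include <linux/mutex.h>

	static DEFINE_IDR(example_obj_idr);		/* illustrative */
	static DEFINE_MUTEX(example_obj_idr_mutex);	/* illustrative */

	struct example_obj {
		int id;
	};

	/* 0 or -errno for callers, even though idr_alloc() returns the id. */
	static int example_obj_assign_id(struct example_obj *obj)
	{
		int ret;

		mutex_lock(&example_obj_idr_mutex);
		ret = idr_alloc(&example_obj_idr, obj, 0, 0, GFP_KERNEL);
		if (ret >= 0)
			obj->id = ret;
		mutex_unlock(&example_obj_idr_mutex);

		return ret < 0 ? ret : 0;
	}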
lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache.  We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones.  As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context.  See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
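Read together, the two idr_layer_alloc() hunks above reorder the layer sources so that a preloaded idr_alloc(..., GFP_NOWAIT) no longer spews an allocation-failure warning when the slab attempt fails under memory pressure even though the per-cpu preload buffer could have satisfied it: the first kmem_cache try is silenced with __GFP_NOWARN, the preload buffer is consulted next, and only the final attempt is allowed to warn. A condensed restatement of the resulting order (a paraphrase, not the exact lib/idr.c text):

	static struct idr_layer *layer_source_order(gfp_t gfp, struct idr *layer_idr)
	{
		struct idr_layer *new;

		if (layer_idr)					/* 1. idr-private free list */
			return get_from_free_list(layer_idr);

		new = kmem_cache_zalloc(idr_layer_cache, gfp | __GFP_NOWARN);
		if (new)					/* 2. slab, silently */
			return new;

		if (!in_interrupt()) {				/* 3. per-cpu preload buffer */
			preempt_disable();
			new = __this_cpu_read(idr_preload_head);
			if (new) {
				__this_cpu_write(idr_preload_head, new->ary[0]);
				__this_cpu_dec(idr_preload_cnt);
				new->ary[0] = NULL;
			}
			preempt_enable();
			if (new)
				return new;
		}

		return kmem_cache_zalloc(idr_layer_cache, gfp);	/* 4. slab, may warn */
	}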
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible.  This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,7 +207,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -375,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -406,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -907,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
@@ -15,7 +15,7 @@ config XZ_DEC_X86
 
 config XZ_DEC_POWERPC
 	bool "PowerPC BCJ filter decoder"
-	default y if POWERPC
+	default y if PPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64
@@ -163,7 +163,8 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	 * and that the remapped range is valid and fully within
 	 * the single existing vma.
 	 */
-	if (!vma || !(vma->vm_flags & VM_SHARED))
+	vm_flags = vma->vm_flags;
+	if (!vma || !(vm_flags & VM_SHARED))
 		goto out;
 
 	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
@@ -254,7 +255,8 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	 */
 
 out:
-	vm_flags = vma->vm_flags;
+	if (vma)
+		vm_flags = vma->vm_flags;
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
@@ -1801,7 +1801,7 @@ int __ref remove_memory(int nid, u64 start, u64 size)
 	int retry = 1;
 
 	start_pfn = PFN_DOWN(start);
-	end_pfn = start_pfn + PFN_DOWN(size);
+	end_pfn = PFN_UP(start + size - 1);
 
 	/*
 	 * When CONFIG_MEMCG is on, one memory block may be used by other
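The old formula rounds twice in the wrong direction: PFN_DOWN() truncates an address to its page frame number, so start_pfn + PFN_DOWN(size) can stop one frame short of the block's last byte whenever start or size is not page aligned, and the final partial frame escapes the walk. PFN_UP(start + size - 1) rounds the last byte up instead. A worked example with 4 KiB pages, using the standard pfn.h definitions:

	/*
	 * PFN_DOWN(x) = x >> PAGE_SHIFT
	 * PFN_UP(x)   = (x + PAGE_SIZE - 1) >> PAGE_SHIFT
	 *
	 * Unaligned block: start = 0x0800, size = 0x1000
	 * (bytes 0x0800..0x17ff, touching frames 0 and 1)
	 *
	 *   old: end_pfn = PFN_DOWN(0x0800) + PFN_DOWN(0x1000) = 0 + 1 = 1
	 *        -> exclusive end 1, frame 1 wrongly left out
	 *   new: end_pfn = PFN_UP(0x0800 + 0x1000 - 1) = PFN_UP(0x17ff) = 2
	 *        -> exclusive end 2, both frames covered
	 */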