ceph: convert encode/decode macros to inlines
This avoids the fugly pass-by-reference and makes the code a bit easier to read.

Signed-off-by: Sage Weil <sage@newdream.net>
parent 535bbb5307
commit c89136ea42
6 changed files with 137 additions and 132 deletions
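Before the diff, a quick illustration of the change in call-site shape. This is a standalone userspace sketch with made-up names (`DEMO_DECODE_32`, `demo_decode_32`), not the kernel helpers; it only mirrors the pattern the commit switches to: decoders that return the value and advance the position pointer, instead of macros that write through a reference.

```c
#include <stdint.h>
#include <stdio.h>

/* old style: the macro writes into 'v' behind the caller's back */
#define DEMO_DECODE_32(p, v)                                        \
        do {                                                        \
                const uint8_t *_b = *(p);                           \
                (v) = (uint32_t)_b[0] | ((uint32_t)_b[1] << 8) |    \
                      ((uint32_t)_b[2] << 16) |                     \
                      ((uint32_t)_b[3] << 24);                      \
                *(p) += sizeof(uint32_t);                           \
        } while (0)

/* new style: an inline returns the value, so the assignment is explicit */
static inline uint32_t demo_decode_32(const uint8_t **p)
{
        const uint8_t *b = *p;
        uint32_t v = (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
                     ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);

        *p += sizeof(uint32_t);
        return v;
}

int main(void)
{
        const uint8_t buf[] = { 0x2a, 0, 0, 0, 0x07, 0, 0, 0 };
        const uint8_t *p = buf;
        uint32_t a = 0, b = 0;

        DEMO_DECODE_32(&p, a);          /* macro form: pass-by-reference */
        b = demo_decode_32(&p);         /* inline form: plain assignment */
        printf("%u %u\n", a, b);        /* prints: 42 7 */
        return 0;
}
```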
fs/ceph/decode.h (129 changed lines)
@@ -3,12 +3,44 @@
 #include <asm/unaligned.h>
 
 #include "types.h"
 
 /*
  * in all cases,
  *   void **p     pointer to position pointer
  *   void *end    pointer to end of buffer (last byte + 1)
  */
 
+static inline u64 ceph_decode_64(void **p)
+{
+	u64 v = get_unaligned_le64(*p);
+	*p += sizeof(u64);
+	return v;
+}
+static inline u32 ceph_decode_32(void **p)
+{
+	u32 v = get_unaligned_le32(*p);
+	*p += sizeof(u32);
+	return v;
+}
+static inline u16 ceph_decode_16(void **p)
+{
+	u16 v = get_unaligned_le16(*p);
+	*p += sizeof(u16);
+	return v;
+}
+static inline u8 ceph_decode_8(void **p)
+{
+	u8 v = *(u8 *)*p;
+	(*p)++;
+	return v;
+}
+static inline void ceph_decode_copy(void **p, void *pv, size_t n)
+{
+	memcpy(pv, *p, n);
+	*p += n;
+}
+
 /*
  * bounds check input.
  */
@@ -18,48 +50,20 @@
 			goto bad;			\
 	} while (0)
 
-#define ceph_decode_64(p, v)				\
-	do {						\
-		v = get_unaligned_le64(*(p));		\
-		*(p) += sizeof(u64);			\
-	} while (0)
-#define ceph_decode_32(p, v)				\
-	do {						\
-		v = get_unaligned_le32(*(p));		\
-		*(p) += sizeof(u32);			\
-	} while (0)
-#define ceph_decode_16(p, v)				\
-	do {						\
-		v = get_unaligned_le16(*(p));		\
-		*(p) += sizeof(u16);			\
-	} while (0)
-#define ceph_decode_8(p, v)				\
-	do {						\
-		v = *(u8 *)*(p);			\
-		(*p)++;					\
-	} while (0)
-
-#define ceph_decode_copy(p, pv, n)			\
-	do {						\
-		memcpy(pv, *(p), n);			\
-		*(p) += n;				\
-	} while (0)
-
 /* bounds check too */
 #define ceph_decode_64_safe(p, end, v, bad)		\
 	do {						\
 		ceph_decode_need(p, end, sizeof(u64), bad);	\
-		ceph_decode_64(p, v);			\
+		v = ceph_decode_64(p);			\
 	} while (0)
 #define ceph_decode_32_safe(p, end, v, bad)		\
 	do {						\
 		ceph_decode_need(p, end, sizeof(u32), bad);	\
-		ceph_decode_32(p, v);			\
+		v = ceph_decode_32(p);			\
 	} while (0)
 #define ceph_decode_16_safe(p, end, v, bad)		\
 	do {						\
 		ceph_decode_need(p, end, sizeof(u16), bad);	\
-		ceph_decode_16(p, v);			\
+		v = ceph_decode_16(p);			\
 	} while (0)
 
 #define ceph_decode_copy_safe(p, end, pv, n, bad)	\
@@ -71,41 +75,42 @@
 /*
  * struct ceph_timespec <-> struct timespec
  */
-#define ceph_decode_timespec(ts, tv)				\
-	do {							\
-		(ts)->tv_sec = le32_to_cpu((tv)->tv_sec);	\
-		(ts)->tv_nsec = le32_to_cpu((tv)->tv_nsec);	\
-	} while (0)
-#define ceph_encode_timespec(tv, ts)				\
-	do {							\
-		(tv)->tv_sec = cpu_to_le32((ts)->tv_sec);	\
-		(tv)->tv_nsec = cpu_to_le32((ts)->tv_nsec);	\
-	} while (0)
+static inline void ceph_decode_timespec(struct timespec *ts,
+					struct ceph_timespec *tv)
+{
+	ts->tv_sec = le32_to_cpu(tv->tv_sec);
+	ts->tv_nsec = le32_to_cpu(tv->tv_nsec);
+}
+static inline void ceph_encode_timespec(struct ceph_timespec *tv,
+					struct timespec *ts)
+{
+	tv->tv_sec = cpu_to_le32(ts->tv_sec);
+	tv->tv_nsec = cpu_to_le32(ts->tv_nsec);
+}
 
 /*
  * encoders
  */
-#define ceph_encode_64(p, v)				\
-	do {						\
-		put_unaligned_le64(v, (__le64 *)*(p));	\
-		*(p) += sizeof(u64);			\
-	} while (0)
-#define ceph_encode_32(p, v)				\
-	do {						\
-		put_unaligned_le32(v, (__le32 *)*(p));	\
-		*(p) += sizeof(u32);			\
-	} while (0)
-#define ceph_encode_16(p, v)				\
-	do {						\
-		put_unaligned_le16(v, (__le16 *)*(p));	\
-		*(p) += sizeof(u16);			\
-	} while (0)
-#define ceph_encode_8(p, v)				\
-	do {						\
-		*(u8 *)*(p) = v;			\
-		(*(p))++;				\
-	} while (0)
+static inline void ceph_encode_64(void **p, u64 v)
+{
+	put_unaligned_le64(v, (__le64 *)*p);
+	*p += sizeof(u64);
+}
+static inline void ceph_encode_32(void **p, u32 v)
+{
+	put_unaligned_le32(v, (__le32 *)*p);
+	*p += sizeof(u32);
+}
+static inline void ceph_encode_16(void **p, u16 v)
+{
+	put_unaligned_le16(v, (__le16 *)*p);
+	*p += sizeof(u16);
+}
+static inline void ceph_encode_8(void **p, u8 v)
+{
+	*(u8 *)*p = v;
+	(*p)++;
+}
 
 /*
  * filepath, string encoders
fs/ceph/mds_client.c

@@ -136,9 +136,9 @@ static int parse_reply_info_dir(void **p, void *end,
 		goto bad;
 
 	ceph_decode_need(p, end, sizeof(num) + 2, bad);
-	ceph_decode_32(p, num);
-	ceph_decode_8(p, info->dir_end);
-	ceph_decode_8(p, info->dir_complete);
+	num = ceph_decode_32(p);
+	info->dir_end = ceph_decode_8(p);
+	info->dir_complete = ceph_decode_8(p);
 	if (num == 0)
 		goto done;
 
@@ -160,7 +160,7 @@ static int parse_reply_info_dir(void **p, void *end,
 	while (num) {
 		/* dentry */
 		ceph_decode_need(p, end, sizeof(u32)*2, bad);
-		ceph_decode_32(p, info->dir_dname_len[i]);
+		info->dir_dname_len[i] = ceph_decode_32(p);
 		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
 		info->dir_dname[i] = *p;
 		*p += info->dir_dname_len[i];
@@ -1791,10 +1791,10 @@ static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 	from_mds = le64_to_cpu(msg->hdr.src.name.num);
 
 	ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
-	ceph_decode_64(&p, tid);
-	ceph_decode_32(&p, next_mds);
-	ceph_decode_32(&p, fwd_seq);
-	ceph_decode_8(&p, must_resend);
+	tid = ceph_decode_64(&p);
+	next_mds = ceph_decode_32(&p);
+	fwd_seq = ceph_decode_32(&p);
+	must_resend = ceph_decode_8(&p);
 
 	WARN_ON(must_resend);  /* shouldn't happen. */
 
@@ -2783,8 +2783,8 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 		pr_err("got mdsmap with wrong fsid\n");
 		return;
 	}
-	ceph_decode_32(&p, epoch);
-	ceph_decode_32(&p, maplen);
+	epoch = ceph_decode_32(&p);
+	maplen = ceph_decode_32(&p);
 	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
 
 	/* do we need it? */
fs/ceph/mdsmap.c

@@ -60,21 +60,21 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 	ceph_decode_16_safe(p, end, version, bad);
 
 	ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
-	ceph_decode_32(p, m->m_epoch);
-	ceph_decode_32(p, m->m_client_epoch);
-	ceph_decode_32(p, m->m_last_failure);
-	ceph_decode_32(p, m->m_root);
-	ceph_decode_32(p, m->m_session_timeout);
-	ceph_decode_32(p, m->m_session_autoclose);
-	ceph_decode_64(p, m->m_max_file_size);
-	ceph_decode_32(p, m->m_max_mds);
+	m->m_epoch = ceph_decode_32(p);
+	m->m_client_epoch = ceph_decode_32(p);
+	m->m_last_failure = ceph_decode_32(p);
+	m->m_root = ceph_decode_32(p);
+	m->m_session_timeout = ceph_decode_32(p);
+	m->m_session_autoclose = ceph_decode_32(p);
+	m->m_max_file_size = ceph_decode_64(p);
+	m->m_max_mds = ceph_decode_32(p);
 
 	m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
 	if (m->m_info == NULL)
 		goto badmem;
 
 	/* pick out active nodes from mds_info (state > 0) */
-	ceph_decode_32(p, n);
+	n = ceph_decode_32(p);
 	for (i = 0; i < n; i++) {
 		u32 namelen;
 		s32 mds, inc, state;
@@ -86,18 +86,18 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 
 		ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad);
 		ceph_decode_copy(p, &addr, sizeof(addr));
-		ceph_decode_8(p, infoversion);
-		ceph_decode_32(p, namelen);  /* skip mds name */
+		infoversion = ceph_decode_8(p);
+		namelen = ceph_decode_32(p);  /* skip mds name */
 		*p += namelen;
 
 		ceph_decode_need(p, end,
 				 4*sizeof(u32) + sizeof(u64) +
 				 sizeof(addr) + sizeof(struct ceph_timespec),
 				 bad);
-		ceph_decode_32(p, mds);
-		ceph_decode_32(p, inc);
-		ceph_decode_32(p, state);
-		ceph_decode_64(p, state_seq);
+		mds = ceph_decode_32(p);
+		inc = ceph_decode_32(p);
+		state = ceph_decode_32(p);
+		state_seq = ceph_decode_64(p);
 		*p += sizeof(addr);
 		*p += sizeof(struct ceph_timespec);
 		*p += sizeof(u32);
@@ -123,8 +123,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 				kcalloc(num_export_targets, sizeof(u32),
 					GFP_NOFS);
 			for (j = 0; j < num_export_targets; j++)
-				ceph_decode_32(&pexport_targets,
-					       m->m_info[mds].export_targets[j]);
+				m->m_info[mds].export_targets[j] =
+					ceph_decode_32(&pexport_targets);
 		} else {
 			m->m_info[mds].export_targets = NULL;
 		}
@@ -139,8 +139,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 		goto badmem;
 	ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
 	for (i = 0; i < n; i++)
-		ceph_decode_32(p, m->m_data_pg_pools[i]);
-	ceph_decode_32(p, m->m_cas_pg_pool);
+		m->m_data_pg_pools[i] = ceph_decode_32(p);
+	m->m_cas_pg_pool = ceph_decode_32(p);
 
 	/* ok, we don't care about the rest. */
 	dout("mdsmap_decode success epoch %u\n", m->m_epoch);
fs/ceph/mon_client.c

@@ -45,9 +45,9 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
 
 	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
-	ceph_decode_32(&p, epoch);
+	epoch = ceph_decode_32(&p);
 
-	ceph_decode_32(&p, num_mon);
+	num_mon = ceph_decode_32(&p);
 	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);
 
 	if (num_mon >= CEPH_MAX_MON)
fs/ceph/osd_client.c

@@ -894,8 +894,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 	dout(" %d inc maps\n", nr_maps);
 	while (nr_maps > 0) {
 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
-		ceph_decode_32(&p, epoch);
-		ceph_decode_32(&p, maplen);
+		epoch = ceph_decode_32(&p);
+		maplen = ceph_decode_32(&p);
 		ceph_decode_need(&p, end, maplen, bad);
 		next = p + maplen;
 		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
@@ -927,8 +927,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 	dout(" %d full maps\n", nr_maps);
 	while (nr_maps) {
 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
-		ceph_decode_32(&p, epoch);
-		ceph_decode_32(&p, maplen);
+		epoch = ceph_decode_32(&p);
+		maplen = ceph_decode_32(&p);
 		ceph_decode_need(&p, end, maplen, bad);
 		if (nr_maps > 1) {
 			dout("skipping non-latest full map %u len %d\n",
fs/ceph/osdmap.c

@@ -67,7 +67,7 @@ static int crush_decode_uniform_bucket(void **p, void *end,
 {
 	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
 	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
-	ceph_decode_32(p, b->item_weight);
+	b->item_weight = ceph_decode_32(p);
 	return 0;
 bad:
 	return -EINVAL;
@@ -86,8 +86,8 @@ static int crush_decode_list_bucket(void **p, void *end,
 		return -ENOMEM;
 	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
 	for (j = 0; j < b->h.size; j++) {
-		ceph_decode_32(p, b->item_weights[j]);
-		ceph_decode_32(p, b->sum_weights[j]);
+		b->item_weights[j] = ceph_decode_32(p);
+		b->sum_weights[j] = ceph_decode_32(p);
 	}
 	return 0;
 bad:
@@ -105,7 +105,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
 		return -ENOMEM;
 	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
 	for (j = 0; j < b->num_nodes; j++)
-		ceph_decode_32(p, b->node_weights[j]);
+		b->node_weights[j] = ceph_decode_32(p);
 	return 0;
 bad:
 	return -EINVAL;
@@ -124,8 +124,8 @@ static int crush_decode_straw_bucket(void **p, void *end,
 		return -ENOMEM;
 	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
 	for (j = 0; j < b->h.size; j++) {
-		ceph_decode_32(p, b->item_weights[j]);
-		ceph_decode_32(p, b->straws[j]);
+		b->item_weights[j] = ceph_decode_32(p);
+		b->straws[j] = ceph_decode_32(p);
 	}
 	return 0;
 bad:
@@ -148,15 +148,15 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		return ERR_PTR(-ENOMEM);
 
 	ceph_decode_need(p, end, 4*sizeof(u32), bad);
-	ceph_decode_32(p, magic);
+	magic = ceph_decode_32(p);
 	if (magic != CRUSH_MAGIC) {
 		pr_err("crush_decode magic %x != current %x\n",
 		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
 		goto bad;
 	}
-	ceph_decode_32(p, c->max_buckets);
-	ceph_decode_32(p, c->max_rules);
-	ceph_decode_32(p, c->max_devices);
+	c->max_buckets = ceph_decode_32(p);
+	c->max_rules = ceph_decode_32(p);
+	c->max_devices = ceph_decode_32(p);
 
 	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
 	if (c->device_parents == NULL)
@@ -208,11 +208,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 			goto badmem;
 
 		ceph_decode_need(p, end, 4*sizeof(u32), bad);
-		ceph_decode_32(p, b->id);
-		ceph_decode_16(p, b->type);
-		ceph_decode_16(p, b->alg);
-		ceph_decode_32(p, b->weight);
-		ceph_decode_32(p, b->size);
+		b->id = ceph_decode_32(p);
+		b->type = ceph_decode_16(p);
+		b->alg = ceph_decode_16(p);
+		b->weight = ceph_decode_32(p);
+		b->size = ceph_decode_32(p);
 
 		dout("crush_decode bucket size %d off %x %p to %p\n",
 		     b->size, (int)(*p-start), *p, end);
@@ -227,7 +227,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 
 		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
 		for (j = 0; j < b->size; j++)
-			ceph_decode_32(p, b->items[j]);
+			b->items[j] = ceph_decode_32(p);
 
 		switch (b->alg) {
 		case CRUSH_BUCKET_UNIFORM:
@@ -290,9 +290,9 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
 		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
 		for (j = 0; j < r->len; j++) {
-			ceph_decode_32(p, r->steps[j].op);
-			ceph_decode_32(p, r->steps[j].arg1);
-			ceph_decode_32(p, r->steps[j].arg2);
+			r->steps[j].op = ceph_decode_32(p);
+			r->steps[j].arg1 = ceph_decode_32(p);
+			r->steps[j].arg2 = ceph_decode_32(p);
 		}
 	}
 
@@ -411,11 +411,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
 	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
 	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
-	ceph_decode_32(p, map->epoch);
+	map->epoch = ceph_decode_32(p);
 	ceph_decode_copy(p, &map->created, sizeof(map->created));
 	ceph_decode_copy(p, &map->modified, sizeof(map->modified));
 
-	ceph_decode_32(p, map->num_pools);
+	map->num_pools = ceph_decode_32(p);
 	map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool),
 			       GFP_NOFS);
 	if (!map->pg_pool) {
@@ -425,7 +425,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	ceph_decode_32_safe(p, end, max, bad);
 	while (max--) {
 		ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad);
-		ceph_decode_32(p, i);
+		i = ceph_decode_32(p);
 		if (i >= map->num_pools)
 			goto bad;
 		ceph_decode_copy(p, &map->pg_pool[i].v,
@@ -438,7 +438,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
 	ceph_decode_32_safe(p, end, map->flags, bad);
 
-	ceph_decode_32(p, max);
+	max = ceph_decode_32(p);
 
 	/* (re)alloc osd arrays */
 	err = osdmap_set_max_osd(map, max);
@@ -456,7 +456,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
 	*p += 4; /* skip length field (should match max) */
 	for (i = 0; i < map->max_osd; i++)
-		ceph_decode_32(p, map->osd_weight[i]);
+		map->osd_weight[i] = ceph_decode_32(p);
 
 	*p += 4; /* skip length field (should match max) */
 	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
@@ -469,8 +469,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		struct ceph_pg_mapping *pg;
 
 		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
-		ceph_decode_64(p, pgid);
-		ceph_decode_32(p, n);
+		pgid = ceph_decode_64(p);
+		n = ceph_decode_32(p);
 		ceph_decode_need(p, end, n * sizeof(u32), bad);
 		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
 		if (!pg) {
@@ -480,7 +480,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		pg->pgid = pgid;
 		pg->len = n;
 		for (j = 0; j < n; j++)
-			ceph_decode_32(p, pg->osds[j]);
+			pg->osds[j] = ceph_decode_32(p);
 
 		err = __insert_pg_mapping(pg, &map->pg_temp);
 		if (err)
@@ -537,10 +537,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
 			 bad);
 	ceph_decode_copy(p, &fsid, sizeof(fsid));
-	ceph_decode_32(p, epoch);
+	epoch = ceph_decode_32(p);
 	BUG_ON(epoch != map->epoch+1);
 	ceph_decode_copy(p, &modified, sizeof(modified));
-	ceph_decode_32(p, new_flags);
+	new_flags = ceph_decode_32(p);
 
 	/* full map? */
 	ceph_decode_32_safe(p, end, len, bad);
@@ -568,7 +568,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	ceph_decode_need(p, end, 5*sizeof(u32), bad);
 
 	/* new max? */
-	ceph_decode_32(p, max);
+	max = ceph_decode_32(p);
 	if (max >= 0) {
 		err = osdmap_set_max_osd(map, max);
 		if (err < 0)
@@ -641,8 +641,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	while (len--) {
 		u32 osd, off;
 		ceph_decode_need(p, end, sizeof(u32)*2, bad);
-		ceph_decode_32(p, osd);
-		ceph_decode_32(p, off);
+		osd = ceph_decode_32(p);
+		off = ceph_decode_32(p);
 		pr_info("osd%d weight 0x%x %s\n", osd, off,
 		     off == CEPH_OSD_IN ? "(in)" :
 		     (off == CEPH_OSD_OUT ? "(out)" : ""));
@@ -659,8 +659,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		u64 pgid;
 		u32 pglen;
 		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
-		ceph_decode_64(p, pgid);
-		ceph_decode_32(p, pglen);
+		pgid = ceph_decode_64(p);
+		pglen = ceph_decode_32(p);
 
 		/* remove any? */
 		while (rbp && rb_entry(rbp, struct ceph_pg_mapping,
@@ -683,7 +683,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		pg->pgid = pgid;
 		pg->len = pglen;
 		for (j = 0; j < len; j++)
-			ceph_decode_32(p, pg->osds[j]);
+			pg->osds[j] = ceph_decode_32(p);
 		err = __insert_pg_mapping(pg, &map->pg_temp);
 		if (err)
 			goto bad;
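One detail worth noting from the decode.h hunks above: the bounds-checked `*_safe` variants stay macros even after the conversion, because they `goto` a caller-supplied error label, which an inline function cannot do; only the value decode inside them now calls the new inlines. Below is a standalone userspace sketch of that shape, again with a made-up name (`DEMO_DECODE_32_SAFE`) rather than the kernel macro.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* sketch: a bounds-checked decode that jumps to a caller-provided label
 * when fewer than 4 bytes remain in the buffer */
#define DEMO_DECODE_32_SAFE(p, end, v, bad)                         \
        do {                                                        \
                if ((size_t)((end) - *(p)) < sizeof(uint32_t))      \
                        goto bad;                                   \
                memcpy(&(v), *(p), sizeof(uint32_t));               \
                *(p) += sizeof(uint32_t);                           \
        } while (0)

int main(void)
{
        uint8_t buf[] = { 0x2a, 0, 0, 0 };        /* exactly one u32 */
        uint8_t *p = buf, *end = buf + sizeof(buf);
        uint32_t a = 0, b = 0;

        DEMO_DECODE_32_SAFE(&p, end, a, bad);     /* ok: 4 bytes available */
        DEMO_DECODE_32_SAFE(&p, end, b, bad);     /* buffer exhausted: jumps */
        printf("unreachable\n");
        return 0;
bad:
        /* host-endian read; prints 42 on a little-endian machine */
        printf("decoded %u, then hit end of buffer\n", a);
        return 1;
}
```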