ce39f9d17c
This patch adds support for the packed write command of eMMC 4.5 devices. Several write requests can be grouped into one packed command, so that the data of all the individual requests is sent in a single transfer on the bus. Writing a large amount of data in one transfer, rather than many small transfers, is more efficient for the eMMC device's internal write handling. As a result, packed commands improve write throughput. The following tables show the results of packed write.

Type A:
test      none | packed
iozone    25.8 | 31
tiotest   27.6 | 31.2
lmdd      31.2 | 35.4

Type B:
test      none | packed
iozone    44.1 | 51.1
tiotest   47.9 | 52.5
lmdd      51.6 | 59.2

Type C:
test      none | packed
iozone    19.5 | 32
tiotest   19.9 | 34.5
lmdd      22.8 | 40.7

Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Reviewed-by: Maya Erez <merez@codeaurora.org>
Reviewed-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
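The commit message describes the mechanism but not the on-bus format. For context: an eMMC 4.5 packed write is preceded by a header block that lists, for every grouped request, its block count and start address, after which the data of all requests follows back to back. The fragment below is only a rough sketch of how such a header buffer could be filled; the layout constants and the packed_write_entry descriptor are illustrative assumptions, not the mmc block driver's actual code.

/*
 * Sketch only: assembling a packed-write header buffer.
 * Layout assumed here: dword 0 = version | (direction << 8) | (entries << 16),
 * followed by one (block count, start address) pair per grouped request.
 * The real header block is padded out to a full data block before it is sent.
 */
#include <stdint.h>
#include <stddef.h>

#define PACKED_HDR_VERSION  0x01   /* assumed version field value */
#define PACKED_HDR_WRITE    0x02   /* assumed "packed write" direction value */

struct packed_write_entry {        /* hypothetical per-request descriptor */
	uint32_t nr_blocks;        /* number of blocks to write */
	uint32_t start_addr;       /* start address (block number) */
};

static size_t build_packed_wr_hdr(uint32_t *hdr,
				  const struct packed_write_entry *ent,
				  uint32_t nr_entries)
{
	uint32_t i;

	/* dword 0: header version, direction (write), number of entries */
	hdr[0] = PACKED_HDR_VERSION | (PACKED_HDR_WRITE << 8) | (nr_entries << 16);

	/* one (block count, start address) pair per packed request */
	for (i = 0; i < nr_entries; i++) {
		hdr[1 + 2 * i] = ent[i].nr_blocks;
		hdr[2 + 2 * i] = ent[i].start_addr;
	}

	return 1 + 2 * nr_entries;   /* number of 32-bit words written */
}

With a buffer like the cmd_hdr[] array of struct mmc_packed (declared in the header below), content of this kind is transferred first, followed by the data of every write in the same order as the header entries.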
drivers/mmc/card/queue.h · 74 lines · 1.8 KiB · C
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H

struct request;
struct task_struct;

struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	sbc;	/* SET_BLOCK_COUNT (CMD23) */
	struct mmc_command	cmd;	/* data command */
	struct mmc_command	stop;	/* STOP_TRANSMISSION (CMD12) */
	struct mmc_data		data;
};

enum mmc_packed_type {
	MMC_PACKED_NONE = 0,
	MMC_PACKED_WRITE,
};

#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)

struct mmc_packed {
	struct list_head	list;		/* requests grouped into this packed command */
	u32			cmd_hdr[1024];	/* packed command header block */
	unsigned int		blocks;		/* total data blocks of all packed requests */
	u8			nr_entries;	/* number of packed requests */
	u8			retries;
	s16			idx_failure;	/* index of the failed entry, -1 if none */
};

struct mmc_queue_req {
	struct request		*req;
	struct mmc_blk_request	brq;
	struct scatterlist	*sg;
	char			*bounce_buf;
	struct scatterlist	*bounce_sg;
	unsigned int		bounce_sg_len;
	struct mmc_async_req	mmc_active;
	enum mmc_packed_type	cmd_type;
	struct mmc_packed	*packed;
};

struct mmc_queue {
	struct mmc_card		*card;
	struct task_struct	*thread;
	struct semaphore	thread_sem;
	unsigned int		flags;
#define MMC_QUEUE_SUSPENDED	(1 << 0)
#define MMC_QUEUE_NEW_REQUEST	(1 << 1)

	int			(*issue_fn)(struct mmc_queue *, struct request *);
	void			*data;
	struct request_queue	*queue;
	struct mmc_queue_req	mqrq[2];
	struct mmc_queue_req	*mqrq_cur;
	struct mmc_queue_req	*mqrq_prev;
};

extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
			  const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);

extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
				     struct mmc_queue_req *);
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);

extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
extern void mmc_packed_clean(struct mmc_queue *);

#endif
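The declarations above end with mmc_packed_init()/mmc_packed_clean(); their definitions live in queue.c, not in this header. As a hedged sketch of what the per-slot setup behind that interface might look like (assuming kzalloc-backed allocation for both mqrq slots; the exact body in queue.c may differ), it could be along these lines:

/*
 * Sketch only: one plausible shape for mmc_packed_init()/mmc_packed_clean()
 * against the structures declared above; not necessarily the actual
 * queue.c implementation.  Assumes <linux/slab.h>, <linux/list.h> and
 * "queue.h" are included.
 */
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed)
		return -ENOMEM;

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		return -ENOMEM;
	}

	/* no requests packed yet, no recorded failure */
	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);
	mqrq_cur->packed->idx_failure = -1;
	mqrq_prev->packed->idx_failure = -1;

	return 0;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	kfree(mq->mqrq[0].packed);
	mq->mqrq[0].packed = NULL;
	kfree(mq->mqrq[1].packed);
	mq->mqrq[1].packed = NULL;
}

Once a slot's packed state exists, the cmd_type field of the corresponding mmc_queue_req records whether its request was issued as part of a packed group, and the mmc_packed_cmd()/mmc_packed_wr() helpers above let the issue and completion paths branch on that.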