[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [Qemu-stable] [PATCH 1/1] virtio-blk: Use a req pool in
From: |
Fam Zheng |
Subject: |
Re: [Qemu-devel] [Qemu-stable] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free |
Date: |
Thu, 27 Mar 2014 16:15:28 +0800 |
User-agent: |
Mutt/1.5.23 (2014-03-12) |
Please send this patch to address@hidden
On Tue, 03/25 16:42, Li, Zhen-Hua wrote:
> From: "Li, ZhenHua" <address@hidden>
>
> In virtio-blk module, when there is new request, new req structure
> will be created by malloc. Use a req pool instead of this, will increase
s/Use/Using/
> performance;
>
> Increacement: about 5% to 10%.
s/Increacement/Improvement/
>
> Signed-off-by: Li, ZhenHua <address@hidden>
> ---
> hw/block/virtio-blk.c | 87
> ++++++++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 75 insertions(+), 12 deletions(-)
>
> diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
> index 8a568e5..da5b570 100644
> --- a/hw/block/virtio-blk.c
> +++ b/hw/block/virtio-blk.c
> @@ -39,6 +39,70 @@ typedef struct VirtIOBlockReq
> BlockAcctCookie acct;
> } VirtIOBlockReq;
>
> +#define POOL_PAGE 512
> +static VirtIOBlockReq * * req_pool;
> +static char * req_pool_used;
Why char *? It looks like a bool array.
> +static unsigned long req_pool_size = 0;
Bad coding style (and below), please use scripts/checkpatch.pl to check your
patch before posting.
> +
> +static void remalloc_reqs(void){
Bikeshedding, but remalloc? realloc?
> + unsigned long old_size = req_pool_size;
> + unsigned long int i;
> + char * old_used = req_pool_used;
> + VirtIOBlockReq * * old_pool = req_pool;
> +
> + req_pool_size += POOL_PAGE;
> + req_pool_used = (char * )malloc(req_pool_size * sizeof(char));
Please use g_malloc0 and friends. No need to do an explicit type cast.
Thanks,
Fam
> + req_pool = (VirtIOBlockReq * * )malloc(req_pool_size *
> sizeof(VirtIOBlockReq *));
> +
> + if(old_size != 0){
> + memcpy(req_pool_used, old_used, old_size*(sizeof(char)));
> + memcpy(req_pool, old_pool, old_size*(sizeof(VirtIOBlockReq *)));
> + }
> + for(i=old_size; i<req_pool_size; i++){
> + req_pool[i] = (VirtIOBlockReq *)malloc(sizeof(VirtIOBlockReq));
> + req_pool_used[i] = 0;
> + }
> +
> + if(old_size != 0){
> + free(old_used);
> + free(old_pool);
> + }
> +}
> +static VirtIOBlockReq * req_pool_get_new(void){
> + unsigned long int i;
> + char * used;
> + VirtIOBlockReq * * req;
> +
> + if(req_pool_size == 0){
> + remalloc_reqs();
> + }
> + for(i=0, used=req_pool_used, req=req_pool;
> + i<req_pool_size; i++, used ++, req++){
> + if(*used == 0){
> + *used = 1;
> + return *req;
> + }
> + }
> + remalloc_reqs();
> + req_pool_used[req_pool_size-POOL_PAGE] = 1;
> + *req = req_pool[req_pool_size-POOL_PAGE];
> + return *req;
> +}
> +
> +static void virtio_blk_free_request(VirtIOBlockReq *req0){
> + unsigned long int i;
> + char * used;
> + VirtIOBlockReq * * req;
> +
> + for(i=0, used=req_pool_used, req=req_pool;
> + i<req_pool_size; i++, used ++, req++){
> + if(*req == req0){
> + *used = 0;
> + }
> + }
> +}
> +
> +
> static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
> {
> VirtIOBlock *s = req->dev;
> @@ -63,7 +127,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req,
> int error,
> } else if (action == BDRV_ACTION_REPORT) {
> virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
> bdrv_acct_done(s->bs, &req->acct);
> - g_free(req);
> + virtio_blk_free_request(req);
> }
>
> bdrv_error_action(s->bs, action, is_read, error);
> @@ -84,7 +148,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
>
> virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
> bdrv_acct_done(req->dev->bs, &req->acct);
> - g_free(req);
> + virtio_blk_free_request(req);
> }
>
> static void virtio_blk_flush_complete(void *opaque, int ret)
> @@ -99,25 +163,24 @@ static void virtio_blk_flush_complete(void *opaque, int
> ret)
>
> virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
> bdrv_acct_done(req->dev->bs, &req->acct);
> - g_free(req);
> + virtio_blk_free_request(req);
> }
> -
> static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
> {
> - VirtIOBlockReq *req = g_malloc(sizeof(*req));
> + VirtIOBlockReq *req ;
> + req = req_pool_get_new();
> req->dev = s;
> req->qiov.size = 0;
> req->next = NULL;
> return req;
> }
> -
> static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
> {
> VirtIOBlockReq *req = virtio_blk_alloc_request(s);
>
> if (req != NULL) {
> if (!virtqueue_pop(s->vq, &req->elem)) {
> - g_free(req);
> + virtio_blk_free_request(req);
> return NULL;
> }
> }
> @@ -142,7 +205,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
> */
> if (req->elem.out_num < 2 || req->elem.in_num < 3) {
> virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
> - g_free(req);
> + virtio_blk_free_request(req);
> return;
> }
>
> @@ -232,7 +295,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
> stl_p(&req->scsi->data_len, hdr.dxfer_len);
>
> virtio_blk_req_complete(req, status);
> - g_free(req);
> + virtio_blk_free_request(req);
> return;
> #else
> abort();
> @@ -242,7 +305,7 @@ fail:
> /* Just put anything nonzero so that the ioctl fails in the guest. */
> stl_p(&req->scsi->errors, 255);
> virtio_blk_req_complete(req, status);
> - g_free(req);
> + virtio_blk_free_request(req);
> }
>
> typedef struct MultiReqBuffer {
> @@ -375,7 +438,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
> s->blk.serial ? s->blk.serial : "",
> MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
> virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
> - g_free(req);
> + virtio_blk_free_request(req);
> } else if (type & VIRTIO_BLK_T_OUT) {
> qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
> req->elem.out_num - 1);
> @@ -387,7 +450,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
> virtio_blk_handle_read(req);
> } else {
> virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
> - g_free(req);
> + virtio_blk_free_request(req);
> }
> }
>
> --
> 1.7.10.4
>
>
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- Re: [Qemu-devel] [Qemu-stable] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free,
Fam Zheng <=