Subject: [Qemu-devel] [PATCH v3 7/7] virtio-scsi: Handle TMF request cancellation asynchronously
From: Fam Zheng
Date: Thu, 25 Sep 2014 10:20:49 +0800
For VIRTIO_SCSI_T_TMF_ABORT_TASK and VIRTIO_SCSI_T_TMF_ABORT_TASK_SET,
use scsi_req_cancel_async to start the cancellation.
Because each TMF command may cancel multiple requests, we need a counter
to track how many cancellations are still outstanding before the TMF
itself can be completed.
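
For illustration only (not part of the patch), here is a minimal standalone
sketch of the shared-counter pattern that the tracker/notifier pair below
implements. The names CancelTracker, CancelNotifier and cancel_notify are
hypothetical simplifications of the real VirtIOSCSICancelTracker,
VirtIOSCSICancelNotifier and virtio_scsi_cancel_notify:

#include <stdio.h>
#include <stdlib.h>

/* One tracker is shared by all notifiers spawned for a single TMF;
 * the TMF is completed only when the last notifier fires. */
typedef struct CancelTracker {
    int remaining;              /* cancellations still outstanding */
} CancelTracker;

typedef struct CancelNotifier {
    CancelTracker *tracker;
} CancelNotifier;

/* Called once per cancelled request.  Because 'remaining' is set to the
 * full count before any cancellation is started, an early callback can
 * never drive it to zero prematurely. */
static void cancel_notify(CancelNotifier *n)
{
    if (--n->tracker->remaining == 0) {
        printf("last cancellation done, completing the TMF\n");
        free(n->tracker);
    }
    free(n);
}

int main(void)
{
    int cancel_count = 3;       /* pretend three requests match the TMF */
    CancelTracker *tracker = malloc(sizeof(*tracker));
    tracker->remaining = cancel_count;

    for (int i = 0; i < cancel_count; i++) {
        CancelNotifier *n = malloc(sizeof(*n));
        n->tracker = tracker;
        cancel_notify(n);       /* asynchronous in the real device code */
    }
    return 0;
}

In the patch itself the allocations use g_slice_new() and completion goes
through virtio_scsi_complete_req() rather than printing.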
Signed-off-by: Fam Zheng <address@hidden>
---
hw/scsi/virtio-scsi.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 78 insertions(+), 7 deletions(-)
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index fa36e23..7a6b71a 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -208,12 +208,40 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
return req;
}
-static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
+typedef struct {
+ VirtIOSCSIReq *tmf_req;
+ int remaining;
+} VirtIOSCSICancelTracker;
+
+typedef struct {
+ Notifier notifier;
+ VirtIOSCSICancelTracker *tracker;
+} VirtIOSCSICancelNotifier;
+
+static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
+{
+ VirtIOSCSICancelNotifier *n = container_of(notifier,
+ VirtIOSCSICancelNotifier,
+ notifier);
+
+ if (--n->tracker->remaining == 0) {
+ virtio_scsi_complete_req(n->tracker->tmf_req);
+ g_slice_free(VirtIOSCSICancelTracker, n->tracker);
+ }
+ g_slice_free(VirtIOSCSICancelNotifier, n);
+}
+
+/* Return 0 if the request is ready to be completed and returned to the guest;
+ * -EINPROGRESS if the request is submitted and will be completed later, in the
+ * case of async cancellation. */
+static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
SCSIRequest *r, *next;
BusChild *kid;
int target;
+ int ret = 0;
+ int cancel_count;
if (s->dataplane_started && bdrv_get_aio_context(d->conf.bs) != s->ctx) {
aio_context_acquire(s->ctx);
@@ -251,7 +279,18 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
*/
req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
} else {
- scsi_req_cancel(r);
+ VirtIOSCSICancelNotifier *notifier;
+ VirtIOSCSICancelTracker *tracker;
+
+ notifier = g_slice_new(VirtIOSCSICancelNotifier);
+ notifier->notifier.notify
+ = virtio_scsi_cancel_notify;
+ tracker = g_slice_new(VirtIOSCSICancelTracker);
+ tracker->tmf_req = req;
+ tracker->remaining = 1;
+ notifier->tracker = tracker;
+ scsi_req_cancel_async(r, &notifier->notifier);
+ ret = -EINPROGRESS;
}
}
break;
@@ -277,6 +316,7 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
+ cancel_count = 0;
QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
if (r->hba_private) {
if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
@@ -286,10 +326,35 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
break;
} else {
- scsi_req_cancel(r);
+ /* Before we actually cancel any requests in the next for
+ * loop, let's count them. This way, even if the bus starts
+ * calling back to the notifier before we finish this loop,
+ * the counter, whose value is already seen in
+ * virtio_scsi_cancel_notify, will prevent us from
+ * completing the TMF prematurely. */
+ cancel_count++;
}
}
}
+ if (cancel_count) {
+ VirtIOSCSICancelNotifier *notifier;
+ VirtIOSCSICancelTracker *tracker;
+
+ tracker = g_slice_new(VirtIOSCSICancelTracker);
+ tracker->tmf_req = req;
+ tracker->remaining = cancel_count;
+
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ if (r->hba_private) {
+ notifier = g_slice_new(VirtIOSCSICancelNotifier);
+ notifier->notifier.notify = virtio_scsi_cancel_notify;
+ notifier->tracker = tracker;
+ scsi_req_cancel_async(r, &notifier->notifier);
+ }
+ }
+ ret = -EINPROGRESS;
+ }
+
break;
case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
@@ -310,20 +375,22 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
break;
}
- return;
+ return ret;
incorrect_lun:
req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
- return;
+ return ret;
fail:
req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ return ret;
}
void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
VirtIODevice *vdev = (VirtIODevice *)s;
int type;
+ int r = 0;
if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&type, sizeof(type)) < sizeof(type)) {
@@ -337,7 +404,7 @@ void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
virtio_scsi_bad_req();
} else {
- virtio_scsi_do_tmf(s, req);
+ r = virtio_scsi_do_tmf(s, req);
}
} else if (req->req.tmf.type == VIRTIO_SCSI_T_AN_QUERY ||
@@ -350,7 +417,11 @@ void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
req->resp.an.response = VIRTIO_SCSI_S_OK;
}
}
- virtio_scsi_complete_req(req);
+ if (r == 0) {
+ virtio_scsi_complete_req(req);
+ } else {
+ assert(r == -EINPROGRESS);
+ }
}
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
--
1.9.3