/*
 * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2007 Vladislav Bolkhovitin
 * Copyright (C) 2007 CMS Distribution Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <asm/ioctls.h>
#include <linux/delay.h>
	RX_INIT_BHS, /* Must be zero. */

	TX_INIT, /* Must be zero. */
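/*
 * Overview of the teardown sequence implemented by close_conn() below
 * (derived from the code itself): shut down the receive side of the
 * socket, force-release any partially read command, dispose of the
 * session's pending commands for this connection, wait for conn_ref_cnt
 * to drop to zero, restore the original socket callbacks, wait for the
 * write thread to go idle, delete the session if this was its last
 * connection, and finally notify user space.
 */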
static void close_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_target *target = conn->target;

	TRACE_CONN_CLOSE("conn %p, conn_ref_cnt=%d", conn,
		atomic_read(&conn->conn_ref_cnt));

	iscsi_extracheck_is_rd_thread(conn);
	/* We want all of our already sent operations to complete */
	conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);

	if (conn->read_state != RX_INIT_BHS) {
		req_cmnd_release_force(conn->read_cmnd, 0);
		conn->read_cmnd = NULL;
		conn->read_state = RX_INIT_BHS;
	}
	/* ToDo: this is not the best way to wait */
	while (atomic_read(&conn->conn_ref_cnt) != 0) {
		struct iscsi_cmnd *cmnd;

		if (!list_empty(&session->pending_list)) {
			struct list_head *pending_list = &session->pending_list;
			struct iscsi_cmnd *tmp;

			TRACE_CONN_CLOSE("Disposing pending commands on conn "
				"%p, conn_ref_cnt=%d", conn,
				atomic_read(&conn->conn_ref_cnt));

			list_for_each_entry_safe(cmnd, tmp, pending_list,
						pending_list_entry) {
				if (cmnd->conn == conn) {
					TRACE_CONN_CLOSE("Freeing pending cmd %p",
						cmnd);
					list_del(&cmnd->pending_list_entry);
					req_cmnd_release_force(cmnd, 0);
				}
			}
		}

		iscsi_make_conn_wr_active(conn);

		TRACE_CONN_CLOSE("conn %p, conn_ref_cnt %d left, wr_state %d",
			conn, atomic_read(&conn->conn_ref_cnt), conn->wr_state);
#ifdef NET_PAGE_CALLBACKS_DEFINED
		struct iscsi_cmnd *rsp;
#endif

		spin_lock_bh(&conn->cmd_list_lock);
		list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
			TRACE_DBG("cmd %p, scst_state %x, data_waiting "
				"%d, ref_cnt %d, parent_req %p", cmnd,
				cmnd->scst_state, cmnd->data_waiting,
				atomic_read(&cmnd->ref_cnt), cmnd->parent_req);
#ifdef NET_PAGE_CALLBACKS_DEFINED
			TRACE_DBG("net_ref_cnt %d, sg %p",
				atomic_read(&cmnd->net_ref_cnt), cmnd->sg);
			if (cmnd->sg != NULL) {
				int i, sg_cnt;

				sg_cnt = get_pgcnt(cmnd->bufflen,
					cmnd->sg[0].offset);
				for (i = 0; i < sg_cnt; i++) {
					TRACE_DBG("page %p, net_priv %p, _count %d",
						cmnd->sg[i].page, cmnd->sg[i].page->net_priv,
						atomic_read(&cmnd->sg[i].page->_count));
				}
			}

			sBUG_ON(cmnd->parent_req != NULL);

			spin_lock_bh(&cmnd->rsp_cmd_lock);
			list_for_each_entry(rsp, &cmnd->rsp_cmd_list, rsp_cmd_list_entry) {
				TRACE_DBG("  rsp %p, ref_cnt %d, net_ref_cnt %d, "
					"sg %p", rsp, atomic_read(&rsp->ref_cnt),
					atomic_read(&rsp->net_ref_cnt), rsp->sg);
				if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
					int i, sg_cnt;

					sg_cnt = get_pgcnt(rsp->bufflen,
						rsp->sg[0].offset);
					sBUG_ON(rsp->sg_cnt != sg_cnt);
					for (i = 0; i < sg_cnt; i++) {
						TRACE_DBG("  page %p, net_priv %p, "
							"_count %d", rsp->sg[i].page,
							rsp->sg[i].page->net_priv,
							atomic_read(&rsp->sg[i].page->_count));
					}
				}
			}
			spin_unlock_bh(&cmnd->rsp_cmd_lock);
#endif
		}
		spin_unlock_bh(&conn->cmd_list_lock);
	write_lock_bh(&conn->sock->sk->sk_callback_lock);
	conn->sock->sk->sk_state_change = conn->old_state_change;
	conn->sock->sk->sk_data_ready = conn->old_data_ready;
	conn->sock->sk->sk_write_space = conn->old_write_space;
	write_unlock_bh(&conn->sock->sk->sk_callback_lock);
	while (conn->wr_state != ISCSI_CONN_WR_STATE_IDLE) {
		TRACE_CONN_CLOSE("Waiting for wr thread (conn %p), wr_state %x",
			conn, conn->wr_state);
	mutex_lock(&target->target_mutex);

	if (list_empty(&session->conn_list))
		session_del(target, session->sid);
	mutex_unlock(&target->target_mutex);

	TRACE_CONN_CLOSE("Notifying user space about closing conn %p", conn);
	event_send(target->tid, session->sid, conn->cid, E_CONN_CLOSE, 0);
}
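/*
 * All iSCSI PDU segments are padded to a 4-byte boundary (RFC 3720), so
 * read lengths are rounded up accordingly in iscsi_conn_init_read()
 * below: e.g. a 5-byte segment is received as 8 bytes ((5 + 3) & -4 == 8),
 * while an already aligned 48-byte BHS stays 48.
 */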
static inline void iscsi_conn_init_read(struct iscsi_conn *conn, void *data, size_t len)
{
	/* Round up to the 4-byte padded length of the segment */
	len = (len + 3) & -4;
	conn->read_iov[0].iov_base = data;
	conn->read_iov[0].iov_len = len;
	conn->read_msg.msg_iov = conn->read_iov;
	conn->read_msg.msg_iovlen = 1;
	conn->read_size = len; /* len is already 4-byte aligned */
}
static void iscsi_conn_read_ahs(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
{
	/* ToDo: __GFP_NOFAIL ?? */
	cmnd->pdu.ahs = kmalloc(cmnd->pdu.ahssize, __GFP_NOFAIL|GFP_KERNEL);
	sBUG_ON(cmnd->pdu.ahs == NULL);
	iscsi_conn_init_read(conn, cmnd->pdu.ahs, cmnd->pdu.ahssize);
}
static struct iscsi_cmnd *iscsi_get_send_cmnd(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = NULL;

	spin_lock(&conn->write_list_lock);
	if (!list_empty(&conn->write_list)) {
		cmnd = list_entry(conn->write_list.next, struct iscsi_cmnd,
				write_list_entry);
		cmd_del_from_write_list(cmnd);
		cmnd->write_processing_started = 1;
	}
	spin_unlock(&conn->write_list_lock);

	return cmnd;
}
static int do_recv(struct iscsi_conn *conn, int state)
{
	mm_segment_t oldfs;
	struct msghdr msg;
	int res, first_len;

	if (unlikely(conn->closing)) {
		res = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = conn->read_msg.msg_iov;
	msg.msg_iovlen = conn->read_msg.msg_iovlen;
	first_len = msg.msg_iov->iov_len;

	oldfs = get_fs();
	set_fs(get_ds());
	res = sock_recvmsg(conn->sock, &msg, conn->read_size,
			MSG_DONTWAIT | MSG_NOSIGNAL);
	set_fs(oldfs);

	if (res <= 0) {
		switch (res) {
		case -EAGAIN:
		case -ERESTARTSYS:
			TRACE_DBG("EAGAIN or ERESTARTSYS (%d) received for "
				"conn %p", res, conn);
			break;
		default:
			PRINT_ERROR_PR("sock_recvmsg() failed: %d", res);
			mark_conn_closed(conn);
			break;
		}
		goto out;
	}
	/*
	 * To save considerable effort and CPU cycles, we assume that the
	 * TCP layer adjusts conn->read_msg.msg_iov and
	 * conn->read_msg.msg_iovlen to account for the amount of copied
	 * data. This sBUG_ON() is intended to catch it if that behavior
	 * ever changes in the future.
	 */
	sBUG_ON((res >= first_len) &&
		(conn->read_msg.msg_iov->iov_len != 0));
	conn->read_size -= res;
	if (conn->read_size) {
		if (res >= first_len) {
			int done = 1 + ((res - first_len) >> PAGE_SHIFT);
			conn->read_msg.msg_iov += done;
			conn->read_msg.msg_iovlen -= done;
		}
	} else
		conn->read_state = state;

out:
	return res;
}
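/*
 * A note on the partial-receive bookkeeping above: the computation
 * "done = 1 + ((res - first_len) >> PAGE_SHIFT)" assumes that every
 * iovec after the first covers exactly one page, so when res bytes are
 * consumed, the first entry plus (res - first_len) / PAGE_SIZE full
 * pages have been completely filled. do_recv() then resumes from the
 * adjusted iovec on the next call, and conn->read_state only advances
 * once conn->read_size reaches zero.
 */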
static int rx_hdigest(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = conn->read_cmnd;
	int res = digest_rx_header(cmnd);

	if (unlikely(res != 0)) {
		PRINT_ERROR_PR("rx header digest for initiator %s failed "
			"(%d)", conn->session->initiator_name, res);
		mark_conn_closed(conn);
	}

	return res;
}
static struct iscsi_cmnd *create_cmnd(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd;

	cmnd = cmnd_alloc(conn, NULL);
	iscsi_conn_init_read(cmnd->conn, &cmnd->pdu.bhs, sizeof(cmnd->pdu.bhs));
	conn->read_state = RX_BHS;

	return cmnd;
}
/* Returns >0 for success, <=0 for error or successful finish */
static int recv(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = conn->read_cmnd;
	int hdigest, ddigest, res = 1, rc;

	iscsi_extracheck_is_rd_thread(conn);

	hdigest = conn->hdigest_type & DIGEST_NONE ? 0 : 1;
	ddigest = conn->ddigest_type & DIGEST_NONE ? 0 : 1;

	switch (conn->read_state) {
	case RX_INIT_BHS:
		sBUG_ON(cmnd != NULL);
		cmnd = conn->read_cmnd = create_cmnd(conn);
	case RX_BHS:
		res = do_recv(conn, RX_INIT_AHS);
		if (res <= 0 || conn->read_state != RX_INIT_AHS)
			break;
	case RX_INIT_AHS:
		iscsi_cmnd_get_length(&cmnd->pdu);
		if (cmnd->pdu.ahssize) {
			iscsi_conn_read_ahs(conn, cmnd);
			conn->read_state = RX_AHS;
		} else
			conn->read_state = hdigest ? RX_INIT_HDIGEST : RX_INIT_DATA;

		if (conn->read_state != RX_AHS)
			break;
	case RX_AHS:
		res = do_recv(conn, hdigest ? RX_INIT_HDIGEST : RX_INIT_DATA);
		if (res <= 0 || conn->read_state != RX_INIT_HDIGEST)
			break;
	case RX_INIT_HDIGEST:
		iscsi_conn_init_read(conn, &cmnd->hdigest, sizeof(u32));
		conn->read_state = RX_HDIGEST;
	case RX_HDIGEST:
		res = do_recv(conn, RX_CHECK_HDIGEST);
		if (res <= 0 || conn->read_state != RX_CHECK_HDIGEST)
			break;
	case RX_CHECK_HDIGEST:
		rc = rx_hdigest(conn);
		if (likely(rc == 0))
			conn->read_state = RX_INIT_DATA;
		else {
			res = rc;
			break;
		}
	case RX_INIT_DATA:
		rc = cmnd_rx_start(cmnd);
		if (unlikely(rc != 0)) {
			sBUG_ON(!conn->closing);
			conn->read_state = RX_END;
			res = rc;
			/* cmnd will be freed in close_conn() */
			goto out;
		}
		conn->read_state = cmnd->pdu.datasize ? RX_DATA : RX_END;
		if (conn->read_state != RX_DATA)
			break;
	case RX_DATA:
		res = do_recv(conn, ddigest ? RX_INIT_DDIGEST : RX_END);
		if (res <= 0 || conn->read_state != RX_INIT_DDIGEST)
			break;
	case RX_INIT_DDIGEST:
		iscsi_conn_init_read(conn, &cmnd->ddigest, sizeof(u32));
		conn->read_state = RX_DDIGEST;
	case RX_DDIGEST:
		res = do_recv(conn, RX_CHECK_DDIGEST);
		if (res <= 0 || conn->read_state != RX_CHECK_DDIGEST)
			break;
	case RX_CHECK_DDIGEST:
		conn->read_state = RX_END;
		if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
			TRACE_DBG("Adding RX ddigest cmd %p to digest list "
				"of self", cmnd);
			list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
				&cmnd->rx_ddigest_cmd_list);
			conn->read_state = RX_END;
		} else if (cmnd_opcode(cmnd) != ISCSI_OP_SCSI_DATA_OUT) {
			/*
			 * We can get here only for NOP-Out. The iSCSI RFC
			 * doesn't specify how to deal with digest errors in
			 * this case. Is closing the connection correct?
			 */
			TRACE_DBG("cmnd %p, opcode %x: checking RX "
				"ddigest inline", cmnd, cmnd_opcode(cmnd));
			rc = digest_rx_data(cmnd);
			if (unlikely(rc != 0)) {
				conn->read_state = RX_CHECK_DDIGEST;
				mark_conn_closed(conn);
			}
		}
		break;
	default:
		PRINT_ERROR_PR("%d %x", conn->read_state, cmnd_opcode(cmnd));
		sBUG();
	}

	if (res <= 0)
		goto out;

	if (conn->read_state != RX_END)
		goto out;

	if (conn->read_size) {
		PRINT_ERROR_PR("%d %x %d", res, cmnd_opcode(cmnd), conn->read_size);
		sBUG();
	}

	cmnd_rx_end(cmnd);

	sBUG_ON(conn->read_size != 0);

	conn->read_cmnd = NULL;
	conn->read_state = RX_INIT_BHS;

out:
	return res;
}
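/*
 * For reference, the receive state machine above walks one PDU through:
 * RX_INIT_BHS -> RX_BHS -> [RX_INIT_AHS -> RX_AHS] ->
 * [RX_INIT_HDIGEST -> RX_HDIGEST -> RX_CHECK_HDIGEST] -> RX_INIT_DATA ->
 * [RX_DATA] -> [RX_INIT_DDIGEST -> RX_DDIGEST -> RX_CHECK_DDIGEST] ->
 * RX_END, where the bracketed stages are skipped when the PDU has no
 * AHS, digests are disabled, or there is no data segment. The case
 * fall-throughs are intentional; a partial read leaves read_state at
 * the current stage so the next recv() call resumes there.
 */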
/* No locks held; conn is being processed by the read thread */
static int process_read_io(struct iscsi_conn *conn, int *closed)
{
	int res;

	do {
		if (unlikely(conn->closing)) {
			close_conn(conn);
			*closed = 1;
			break;
		}
		res = recv(conn);
	} while (res > 0);

	return res;
}
/*
 * Called under iscsi_rd_lock and BHs disabled, but will drop and
 * reacquire it inside.
 */
static void scst_do_job_rd(void)
{
	/*
	 * We delete connections from the list head and re-add them to the
	 * tail to maintain fairness between them.
	 */
	while (!list_empty(&iscsi_rd_list)) {
		int rc, closed = 0;
		struct iscsi_conn *conn = list_entry(iscsi_rd_list.next,
			typeof(*conn), rd_list_entry);

		list_del(&conn->rd_list_entry);

		sBUG_ON(conn->rd_state == ISCSI_CONN_RD_STATE_PROCESSING);
		conn->rd_data_ready = 0;
		conn->rd_state = ISCSI_CONN_RD_STATE_PROCESSING;
#ifdef EXTRACHECKS
		conn->rd_task = current;
#endif
		spin_unlock_bh(&iscsi_rd_lock);

		rc = process_read_io(conn, &closed);

		spin_lock_bh(&iscsi_rd_lock);

		if (closed)
			continue;

#ifdef EXTRACHECKS
		conn->rd_task = NULL;
#endif
		if ((rc == 0) || conn->rd_data_ready) {
			list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
			conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
		} else
			conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
	}
}
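/*
 * A connection's rd_state thus cycles through IDLE -> IN_LIST (queued on
 * iscsi_rd_list when incoming data is signalled) -> PROCESSING (owned by
 * a read thread, off the list, with iscsi_rd_lock dropped) and back to
 * IN_LIST or IDLE, depending on whether more data arrived in the
 * meantime.
 */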
static inline int test_rd_list(void)
{
	int res = !list_empty(&iscsi_rd_list) ||
		unlikely(kthread_should_stop());
	return res;
}
	current->flags |= PF_NOFREEZE;

	spin_lock_bh(&iscsi_rd_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_rd_list()) {
			add_wait_queue_exclusive(&iscsi_rd_waitQ, &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_rd_list())
					break;
				spin_unlock_bh(&iscsi_rd_lock);
				schedule();
				spin_lock_bh(&iscsi_rd_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&iscsi_rd_waitQ, &wait);
		}

		scst_do_job_rd();
	}
	spin_unlock_bh(&iscsi_rd_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be
	 * in module unload, so iscsi_rd_list must be empty.
	 */
	sBUG_ON(!list_empty(&iscsi_rd_list));
#ifdef NET_PAGE_CALLBACKS_DEFINED
void iscsi_get_page_callback(struct page *page)
{
	struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;
	int v;

	TRACE_DBG("cmd %p, page %p, _count %d, new net_ref_cnt %d",
		cmd, page, atomic_read(&page->_count),
		atomic_read(&cmd->net_ref_cnt)+1);

	v = atomic_inc_return(&cmd->net_ref_cnt);
	if (v == 1) {
		TRACE_DBG("getting cmd %p for page %p", cmd, page);
		cmnd_get(cmd);
	}
}

void iscsi_put_page_callback(struct page *page)
{
	struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;

	TRACE_DBG("cmd %p, page %p, _count %d, new net_ref_cnt %d",
		cmd, page, atomic_read(&page->_count),
		atomic_read(&cmd->net_ref_cnt)-1);

	if (atomic_dec_and_test(&cmd->net_ref_cnt)) {
		int i, sg_cnt = get_pgcnt(cmd->bufflen, cmd->sg[0].offset);
		for (i = 0; i < sg_cnt; i++) {
			TRACE_DBG("Clearing page %p", cmd->sg[i].page);
			cmd->sg[i].page->net_priv = NULL;
		}
		cmnd_put(cmd);
	}
}
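/*
 * The idea behind the two callbacks above: while a zero-copy sendpage()
 * transmission is in flight, the network stack holds references to the
 * data pages. page->net_priv points back to the owning command, and
 * net_ref_cnt keeps that command (and thus its pages) alive until the
 * last network reference is dropped, at which point net_priv is cleared
 * on every page and the command reference taken by the first get is
 * released.
 */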
/* This is partially taken from the Ardis code. */
static int write_data(struct iscsi_conn *conn)
{
	mm_segment_t oldfs;
	struct file *file;
	struct socket *sock;
	loff_t off;
	struct iovec *iop;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
	struct iscsi_cmnd *write_cmnd = conn->write_cmnd;
	struct iscsi_cmnd *ref_cmd;
	struct scatterlist *sg;
	int saved_size, size, sendsize;
	int offset, idx;
	int flags, res, count;

	iscsi_extracheck_is_wr_thread(conn);

	if (write_cmnd->own_sg == 0)
		ref_cmd = write_cmnd->parent_req;
	else
		ref_cmd = write_cmnd;

	file = conn->file;
	saved_size = size = conn->write_size;
	iop = conn->write_iop;
	count = conn->write_iop_used;

	if (iop) while (1) {
		int rest;

		sBUG_ON(count > sizeof(conn->write_iov)/sizeof(conn->write_iov[0]));
retry:
		oldfs = get_fs();
		set_fs(KERNEL_DS);
		off = 0;
		res = vfs_writev(file, (struct iovec __user *)iop, count, &off);
		set_fs(oldfs);
		TRACE(TRACE_D_DATA, "%#Lx:%u: %d(%ld)",
			(unsigned long long)conn->session->sid, conn->cid,
			res, (long)iop->iov_len);
		if (unlikely(res <= 0)) {
			if (res == -EAGAIN) {
				conn->write_iop = iop;
				conn->write_iop_used = count;
				goto out_iov;
			} else if (res == -EINTR)
				goto retry;
			goto err;
		}

		rest = res;
		size -= res;
		while (iop->iov_len <= rest && rest) {
			rest -= iop->iov_len;
			iop++;
			count--;
		}
		if (count == 0) {
			conn->write_iop = NULL;
			conn->write_iop_used = 0;
			if (size)
				break;
			goto out_iov;
		}
		sBUG_ON(iop > conn->write_iov +
			sizeof(conn->write_iov)/sizeof(conn->write_iov[0]));
		iop->iov_base += rest;
		iop->iov_len -= rest;
	}

	sg = write_cmnd->sg;
	if (sg == NULL) {
		PRINT_ERROR_PR("%s", "Warning: data missing!");
		return 0;
	}
	offset = conn->write_offset;
	idx = offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	sock = conn->sock;

#ifdef NET_PAGE_CALLBACKS_DEFINED
	sendpage = sock->ops->sendpage;
#else
	if ((write_cmnd->parent_req->scst_cmd != NULL) &&
	    scst_cmd_get_data_buff_alloced(write_cmnd->parent_req->scst_cmd))
		sendpage = sock_no_sendpage;
	else
		sendpage = sock->ops->sendpage;
#endif

	flags = MSG_DONTWAIT;

	while (1) {
#ifdef NET_PAGE_CALLBACKS_DEFINED
		if (unlikely((sg[idx].page->net_priv != NULL) &&
			     (sg[idx].page->net_priv != ref_cmd))) {
			PRINT_ERROR_PR("net_priv isn't NULL and != ref_cmd "
				"(write_cmnd %p, ref_cmd %p, sg %p, idx %d, "
				"net_priv %p)", write_cmnd, ref_cmd, sg, idx,
				sg[idx].page->net_priv);
			sBUG();
		}
		sg[idx].page->net_priv = ref_cmd;
#endif
		sendsize = PAGE_SIZE - offset;
		if (size <= sendsize) {
retry2:
			res = sendpage(sock, sg[idx].page, offset, size, flags);
			TRACE(TRACE_D_DATA, "%s %#Lx:%u: %d(%lu,%u,%u)",
				sock->ops->sendpage ? "sendpage" : "sock_no_sendpage",
				(unsigned long long)conn->session->sid, conn->cid,
				res, sg[idx].page->index, offset, size);
			if (unlikely(res <= 0)) {
				if (res == -EINTR)
					goto retry2;
				goto out_err;
			}
			if (res == size) {
				conn->write_size = 0;
				return saved_size;
			}
			offset += res;
			size -= res;
			continue;
		}

retry3:
		res = sendpage(sock, sg[idx].page, offset, sendsize,
			flags | MSG_MORE);
		TRACE(TRACE_D_DATA, "%s %#Lx:%u: %d(%lu,%u,%u)",
			sock->ops->sendpage ? "sendpage" : "sock_no_sendpage",
			(unsigned long long)conn->session->sid, conn->cid,
			res, sg[idx].page->index, offset, sendsize);
		if (unlikely(res <= 0)) {
			if (res == -EINTR)
				goto retry3;
			goto out_err;
		}
		if (res == sendsize) {
			idx++;
			offset = 0;
		} else
			offset += res;
		size -= res;
	}

out_res:
	conn->write_offset = (idx << PAGE_SHIFT) + offset;
out_iov:
	conn->write_size = size;
	if ((saved_size == size) && res == -EAGAIN)
		return -EAGAIN;

	return saved_size - size;
out_err:
#ifdef NET_PAGE_CALLBACKS_DEFINED
	if (atomic_read(&ref_cmd->net_ref_cnt) == 0) {
		TRACE_DBG("sendpage() returned %d, zeroing net_priv", res);
		sg[idx].page->net_priv = NULL;
	}
#endif
	if (res == -EAGAIN)
		goto out_res;
	/* else fall through */

err:
	PRINT_ERROR_PR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
		(unsigned long long)conn->session->sid, conn->cid,
		conn->write_cmnd);
	return res;
}
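/*
 * Two things worth noting about write_data(): the return convention is
 * "bytes sent this call" (saved_size - size), with -EAGAIN returned only
 * when nothing at all could be sent, so callers can tell partial
 * progress from a full socket buffer; and on an error with no network
 * references outstanding, the net_priv back-pointer of the current page
 * is cleared here, since no iscsi_put_page_callback() will arrive to do
 * it.
 */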
static int exit_tx(struct iscsi_conn *conn, int res)
{
	iscsi_extracheck_is_wr_thread(conn);

	switch (res) {
	case -EAGAIN:
	case -ERESTARTSYS:
		break;
	default:
		PRINT_ERROR_PR("Sending data failed: initiator %s, "
			"write_size %d, write_state %d, res %d",
			conn->session->initiator_name, conn->write_size,
			conn->write_state, res);
		conn->write_state = TX_END;
		conn->write_size = 0;
		mark_conn_closed(conn);
		break;
	}

	return res;
}
static int tx_ddigest(struct iscsi_cmnd *cmnd, int state)
{
	int res, rest = cmnd->conn->write_size;
	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
	struct kvec iov;

	iscsi_extracheck_is_wr_thread(cmnd->conn);

	TRACE_DBG("Sending data digest %x (cmd %p)", cmnd->ddigest, cmnd);

	iov.iov_base = (char *)(&cmnd->ddigest) + (sizeof(u32) - rest);
	iov.iov_len = rest;

	res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
	if (res > 0) {
		cmnd->conn->write_size -= res;
		if (!cmnd->conn->write_size)
			cmnd->conn->write_state = state;
	} else
		res = exit_tx(cmnd->conn, res);

	return res;
}
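/*
 * Note the resume arithmetic above: write_size counts the digest bytes
 * still unsent, so if an earlier call pushed only part of the 4-byte
 * digest, iov_base starts (sizeof(u32) - rest) bytes into cmnd->ddigest
 * and only the remaining rest bytes are sent on this attempt.
 */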
static void init_tx_hdigest(struct iscsi_cmnd *cmnd)
{
	struct iscsi_conn *conn = cmnd->conn;
	struct iovec *iop;

	iscsi_extracheck_is_wr_thread(conn);

	digest_tx_header(cmnd);

	sBUG_ON(conn->write_iop_used >=
		sizeof(conn->write_iov)/sizeof(conn->write_iov[0]));
	iop = &conn->write_iop[conn->write_iop_used];
	conn->write_iop_used++;
	iop->iov_base = &(cmnd->hdigest);
	iop->iov_len = sizeof(u32);
	conn->write_size += sizeof(u32);
}
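/*
 * The header digest is transmitted by simply appending one more iovec to
 * the PDU's write_iop array, so the BHS, AHS and digest all go out
 * through the same vfs_writev() call in write_data().
 */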
static int iscsi_do_send(struct iscsi_conn *conn, int state)
{
	int res;

	iscsi_extracheck_is_wr_thread(conn);

	res = write_data(conn);
	if (res > 0) {
		if (!conn->write_size)
			conn->write_state = state;
	} else
		res = exit_tx(conn, res);

	return res;
}
/*
 * No locks, conn is wr processing.
 *
 * IMPORTANT! Connection conn must be protected by an additional
 * conn_get() before entering this function, because otherwise it could
 * be destroyed inside as a result of cmnd release.
 */
int iscsi_send(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = conn->write_cmnd;
	int ddigest, res = 0;

	TRACE_DBG("conn %p, write_cmnd %p", conn, cmnd);

	iscsi_extracheck_is_wr_thread(conn);

	ddigest = conn->ddigest_type != DIGEST_NONE ? 1 : 0;

	switch (conn->write_state) {
	case TX_INIT:
		sBUG_ON(cmnd != NULL);
		cmnd = conn->write_cmnd = iscsi_get_send_cmnd(conn);
		if (!cmnd)
			goto out;
		if (!(conn->hdigest_type & DIGEST_NONE))
			init_tx_hdigest(cmnd);
		conn->write_state = TX_BHS_DATA;
	case TX_BHS_DATA:
		res = iscsi_do_send(conn, ddigest && cmnd->pdu.datasize ?
					TX_INIT_DDIGEST : TX_END);
		if (res <= 0 || conn->write_state != TX_INIT_DDIGEST)
			break;
	case TX_INIT_DDIGEST:
		cmnd->conn->write_size = sizeof(u32);
		conn->write_state = TX_DDIGEST;
	case TX_DDIGEST:
		res = tx_ddigest(cmnd, TX_END);
		break;
	default:
		PRINT_ERROR_PR("%d %d %x", res, conn->write_state,
			cmnd_opcode(cmnd));
		sBUG();
	}

	if (res <= 0)
		goto out;

	if (conn->write_state != TX_END)
		goto out;

	if (conn->write_size) {
		PRINT_ERROR_PR("%d %x %u", res, cmnd_opcode(cmnd),
			conn->write_size);
		sBUG();
	}
	rsp_cmnd_release(cmnd);

	conn->write_cmnd = NULL;
	conn->write_state = TX_INIT;

out:
	return res;
}
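/*
 * The transmit state machine mirrors the receive one: TX_INIT picks the
 * next response off the write list, TX_BHS_DATA pushes the header iovecs
 * and data pages, and [TX_INIT_DDIGEST -> TX_DDIGEST] appends the data
 * digest when digests are enabled. A short write leaves write_state and
 * write_size set so the next iscsi_send() call continues where it
 * stopped.
 */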
/*
 * No locks, conn is wr processing.
 *
 * IMPORTANT! Connection conn must be protected by an additional
 * conn_get() before entering this function, because otherwise it could
 * be destroyed inside as a result of iscsi_send(), which releases sent
 * commands.
 */
static int process_write_queue(struct iscsi_conn *conn)
{
	int res = 0;

	if (likely(test_write_ready(conn)))
		res = iscsi_send(conn);

	return res;
}
/*
 * Called under iscsi_wr_lock and BHs disabled, but will drop and
 * reacquire it inside.
 */
static void scst_do_job_wr(void)
{
	/*
	 * We delete connections from the list head and re-add them to the
	 * tail to maintain fairness between them.
	 */
	while (!list_empty(&iscsi_wr_list)) {
		int rc;
		struct iscsi_conn *conn = list_entry(iscsi_wr_list.next,
			typeof(*conn), wr_list_entry);

		TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d, "
			"write ready %d", conn, conn->wr_state,
			conn->wr_space_ready, test_write_ready(conn));

		list_del(&conn->wr_list_entry);

		sBUG_ON(conn->wr_state == ISCSI_CONN_WR_STATE_PROCESSING);

		conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
		conn->wr_space_ready = 0;
#ifdef EXTRACHECKS
		conn->wr_task = current;
#endif
		spin_unlock_bh(&iscsi_wr_lock);

		conn_get(conn);

		rc = process_write_queue(conn);

		spin_lock_bh(&iscsi_wr_lock);
#ifdef EXTRACHECKS
		conn->wr_task = NULL;
#endif
		if ((rc == -EAGAIN) && !conn->wr_space_ready) {
			conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
			goto next;
		}

		if (test_write_ready(conn)) {
			list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
			conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
		} else
			conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;

next:
		conn_put(conn);
	}
}
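/*
 * wr_state thus moves through IDLE -> IN_LIST -> PROCESSING and back,
 * with one extra stop the read side doesn't have: a connection whose
 * socket send buffer is full parks in WR_STATE_SPACE_WAIT until more
 * space is signalled (wr_space_ready) and it is requeued.
 */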
static inline int test_wr_list(void)
{
	int res = !list_empty(&iscsi_wr_list) ||
		unlikely(kthread_should_stop());
	return res;
}
	current->flags |= PF_NOFREEZE;

	spin_lock_bh(&iscsi_wr_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_wr_list()) {
			add_wait_queue_exclusive(&iscsi_wr_waitQ, &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_wr_list())
					break;
				spin_unlock_bh(&iscsi_wr_lock);
				schedule();
				spin_lock_bh(&iscsi_wr_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&iscsi_wr_waitQ, &wait);
		}

		scst_do_job_wr();
	}
	spin_unlock_bh(&iscsi_wr_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be
	 * in module unload, so iscsi_wr_list must be empty.
	 */
	sBUG_ON(!list_empty(&iscsi_wr_list));