 * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2007 Vladislav Bolkhovitin
 * Copyright (C) 2007 CMS Distribution Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <asm/ioctls.h>
#include <linux/delay.h>
/* Read states of a connection (conn->read_state) */
enum rx_state {
	RX_INIT_BHS, /* Must be zero: a zero-initialized conn starts here. */
	RX_BHS,
	RX_INIT_AHS,
	RX_AHS,
	RX_INIT_HDIGEST,
	RX_HDIGEST,
	RX_CHECK_HDIGEST,
	RX_INIT_DATA,
	RX_DATA,
	RX_INIT_DDIGEST,
	RX_DDIGEST,
	RX_CHECK_DDIGEST,
	RX_END,
};

/* Write states of a connection (conn->write_state) */
enum tx_state {
	TX_INIT, /* Must be zero: a zero-initialized conn starts here. */
	TX_BHS_DATA,
	TX_INIT_DDIGEST,
	TX_DDIGEST,
	TX_END,
};
static void close_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_target *target = conn->target;

	TRACE_CONN_CLOSE("Closing connection %p (conn_ref_cnt=%d)", conn,
		atomic_read(&conn->conn_ref_cnt));

	iscsi_extracheck_is_rd_thread(conn);

	/* We want all our already sent operations to complete */
	conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);

	if (conn->read_state != RX_INIT_BHS) {
		req_cmnd_release_force(conn->read_cmnd, 0);
		conn->read_cmnd = NULL;
		conn->read_state = RX_INIT_BHS;
	}
	/*
	 * ToDo: polling is not the best way to wait; see the note after
	 * the debug dump block below.
	 */
	while (atomic_read(&conn->conn_ref_cnt) != 0) {
		struct iscsi_cmnd *cmnd;

		if (!list_empty(&session->pending_list)) {
			struct list_head *pending_list = &session->pending_list;
			struct iscsi_cmnd *tmp;

			TRACE_CONN_CLOSE("Disposing pending commands on "
				"connection %p (conn_ref_cnt=%d)", conn,
				atomic_read(&conn->conn_ref_cnt));

			list_for_each_entry_safe(cmnd, tmp, pending_list,
						pending_list_entry) {
				if (cmnd->conn == conn) {
					TRACE_CONN_CLOSE("Freeing pending cmd %p",
						cmnd);
					list_del(&cmnd->pending_list_entry);
					req_cmnd_release_force(cmnd, 0);
				}
			}
		}

		iscsi_make_conn_wr_active(conn);
		msleep(50);

		TRACE_CONN_CLOSE("conn %p, conn_ref_cnt %d left, wr_state %d",
			conn, atomic_read(&conn->conn_ref_cnt), conn->wr_state);

		/* Dump the still outstanding commands for debugging */
		{
#ifdef NET_PAGE_CALLBACKS_DEFINED
			struct iscsi_cmnd *rsp;
#endif

			spin_lock_bh(&conn->cmd_list_lock);
			list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
				TRACE_DBG("cmd %p, scst_state %x, data_waiting "
					"%d, ref_cnt %d, parent_req %p", cmnd,
					cmnd->scst_state, cmnd->data_waiting,
					atomic_read(&cmnd->ref_cnt), cmnd->parent_req);
#ifdef NET_PAGE_CALLBACKS_DEFINED
				TRACE_DBG("net_ref_cnt %d, sg %p",
					atomic_read(&cmnd->net_ref_cnt), cmnd->sg);
				if (cmnd->sg != NULL) {
					int i, sg_cnt;

					sg_cnt = get_pgcnt(cmnd->bufflen,
						cmnd->sg[0].offset);
					for (i = 0; i < sg_cnt; i++) {
						TRACE_DBG("page %p, net_priv %p, _count %d",
							cmnd->sg[i].page,
							cmnd->sg[i].page->net_priv,
							atomic_read(&cmnd->sg[i].page->_count));
					}
				}
				sBUG_ON(cmnd->parent_req != NULL);

				spin_lock_bh(&cmnd->rsp_cmd_lock);
				list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
						rsp_cmd_list_entry) {
					TRACE_DBG("  rsp %p, ref_cnt %d, net_ref_cnt %d, "
						"sg %p", rsp, atomic_read(&rsp->ref_cnt),
						atomic_read(&rsp->net_ref_cnt), rsp->sg);
					if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
						int i, sg_cnt;

						sg_cnt = get_pgcnt(rsp->bufflen,
							rsp->sg[0].offset);
						sBUG_ON(rsp->sg_cnt != sg_cnt);
						for (i = 0; i < sg_cnt; i++) {
							TRACE_DBG("  page %p, net_priv %p, "
								"_count %d", rsp->sg[i].page,
								rsp->sg[i].page->net_priv,
								atomic_read(&rsp->sg[i].page->_count));
						}
					}
				}
				spin_unlock_bh(&cmnd->rsp_cmd_lock);
#endif
			}
			spin_unlock_bh(&conn->cmd_list_lock);
		}
	}

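	/*
	 * Note on the "not the best way to wait" ToDo above: a wait queue
	 * would avoid the fixed 50 ms polling, e.g. (sketch only, assuming
	 * a hypothetical conn->ref_cnt_waitQ woken wherever conn_ref_cnt
	 * is decremented):
	 *
	 *	wait_event(conn->ref_cnt_waitQ,
	 *		   atomic_read(&conn->conn_ref_cnt) == 0);
	 *
	 * It would not be a drop-in replacement, though: each polling pass
	 * above also disposes of pending commands and re-activates the wr
	 * thread.
	 */
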
	write_lock_bh(&conn->sock->sk->sk_callback_lock);
	conn->sock->sk->sk_state_change = conn->old_state_change;
	conn->sock->sk->sk_data_ready = conn->old_data_ready;
	conn->sock->sk->sk_write_space = conn->old_write_space;
	write_unlock_bh(&conn->sock->sk->sk_callback_lock);

	while (conn->wr_state != ISCSI_CONN_WR_STATE_IDLE) {
		TRACE_CONN_CLOSE("Waiting for wr thread (conn %p), wr_state %x",
			conn, conn->wr_state);
		msleep(50);
	}

	TRACE_CONN_CLOSE("Notifying user space about closing connection %p", conn);
	event_send(target->tid, session->sid, conn->cid, E_CONN_CLOSE, 0);

	mutex_lock(&target->target_mutex);
	conn_free(conn);
	if (list_empty(&session->conn_list))
		session_del(target, session->sid);
	mutex_unlock(&target->target_mutex);
}

static inline void iscsi_conn_init_read(struct iscsi_conn *conn, void *data, size_t len)
{
	/* Pad the read length up to a 4-byte boundary (iSCSI PDU padding) */
	len = (len + 3) & -4;

	conn->read_iov[0].iov_base = data;
	conn->read_iov[0].iov_len = len;
	conn->read_msg.msg_iov = conn->read_iov;
	conn->read_msg.msg_iovlen = 1;
	conn->read_size = len;
}

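/*
 * The "(len + 3) & -4" padding above is the same arithmetic as the
 * kernel's ALIGN() helper, e.g. ALIGN(5, 4) == 8, ALIGN(8, 4) == 8,
 * ALIGN(13, 4) == 16: every segment is read from the wire in whole
 * 4-byte words, pad bytes included.
 */
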
static void iscsi_conn_read_ahs(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
{
	/* ToDo: __GFP_NOFAIL ?? */
	cmnd->pdu.ahs = kmalloc(cmnd->pdu.ahssize, __GFP_NOFAIL|GFP_KERNEL);
	sBUG_ON(cmnd->pdu.ahs == NULL);
	iscsi_conn_init_read(conn, cmnd->pdu.ahs, cmnd->pdu.ahssize);
}

static struct iscsi_cmnd *iscsi_get_send_cmnd(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = NULL;

	spin_lock(&conn->write_list_lock);
	if (!list_empty(&conn->write_list)) {
		cmnd = list_entry(conn->write_list.next, struct iscsi_cmnd,
				write_list_entry);
		cmd_del_from_write_list(cmnd);
		cmnd->write_processing_started = 1;
	}
	spin_unlock(&conn->write_list_lock);

	return cmnd;
}

static int do_recv(struct iscsi_conn *conn, int state)
{
	mm_segment_t oldfs;
	struct msghdr msg;
	int res, first_len;

	if (unlikely(conn->closing)) {
		res = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = conn->read_msg.msg_iov;
	msg.msg_iovlen = conn->read_msg.msg_iovlen;
	first_len = msg.msg_iov->iov_len;

	oldfs = get_fs();
	set_fs(get_ds());
	res = sock_recvmsg(conn->sock, &msg, conn->read_size, MSG_DONTWAIT | MSG_NOSIGNAL);
	set_fs(oldfs);

	if (res <= 0) {
		switch (res) {
		case -EAGAIN:
		case -ERESTARTSYS:
			TRACE_DBG("EAGAIN or ERESTARTSYS (%d) received for "
				"conn %p", res, conn);
			break;
		default:
			PRINT_ERROR("sock_recvmsg() failed: %d", res);
			mark_conn_closed(conn);
			break;
		}
	} else {
		/*
		 * To save considerable effort and CPU power, we suppose
		 * that the TCP functions adjust conn->read_msg.msg_iov and
		 * conn->read_msg.msg_iovlen by the amount of copied data.
		 * This sBUG_ON is intended to catch it if that behavior
		 * ever changes.
		 */
		sBUG_ON((res >= first_len) &&
			(conn->read_msg.msg_iov->iov_len != 0));
		conn->read_size -= res;
		if (conn->read_size) {
			if (res >= first_len) {
				int done = 1 + ((res - first_len) >> PAGE_SHIFT);
				conn->read_msg.msg_iov += done;
				conn->read_msg.msg_iovlen -= done;
			}
		} else
			conn->read_state = state;
	}

out:
	return res;
}

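/*
 * Worked example of the msg_iov advance in do_recv(): if the first iovec
 * held first_len == 904 bytes and res == 904 + 4096 bytes were copied,
 * then done == 1 + ((res - first_len) >> PAGE_SHIFT) == 2 iovecs were
 * fully consumed. This arithmetic only holds if every iovec after the
 * first covers exactly one page, which the read path relies on.
 */
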
static int rx_hdigest(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = conn->read_cmnd;
	int res = digest_rx_header(cmnd);

	if (unlikely(res != 0)) {
		PRINT_ERROR("rx header digest for initiator %s failed "
			"(%d)", conn->session->initiator_name, res);
		mark_conn_closed(conn);
	}

	return res;
}

static struct iscsi_cmnd *create_cmnd(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd;

	cmnd = cmnd_alloc(conn, NULL);
	iscsi_conn_init_read(cmnd->conn, &cmnd->pdu.bhs, sizeof(cmnd->pdu.bhs));
	conn->read_state = RX_BHS;

	return cmnd;
}

/* Returns >0 for success, <=0 for error or successful finish */
static int recv(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = conn->read_cmnd;
	int hdigest, ddigest, res = 1, rc;

	hdigest = conn->hdigest_type & DIGEST_NONE ? 0 : 1;
	ddigest = conn->ddigest_type & DIGEST_NONE ? 0 : 1;

	switch (conn->read_state) {
	case RX_INIT_BHS:
		sBUG_ON(cmnd != NULL);
		cmnd = conn->read_cmnd = create_cmnd(conn);
	case RX_BHS:
		res = do_recv(conn, RX_INIT_AHS);
		if (res <= 0 || conn->read_state != RX_INIT_AHS)
			break;
	case RX_INIT_AHS:
		iscsi_cmnd_get_length(&cmnd->pdu);
		if (cmnd->pdu.ahssize) {
			iscsi_conn_read_ahs(conn, cmnd);
			conn->read_state = RX_AHS;
		} else
			conn->read_state = hdigest ? RX_INIT_HDIGEST : RX_INIT_DATA;

		if (conn->read_state != RX_AHS)
			break;
	case RX_AHS:
		res = do_recv(conn, hdigest ? RX_INIT_HDIGEST : RX_INIT_DATA);
		if (res <= 0 || conn->read_state != RX_INIT_HDIGEST)
			break;
	case RX_INIT_HDIGEST:
		iscsi_conn_init_read(conn, &cmnd->hdigest, sizeof(u32));
		conn->read_state = RX_HDIGEST;
	case RX_HDIGEST:
		res = do_recv(conn, RX_CHECK_HDIGEST);
		if (res <= 0 || conn->read_state != RX_CHECK_HDIGEST)
			break;
	case RX_CHECK_HDIGEST:
		rc = rx_hdigest(conn);
		if (likely(rc == 0))
			conn->read_state = RX_INIT_DATA;
		else {
			res = rc;
			break;
		}
	case RX_INIT_DATA:
		rc = cmnd_rx_start(cmnd);
		if (unlikely(rc != 0)) {
			sBUG_ON(!conn->closing);
			conn->read_state = RX_END;
			res = rc;
			/* cmnd will be freed in close_conn() */
			break;
		}
		conn->read_state = cmnd->pdu.datasize ? RX_DATA : RX_END;
		if (conn->read_state != RX_DATA)
			break;
	case RX_DATA:
		res = do_recv(conn, ddigest ? RX_INIT_DDIGEST : RX_END);
		if (res <= 0 || conn->read_state != RX_INIT_DDIGEST)
			break;
	case RX_INIT_DDIGEST:
		iscsi_conn_init_read(conn, &cmnd->ddigest, sizeof(u32));
		conn->read_state = RX_DDIGEST;
	case RX_DDIGEST:
		res = do_recv(conn, RX_CHECK_DDIGEST);
		if (res <= 0 || conn->read_state != RX_CHECK_DDIGEST)
			break;
	case RX_CHECK_DDIGEST:
		conn->read_state = RX_END;
		if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
			TRACE_DBG("Adding RX ddigest cmd %p to digest list "
				"of self", cmnd);
			list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
				&cmnd->rx_ddigest_cmd_list);
			conn->read_state = RX_END;
		} else if (cmnd_opcode(cmnd) != ISCSI_OP_SCSI_DATA_OUT) {
			/*
			 * We could get here only for NOP-Out. The iSCSI RFC
			 * doesn't specify how to deal with digest errors in
			 * this case. Is closing the connection correct?
			 */
			TRACE_DBG("cmnd %p, opcode %x: checking RX "
				"ddigest inline", cmnd, cmnd_opcode(cmnd));
			rc = digest_rx_data(cmnd);
			if (unlikely(rc != 0)) {
				conn->read_state = RX_CHECK_DDIGEST;
				mark_conn_closed(conn);
			}
		}
		break;
	default:
		PRINT_ERROR("Unexpected read_state %d (opcode %x)",
			conn->read_state, cmnd_opcode(cmnd));
		sBUG();
	}

	if (res <= 0)
		goto out;

	if (conn->read_state != RX_END)
		goto out;

	if (unlikely(conn->read_size)) {
		PRINT_ERROR("res %d, opcode %x, read_size %d left", res,
			cmnd_opcode(cmnd), conn->read_size);
		sBUG();
	}

	cmnd_rx_end(cmnd);

	sBUG_ON(conn->read_size != 0);

	conn->read_cmnd = NULL;
	conn->read_state = RX_INIT_BHS;
	res = 0;

out:
	return res;
}

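/*
 * Overview of the recv() read states (fall-through switch above):
 *
 *	RX_INIT_BHS -> RX_BHS -> RX_INIT_AHS -> [RX_AHS] ->
 *	[RX_INIT_HDIGEST -> RX_HDIGEST -> RX_CHECK_HDIGEST] ->
 *	RX_INIT_DATA -> [RX_DATA] ->
 *	[RX_INIT_DDIGEST -> RX_DDIGEST -> RX_CHECK_DDIGEST] -> RX_END
 *
 * Bracketed states are skipped when there is no AHS, no digest or no
 * data segment. On a short read do_recv() returns -EAGAIN and leaves
 * read_state unchanged, so the switch is re-entered later at the same
 * case label.
 */
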
/* No locks held; conn is being processed by the rd thread */
static int process_read_io(struct iscsi_conn *conn, int *closed)
{
	int res;

	do {
		res = recv(conn);
		if (unlikely(conn->closing)) {
			close_conn(conn);
			*closed = 1;
			break;
		}
	} while (res > 0);

	return res;
}

/*
 * Called under iscsi_rd_lock and BHs disabled, but drops and reacquires
 * it inside.
 */
static void scst_do_job_rd(void)
{
	/*
	 * We delete connections from the list head and re-add them at the
	 * tail to maintain fairness between them.
	 */
	while (!list_empty(&iscsi_rd_list)) {
		int rc = 0, closed = 0;
		struct iscsi_conn *conn = list_entry(iscsi_rd_list.next,
			typeof(*conn), rd_list_entry);

		list_del(&conn->rd_list_entry);

		sBUG_ON(conn->rd_state == ISCSI_CONN_RD_STATE_PROCESSING);
		conn->rd_data_ready = 0;
		conn->rd_state = ISCSI_CONN_RD_STATE_PROCESSING;
		conn->rd_task = current;
		spin_unlock_bh(&iscsi_rd_lock);

		rc = process_read_io(conn, &closed);

		spin_lock_bh(&iscsi_rd_lock);

		if (closed)
			continue;

		conn->rd_task = NULL;

		if ((rc == 0) || conn->rd_data_ready) {
			list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
			conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
		} else
			conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
	}
}

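/*
 * rd_state lifecycle: the socket's data-ready callback puts an idle
 * connection onto iscsi_rd_list (IDLE -> IN_LIST); scst_do_job_rd()
 * takes it off the list for processing (IN_LIST -> PROCESSING) and
 * afterwards either re-queues it at the tail (-> IN_LIST, if more data
 * arrived meanwhile or rc == 0) or parks it (-> IDLE).
 */
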
static inline int test_rd_list(void)
{
	int res = !list_empty(&iscsi_rd_list) ||
		  unlikely(kthread_should_stop());
	return res;
}

int istrd(void *arg)
{
	wait_queue_t wait;

	current->flags |= PF_NOFREEZE;

	spin_lock_bh(&iscsi_rd_lock);
	while (!kthread_should_stop()) {
		init_waitqueue_entry(&wait, current);

		if (!test_rd_list()) {
			add_wait_queue_exclusive(&iscsi_rd_waitQ, &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_rd_list())
					break;
				spin_unlock_bh(&iscsi_rd_lock);
				schedule();
				spin_lock_bh(&iscsi_rd_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&iscsi_rd_waitQ, &wait);
		}
		scst_do_job_rd();
	}
	spin_unlock_bh(&iscsi_rd_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be in the
	 * module unload path, so iscsi_rd_list must be empty.
	 */
	sBUG_ON(!list_empty(&iscsi_rd_list));

	return 0;
}

#ifdef NET_PAGE_CALLBACKS_DEFINED
void iscsi_get_page_callback(struct page *page)
{
	struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;
	int v;

	TRACE_NET_PAGE("cmd %p, page %p, _count %d, new net_ref_cnt %d",
		cmd, page, atomic_read(&page->_count),
		atomic_read(&cmd->net_ref_cnt)+1);

	v = atomic_inc_return(&cmd->net_ref_cnt);
	if (v == 1) {
		TRACE_NET_PAGE("getting cmd %p for page %p", cmd, page);
		cmnd_get(cmd);
	}
}

void iscsi_put_page_callback(struct page *page)
{
	struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;

	TRACE_NET_PAGE("cmd %p, page %p, _count %d, new net_ref_cnt %d",
		cmd, page, atomic_read(&page->_count),
		atomic_read(&cmd->net_ref_cnt)-1);

	if (atomic_dec_and_test(&cmd->net_ref_cnt)) {
		int i, sg_cnt = get_pgcnt(cmd->bufflen, cmd->sg[0].offset);

		for (i = 0; i < sg_cnt; i++) {
			TRACE_NET_PAGE("Clearing page %p", cmd->sg[i].page);
			cmd->sg[i].page->net_priv = NULL;
		}
		cmnd_put(cmd);
	}
}

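/*
 * How the net_priv accounting above works: before a data page is handed
 * to sendpage(), write_data() stores ref_cmd in page->net_priv. While
 * the TCP stack holds references to the page it calls
 * iscsi_get_page_callback()/iscsi_put_page_callback(), so net_ref_cnt
 * keeps the owning command (and hence its pages) alive until the data
 * has really left the host, which is what makes zero-copy transmission
 * safe here.
 */
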
static void check_net_priv(struct iscsi_cmnd *cmd, struct page *page)
{
	if (atomic_read(&cmd->net_ref_cnt) == 0) {
		TRACE_DBG("%s", "sendpage() didn't call get_page(), "
			"zeroing net_priv");
		page->net_priv = NULL;
	}
}
#else
static inline void check_net_priv(struct iscsi_cmnd *cmd, struct page *page) {}
#endif

/* This is partially taken from the Ardis code. */
static int write_data(struct iscsi_conn *conn)
{
	mm_segment_t oldfs;
	struct file *file;
	struct socket *sock;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
	struct iscsi_cmnd *write_cmnd = conn->write_cmnd;
	struct iscsi_cmnd *ref_cmd;
	struct scatterlist *sg;
	struct iovec *iop;
	int saved_size, size, sendsize;
	int offset, idx;
	int flags, res, count;

	iscsi_extracheck_is_wr_thread(conn);

	if (write_cmnd->own_sg == 0)
		ref_cmd = write_cmnd->parent_req;
	else
		ref_cmd = write_cmnd;

	file = conn->file;
	saved_size = size = conn->write_size;
	iop = conn->write_iop;
	count = conn->write_iop_used;

	if (iop)
		while (1) {
			loff_t off = 0;
			int rest;

			sBUG_ON(count > sizeof(conn->write_iov) /
					sizeof(conn->write_iov[0]));
retry:
			oldfs = get_fs();
			set_fs(KERNEL_DS);
			res = vfs_writev(file, (struct iovec __user *)iop, count, &off);
			set_fs(oldfs);
			TRACE(TRACE_D_WRITE, "%#Lx:%u: %d(%ld)",
				(unsigned long long)conn->session->sid, conn->cid,
				res, (long)iop->iov_len);
			if (unlikely(res <= 0)) {
				if (res == -EAGAIN) {
					conn->write_iop = iop;
					conn->write_iop_used = count;
					goto out_iov;
				} else if (res == -EINTR)
					goto retry;
				goto err;
			}

			rest = res;
			size -= res;
			while (iop->iov_len <= rest && rest) {
				rest -= iop->iov_len;
				iop++;
				count--;
			}
			if (count == 0) {
				conn->write_iop = NULL;
				conn->write_iop_used = 0;
				if (size)
					break;
				goto out_iov;
			}
			sBUG_ON(iop > conn->write_iov +
				sizeof(conn->write_iov)/sizeof(conn->write_iov[0]));
			iop->iov_base += rest;
			iop->iov_len -= rest;
		}

	sg = write_cmnd->sg;
	if (sg == NULL) {
		PRINT_ERROR("%s", "warning, data missing!");
		return 0;
	}

	sock = conn->sock;
	offset = conn->write_offset;
	idx = offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

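	/*
	 * write_offset is split into an sg page index and an offset within
	 * that page; e.g. with PAGE_SIZE == 4096, write_offset == 9000
	 * yields idx == 2 and offset == 808, i.e. transmission resumes 808
	 * bytes into the third page.
	 */
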
#ifdef NET_PAGE_CALLBACKS_DEFINED
	sendpage = sock->ops->sendpage;
#else
	if ((write_cmnd->parent_req->scst_cmd != NULL) &&
	    scst_cmd_get_data_buff_alloced(write_cmnd->parent_req->scst_cmd))
		sendpage = sock_no_sendpage;
	else
		sendpage = sock->ops->sendpage;
#endif

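	/*
	 * Without the net_priv page callbacks there is no way to pin the
	 * pages of a buffer allocated outside this driver while TCP still
	 * references them, so sock_no_sendpage() (which copies the data)
	 * is used for such buffers; the zero-copy ->sendpage() path is
	 * only used for pages this driver owns and can track.
	 */
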
	flags = MSG_DONTWAIT;

	while (1) {
#ifdef NET_PAGE_CALLBACKS_DEFINED
		if (unlikely((sg[idx].page->net_priv != NULL) &&
			     (sg[idx].page->net_priv != ref_cmd))) {
			PRINT_ERROR("net_priv isn't NULL and != ref_cmd "
				"(write_cmnd %p, ref_cmd %p, sg %p, idx %d, "
				"net_priv %p)", write_cmnd, ref_cmd, sg, idx,
				sg[idx].page->net_priv);
			sBUG();
		}
		sg[idx].page->net_priv = ref_cmd;
#endif
		sendsize = PAGE_SIZE - offset;
		if (size <= sendsize) {
retry2:
			res = sendpage(sock, sg[idx].page, offset, size, flags);
			TRACE(TRACE_D_WRITE, "%s %#Lx:%u: %d(%lu,%u,%u)",
				sock->ops->sendpage ? "sendpage" : "sock_no_sendpage",
				(unsigned long long)conn->session->sid, conn->cid,
				res, sg[idx].page->index, offset, size);
			if (unlikely(res <= 0)) {
				if (res == -EINTR)
					goto retry2;
				goto out_res;
			}

			check_net_priv(ref_cmd, sg[idx].page);
			if (res == size) {
				conn->write_size = 0;
				return saved_size;
			}

			offset += res;
			size -= res;
			continue;
		}

retry1:
		res = sendpage(sock, sg[idx].page, offset, sendsize,
			flags | MSG_MORE);
		TRACE(TRACE_D_WRITE, "%s %#Lx:%u: %d(%lu,%u,%u)",
			sock->ops->sendpage ? "sendpage" : "sock_no_sendpage",
			(unsigned long long)conn->session->sid, conn->cid,
			res, sg[idx].page->index, offset, sendsize);
		if (unlikely(res <= 0)) {
			if (res == -EINTR)
				goto retry1;
			goto out_res;
		}

		check_net_priv(ref_cmd, sg[idx].page);
		if (res == sendsize) {
			idx++;
			offset = 0;
		} else
			offset += res;

		size -= res;
	}
out:
	conn->write_offset = (idx << PAGE_SHIFT) + offset;

out_iov:
	conn->write_size = size;
	if ((saved_size == size) && res == -EAGAIN)
		return res;

	return saved_size - size;

out_res:
	check_net_priv(ref_cmd, sg[idx].page);
	if (res == -EAGAIN)
		goto out;
	/* else fall through */

err:
	PRINT_ERROR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
		(unsigned long long)conn->session->sid, conn->cid,
		write_cmnd);
	return res;
}

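/*
 * Return convention of write_data(): on (partial) success the number of
 * bytes consumed from conn->write_size (saved_size - size); -EAGAIN only
 * if nothing at all could be sent (a short send still counts as
 * progress); any other negative value is a fatal socket error, which
 * exit_tx() turns into a closed connection.
 */
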
static int exit_tx(struct iscsi_conn *conn, int res)
{
	iscsi_extracheck_is_wr_thread(conn);

	switch (res) {
	case -EAGAIN:
	case -ERESTARTSYS:
		break;
	default:
		PRINT_ERROR("Sending data failed: initiator %s, "
			"write_size %d, write_state %d, res %d",
			conn->session->initiator_name, conn->write_size,
			conn->write_state, res);
		conn->write_state = TX_END;
		conn->write_size = 0;
		mark_conn_closed(conn);
		break;
	}

	return res;
}

static int tx_ddigest(struct iscsi_cmnd *cmnd, int state)
{
	int res, rest = cmnd->conn->write_size;
	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
	struct kvec iov;

	iscsi_extracheck_is_wr_thread(cmnd->conn);

	TRACE_DBG("Sending data digest %x (cmd %p)", cmnd->ddigest, cmnd);

	iov.iov_base = (char *)(&cmnd->ddigest) + (sizeof(u32) - rest);
	iov.iov_len = rest;

	res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
	if (res > 0) {
		cmnd->conn->write_size -= res;
		if (!cmnd->conn->write_size)
			cmnd->conn->write_state = state;
	} else
		res = exit_tx(cmnd->conn, res);

	return res;
}

static void init_tx_hdigest(struct iscsi_cmnd *cmnd)
{
	struct iscsi_conn *conn = cmnd->conn;
	struct iovec *iop;

	iscsi_extracheck_is_wr_thread(conn);

	digest_tx_header(cmnd);

	sBUG_ON(conn->write_iop_used >=
		sizeof(conn->write_iov)/sizeof(conn->write_iov[0]));
	iop = &conn->write_iop[conn->write_iop_used];
	conn->write_iop_used++;
	iop->iov_base = &(cmnd->hdigest);
	iop->iov_len = sizeof(u32);
	conn->write_size += sizeof(u32);
}

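/*
 * The header digest is sent as one extra iovec appended after the BHS
 * and AHS iovecs: digest_tx_header() computes the CRC32C digest defined
 * by RFC 3720 into cmnd->hdigest, and the 4 digest bytes are added to
 * write_size so that write_data() transmits them together with the
 * header.
 */
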
static int iscsi_do_send(struct iscsi_conn *conn, int state)
{
	int res;

	iscsi_extracheck_is_wr_thread(conn);

	res = write_data(conn);
	if (res > 0) {
		if (!conn->write_size)
			conn->write_state = state;
	} else
		res = exit_tx(conn, res);

	return res;
}

/*
 * No locks held; conn is being processed by the wr thread.
 *
 * IMPORTANT! Connection conn must be protected by an additional
 * conn_get() before entering this function, because otherwise it could
 * be destroyed inside as a result of cmnd release.
 */
int iscsi_send(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd = conn->write_cmnd;
	int ddigest, res = 0;

	TRACE_DBG("conn %p, write_cmnd %p", conn, cmnd);

	iscsi_extracheck_is_wr_thread(conn);

	ddigest = conn->ddigest_type != DIGEST_NONE ? 1 : 0;

	switch (conn->write_state) {
	case TX_INIT:
		sBUG_ON(cmnd != NULL);
		cmnd = conn->write_cmnd = iscsi_get_send_cmnd(conn);
		if (!cmnd)
			goto out;
		cmnd_tx_start(cmnd);
		if (!(conn->hdigest_type & DIGEST_NONE))
			init_tx_hdigest(cmnd);
		conn->write_state = TX_BHS_DATA;
	case TX_BHS_DATA:
		res = iscsi_do_send(conn, ddigest && cmnd->pdu.datasize ?
					TX_INIT_DDIGEST : TX_END);
		if (res <= 0 || conn->write_state != TX_INIT_DDIGEST)
			break;
	case TX_INIT_DDIGEST:
		cmnd->conn->write_size = sizeof(u32);
		conn->write_state = TX_DDIGEST;
	case TX_DDIGEST:
		res = tx_ddigest(cmnd, TX_END);
		break;
	default:
		PRINT_ERROR("res %d, write_state %d, opcode %x", res,
			conn->write_state, cmnd_opcode(cmnd));
		sBUG();
	}

	if (res <= 0)
		goto out;

	if (conn->write_state != TX_END)
		goto out;

	if (conn->write_size) {
		PRINT_ERROR("res %d, opcode %x, write_size %u left", res,
			cmnd_opcode(cmnd), conn->write_size);
		sBUG();
	}
	cmnd_tx_end(cmnd);

	rsp_cmnd_release(cmnd);

	conn->write_cmnd = NULL;
	conn->write_state = TX_INIT;

out:
	return res;
}

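/*
 * Overview of the iscsi_send() write states (fall-through switch above):
 *
 *	TX_INIT -> TX_BHS_DATA -> [TX_INIT_DDIGEST -> TX_DDIGEST] -> TX_END
 *
 * The digest states are skipped when data digests are disabled or the
 * PDU has no data segment. As on the RX side, -EAGAIN leaves
 * write_state unchanged, so the switch resumes at the same case label
 * later.
 */
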
/*
 * No locks held; conn is being processed by the wr thread.
 *
 * IMPORTANT! Connection conn must be protected by an additional
 * conn_get() before entering this function, because otherwise it could
 * be destroyed inside as a result of iscsi_send(), which releases sent
 * commands.
 */
static int process_write_queue(struct iscsi_conn *conn)
{
	int res = 0;

	if (likely(test_write_ready(conn)))
		res = iscsi_send(conn);

	return res;
}

/*
 * Called under iscsi_wr_lock and BHs disabled, but drops and reacquires
 * it inside.
 */
static void scst_do_job_wr(void)
{
	/*
	 * We delete connections from the list head and re-add them at the
	 * tail to maintain fairness between them.
	 */
	while (!list_empty(&iscsi_wr_list)) {
		int rc;
		struct iscsi_conn *conn = list_entry(iscsi_wr_list.next,
			typeof(*conn), wr_list_entry);

		TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d, "
			"write ready %d", conn, conn->wr_state,
			conn->wr_space_ready, test_write_ready(conn));

		list_del(&conn->wr_list_entry);

		sBUG_ON(conn->wr_state == ISCSI_CONN_WR_STATE_PROCESSING);

		conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
		conn->wr_space_ready = 0;
		conn->wr_task = current;
		spin_unlock_bh(&iscsi_wr_lock);

		conn_get(conn);

		rc = process_write_queue(conn);

		spin_lock_bh(&iscsi_wr_lock);

		conn->wr_task = NULL;

		if ((rc == -EAGAIN) && !conn->wr_space_ready) {
			conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
			goto cont;
		}

		if (test_write_ready(conn)) {
			list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
			conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
		} else
			conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;

cont:
		conn_put(conn);
	}
}

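/*
 * wr_state lifecycle: IDLE -> IN_LIST when a connection gets work to do
 * or socket write space, IN_LIST -> PROCESSING in scst_do_job_wr(),
 * then back to IN_LIST (still write-ready), to SPACE_WAIT (the socket
 * buffer is full, waiting for the write-space callback) or to IDLE.
 */
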
static inline int test_wr_list(void)
{
	int res = !list_empty(&iscsi_wr_list) ||
		  unlikely(kthread_should_stop());
	return res;
}

int istwr(void *arg)
{
	wait_queue_t wait;

	current->flags |= PF_NOFREEZE;

	spin_lock_bh(&iscsi_wr_lock);
	while (!kthread_should_stop()) {
		init_waitqueue_entry(&wait, current);

		if (!test_wr_list()) {
			add_wait_queue_exclusive(&iscsi_wr_waitQ, &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_wr_list())
					break;
				spin_unlock_bh(&iscsi_wr_lock);
				schedule();
				spin_lock_bh(&iscsi_wr_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&iscsi_wr_waitQ, &wait);
		}
		scst_do_job_wr();
	}
	spin_unlock_bh(&iscsi_wr_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be in the
	 * module unload path, so iscsi_wr_list must be empty.
	 */
	sBUG_ON(!list_empty(&iscsi_wr_list));

	return 0;
}