 * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2007 - 2009 Vladislav Bolkhovitin
 * Copyright (C) 2007 - 2009 ID7 Ltd.

 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <asm/ioctls.h>
#include <linux/delay.h>
	RX_INIT_BHS, /* Must be zero for better "switch" optimization. */

	TX_INIT = 0, /* Must be zero for better "switch" optimization. */
#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
static void iscsi_check_closewait(struct iscsi_conn *conn)
	struct iscsi_cmnd *cmnd;

	TRACE_CONN_CLOSE_DBG("conn %p, sk_state %d", conn,
		conn->sock->sk->sk_state);

	if (conn->sock->sk->sk_state != TCP_CLOSE) {
		TRACE_CONN_CLOSE_DBG("conn %p, skipping", conn);

	 * No data are going to be sent, so all queued buffers can be freed
	 * now. In many cases TCP does that only in close(), but we can't
	 * rely on user space to call it.
	spin_lock_bh(&conn->cmd_list_lock);
	list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
		struct iscsi_cmnd *rsp;

		TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, data_waiting %d, "
			"ref_cnt %d, parent_req %p, net_ref_cnt %d, sg %p",
			cmnd, cmnd->scst_state, cmnd->data_waiting,
			atomic_read(&cmnd->ref_cnt), cmnd->parent_req,
			atomic_read(&cmnd->net_ref_cnt), cmnd->sg);

		sBUG_ON(cmnd->parent_req != NULL);

		if (cmnd->sg != NULL) {
			if (cmnd_get_check(cmnd))

			for (i = 0; i < cmnd->sg_cnt; i++) {
				struct page *page = sg_page(&cmnd->sg[i]);
				TRACE_CONN_CLOSE_DBG("page %p, net_priv %p, "
					"_count %d", page, page->net_priv,
					atomic_read(&page->_count));

				if (page->net_priv != NULL) {
					spin_unlock_bh(&conn->cmd_list_lock);

					while (page->net_priv != NULL)
						iscsi_put_page_callback(page);

		spin_lock_bh(&cmnd->rsp_cmd_lock);
		list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
				rsp_cmd_list_entry) {
			TRACE_CONN_CLOSE_DBG(" rsp %p, ref_cnt %d, "
				"net_ref_cnt %d, sg %p",
				rsp, atomic_read(&rsp->ref_cnt),
				atomic_read(&rsp->net_ref_cnt), rsp->sg);

			if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
				if (cmnd_get_check(rsp))

				for (i = 0; i < rsp->sg_cnt; i++) {
					struct page *page =
						sg_page(&rsp->sg[i]);
					TRACE_CONN_CLOSE_DBG(
						" page %p, net_priv %p, "
						"_count %d",
						page, page->net_priv,
						atomic_read(&page->_count));

					if (page->net_priv != NULL) {
						spin_unlock_bh(&cmnd->rsp_cmd_lock);
						spin_unlock_bh(&conn->cmd_list_lock);

						while (page->net_priv != NULL)
							iscsi_put_page_callback(page);

		spin_unlock_bh(&cmnd->rsp_cmd_lock);

	spin_unlock_bh(&conn->cmd_list_lock);
static inline void iscsi_check_closewait(struct iscsi_conn *conn) {}
static void free_pending_commands(struct iscsi_conn *conn)
	struct iscsi_session *session = conn->session;
	struct list_head *pending_list = &session->pending_list;
	struct iscsi_cmnd *cmnd;

	spin_lock(&session->sn_lock);
	list_for_each_entry(cmnd, pending_list, pending_list_entry) {
		TRACE_CONN_CLOSE_DBG("Pending cmd %p "
			"(conn %p, cmd_sn %u, exp_cmd_sn %u)",
			cmnd, conn, cmnd->pdu.bhs.sn,
			session->exp_cmd_sn);
		if ((cmnd->conn == conn) &&
		    (session->exp_cmd_sn == cmnd->pdu.bhs.sn)) {
			TRACE_CONN_CLOSE_DBG("Freeing pending cmd %p", cmnd);

			list_del(&cmnd->pending_list_entry);

			session->exp_cmd_sn++;

			spin_unlock(&session->sn_lock);

			req_cmnd_release_force(cmnd, 0);

			spin_lock(&session->sn_lock);

	spin_unlock(&session->sn_lock);

static void free_orphaned_pending_commands(struct iscsi_conn *conn)
	struct iscsi_session *session = conn->session;
	struct list_head *pending_list = &session->pending_list;
	struct iscsi_cmnd *cmnd;

	spin_lock(&session->sn_lock);
	list_for_each_entry(cmnd, pending_list, pending_list_entry) {
		TRACE_CONN_CLOSE_DBG("Pending cmd %p "
			"(conn %p, cmd_sn %u, exp_cmd_sn %u)",
			cmnd, conn, cmnd->pdu.bhs.sn,
			session->exp_cmd_sn);
		if (cmnd->conn == conn) {
			PRINT_ERROR("Freeing orphaned pending cmd %p", cmnd);

			list_del(&cmnd->pending_list_entry);

			if (session->exp_cmd_sn == cmnd->pdu.bhs.sn)
				session->exp_cmd_sn++;

			spin_unlock(&session->sn_lock);

			req_cmnd_release_force(cmnd, 0);

			spin_lock(&session->sn_lock);

	spin_unlock(&session->sn_lock);
#ifdef CONFIG_SCST_DEBUG
static void trace_conn_close(struct iscsi_conn *conn)
	struct iscsi_cmnd *cmnd;
#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
	struct iscsi_cmnd *rsp;
#endif

	if (time_after(jiffies, start_waiting + 10*HZ))
		trace_flag |= TRACE_CONN_OC_DBG;

	spin_lock_bh(&conn->cmd_list_lock);
	list_for_each_entry(cmnd, &conn->cmd_list,
			cmd_list_entry) {
		TRACE_CONN_CLOSE_DBG(
			"cmd %p, scst_state %x, scst_cmd state %d, "
			"data_waiting %d, ref_cnt %d, sn %u, "
			"parent_req %p, pending %d",
			cmnd, cmnd->scst_state,
			(cmnd->parent_req && cmnd->scst_cmd) ?
				cmnd->scst_cmd->state : -1,
			cmnd->data_waiting, atomic_read(&cmnd->ref_cnt),
			cmnd->pdu.bhs.sn, cmnd->parent_req, cmnd->pending);
#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
		TRACE_CONN_CLOSE_DBG("net_ref_cnt %d, sg %p",
			atomic_read(&cmnd->net_ref_cnt),
			cmnd->sg);
		if (cmnd->sg != NULL) {
			for (i = 0; i < cmnd->sg_cnt; i++) {
				struct page *page = sg_page(&cmnd->sg[i]);
				TRACE_CONN_CLOSE_DBG("page %p, "
					"net_priv %p, _count %d",
					page, page->net_priv,
					atomic_read(&page->_count));

		sBUG_ON(cmnd->parent_req != NULL);

		spin_lock_bh(&cmnd->rsp_cmd_lock);
		list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
				rsp_cmd_list_entry) {
			TRACE_CONN_CLOSE_DBG(" rsp %p, "
				"ref_cnt %d, net_ref_cnt %d, sg %p",
				rsp, atomic_read(&rsp->ref_cnt),
				atomic_read(&rsp->net_ref_cnt), rsp->sg);
			if (rsp->sg != cmnd->sg && rsp->sg) {
				for (i = 0; i < rsp->sg_cnt; i++) {
					TRACE_CONN_CLOSE_DBG(" page %p, "
						"net_priv %p, _count %d",
						sg_page(&rsp->sg[i]),
						sg_page(&rsp->sg[i])->net_priv,
						atomic_read(&sg_page(&rsp->sg[i])->_count));

		spin_unlock_bh(&cmnd->rsp_cmd_lock);
#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */

	spin_unlock_bh(&conn->cmd_list_lock);
void iscsi_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *scst_mcmd)
	int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
	void *priv = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);

	TRACE_MGMT_DBG("scst_mcmd %p, fn %d, priv %p", scst_mcmd, fn, priv);

	case SCST_NEXUS_LOSS_SESS:
	case SCST_ABORT_ALL_TASKS_SESS:
		struct iscsi_conn *conn = (struct iscsi_conn *)priv;
		struct iscsi_session *sess = conn->session;
		struct iscsi_conn *c;

		mutex_lock(&sess->target->target_mutex);

		 * We can't mark sess as shutting down earlier, because until
		 * now it might have pending commands. Otherwise, in case of
		 * reinstatement, it could lead to data corruption, because
		 * commands in the session being reinstated could be executed
		 * after commands in the new session.

		sess->sess_shutting_down = 1;
		list_for_each_entry(c, &sess->conn_list, conn_list_entry) {
			if (!test_bit(ISCSI_CONN_SHUTTINGDOWN,
				      &c->conn_aflags)) {
				sess->sess_shutting_down = 0;

		if (conn->conn_reinst_successor != NULL) {
			sBUG_ON(!test_bit(ISCSI_CONN_REINSTATING,
				&conn->conn_reinst_successor->conn_aflags));
			conn_reinst_finished(conn->conn_reinst_successor);
			conn->conn_reinst_successor = NULL;
		} else if (sess->sess_reinst_successor != NULL) {
			sess_reinst_finished(sess->sess_reinst_successor);
			sess->sess_reinst_successor = NULL;

		mutex_unlock(&sess->target->target_mutex);

		complete_all(&conn->ready_to_free);
static void close_conn(struct iscsi_conn *conn)
	struct iscsi_session *session = conn->session;
	struct iscsi_target *target = conn->target;
	typeof(jiffies) start_waiting = jiffies;
	typeof(jiffies) shut_start_waiting = start_waiting;
	bool pending_reported = false, wait_expired = false,
	     shut_expired = false;

#define CONN_PENDING_TIMEOUT	((typeof(jiffies))10*HZ)
#define CONN_WAIT_TIMEOUT	((typeof(jiffies))10*HZ)
#define CONN_REG_SHUT_TIMEOUT	((typeof(jiffies))125*HZ)
#define CONN_DEL_SHUT_TIMEOUT	((typeof(jiffies))10*HZ)

	TRACE_CONN_CLOSE("Closing connection %p (conn_ref_cnt=%d)", conn,
		atomic_read(&conn->conn_ref_cnt));

	iscsi_extracheck_is_rd_thread(conn);

	sBUG_ON(!conn->closing);

	if (conn->active_close) {
		/* We want all the sends we have already issued to complete */
		conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);

		conn->sock->ops->shutdown(conn->sock,
			RCV_SHUTDOWN|SEND_SHUTDOWN);

	mutex_lock(&session->target->target_mutex);

	set_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags);
	reinst = (conn->conn_reinst_successor != NULL);

	mutex_unlock(&session->target->target_mutex);

	/* Abort all outstanding commands */
	rc = scst_rx_mgmt_fn_lun(session->scst_sess,
		SCST_ABORT_ALL_TASKS_SESS, (uint8_t *)&lun, sizeof(lun),
		SCST_NON_ATOMIC, conn);
	if (rc != 0)
		PRINT_ERROR("SCST_ABORT_ALL_TASKS_SESS failed %d", rc);

	rc = scst_rx_mgmt_fn_lun(session->scst_sess,
		SCST_NEXUS_LOSS_SESS, (uint8_t *)&lun, sizeof(lun),
		SCST_NON_ATOMIC, conn);
	if (rc != 0)
		PRINT_ERROR("SCST_NEXUS_LOSS_SESS failed %d", rc);

	if (conn->read_state != RX_INIT_BHS) {
		struct iscsi_cmnd *cmnd = conn->read_cmnd;

		if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD) {
			TRACE_DBG("Going to wait for cmnd %p to change state "
				"from RX_CMD", cmnd);

		wait_event(conn->read_state_waitQ,
			cmnd->scst_state != ISCSI_CMD_STATE_RX_CMD);

		conn->read_cmnd = NULL;
		conn->read_state = RX_INIT_BHS;
		req_cmnd_release_force(cmnd, 0);

	/* ToDo: not the best way to wait */
	while (atomic_read(&conn->conn_ref_cnt) != 0) {
		mutex_lock(&target->target_mutex);
		spin_lock(&session->sn_lock);
		if (session->tm_rsp && session->tm_rsp->conn == conn) {
			struct iscsi_cmnd *tm_rsp = session->tm_rsp;
			TRACE(TRACE_MGMT_MINOR, "Dropping delayed TM rsp %p",
				tm_rsp);
			session->tm_rsp = NULL;
			session->tm_active--;
			WARN_ON(session->tm_active < 0);
			spin_unlock(&session->sn_lock);
			mutex_unlock(&target->target_mutex);

			rsp_cmnd_release(tm_rsp);
		} else {
			spin_unlock(&session->sn_lock);
			mutex_unlock(&target->target_mutex);
		}

		/* It's safe to check it without sn_lock */
		if (!list_empty(&session->pending_list)) {
			TRACE_CONN_CLOSE_DBG("Disposing pending commands on "
				"connection %p (conn_ref_cnt=%d)", conn,
				atomic_read(&conn->conn_ref_cnt));

			free_pending_commands(conn);

			if (time_after(jiffies,
				       start_waiting + CONN_PENDING_TIMEOUT)) {
				if (!pending_reported) {
					TRACE_CONN_CLOSE("%s",
						"Pending wait time expired");
					pending_reported = true;

				free_orphaned_pending_commands(conn);

		iscsi_make_conn_wr_active(conn);

		/* That's for active close only, actually */
		if (time_after(jiffies, start_waiting + CONN_WAIT_TIMEOUT) &&
			TRACE_CONN_CLOSE("Wait time expired (conn %p, "
				"sk_state %d)",
				conn, conn->sock->sk->sk_state);
			conn->sock->ops->shutdown(conn->sock, SEND_SHUTDOWN);

			shut_start_waiting = jiffies;

		if (wait_expired && !shut_expired &&
		    time_after(jiffies, shut_start_waiting +
				(conn->deleting ? CONN_DEL_SHUT_TIMEOUT :
						  CONN_REG_SHUT_TIMEOUT))) {
			TRACE_CONN_CLOSE("Wait time after shutdown expired "
				"(conn %p, sk_state %d)", conn,
				conn->sock->sk->sk_state);
			conn->sock->sk->sk_prot->disconnect(conn->sock->sk, 0);

		TRACE_CONN_CLOSE_DBG("conn %p, conn_ref_cnt %d left, "
			"wr_state %d, exp_cmd_sn %u",
			conn, atomic_read(&conn->conn_ref_cnt),
			conn->wr_state, session->exp_cmd_sn);

		trace_conn_close(conn);

		iscsi_check_closewait(conn);

	write_lock_bh(&conn->sock->sk->sk_callback_lock);
	conn->sock->sk->sk_state_change = conn->old_state_change;
	conn->sock->sk->sk_data_ready = conn->old_data_ready;
	conn->sock->sk->sk_write_space = conn->old_write_space;
	write_unlock_bh(&conn->sock->sk->sk_callback_lock);

	spin_lock_bh(&iscsi_wr_lock);
	t = (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE);
	spin_unlock_bh(&iscsi_wr_lock);

	if (t && (atomic_read(&conn->conn_ref_cnt) == 0))

	TRACE_CONN_CLOSE_DBG("Waiting for wr thread (conn %p), "
		"wr_state %x", conn, conn->wr_state);

	wait_for_completion(&conn->ready_to_free);

	TRACE_CONN_CLOSE("Notifying user space about closing connection %p",
		conn);

	event_send(target->tid, session->sid, conn->cid, E_CONN_CLOSE);

#ifdef CONFIG_SCST_PROC
	mutex_lock(&target->target_mutex);
	mutex_unlock(&target->target_mutex);
#endif

	kobject_put(&conn->iscsi_conn_kobj);
static int close_conn_thr(void *arg)
	struct iscsi_conn *conn = (struct iscsi_conn *)arg;

#ifdef CONFIG_SCST_EXTRACHECKS
	/*
	 * To satisfy iscsi_extracheck_is_rd_thread() in functions called
	 * during connection close. This is safe, because at this point conn
	 * can't be used by any other thread.
	 */
	conn->rd_task = current;
#endif

static void start_close_conn(struct iscsi_conn *conn)
	struct task_struct *t;

	t = kthread_run(close_conn_thr, conn, "iscsi_conn_cleanup");
	if (IS_ERR(t)) {
		PRINT_ERROR("kthread_run() failed (%ld), closing conn %p "
			"directly", PTR_ERR(t), conn);
static inline void iscsi_conn_init_read(struct iscsi_conn *conn,
	void __user *data, size_t len)
	conn->read_iov[0].iov_base = data;
	conn->read_iov[0].iov_len = len;
	conn->read_msg.msg_iov = conn->read_iov;
	conn->read_msg.msg_iovlen = 1;
	conn->read_size = len;

static void iscsi_conn_prepare_read_ahs(struct iscsi_conn *conn,
	struct iscsi_cmnd *cmnd)
	int asize = (cmnd->pdu.ahssize + 3) & -4;

	/* ToDo: __GFP_NOFAIL ?? */
	cmnd->pdu.ahs = kmalloc(asize, __GFP_NOFAIL|GFP_KERNEL);
	sBUG_ON(cmnd->pdu.ahs == NULL);
	iscsi_conn_init_read(conn, (void __force __user *)cmnd->pdu.ahs, asize);
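
/*
 * A minimal sketch (not part of the driver) of the "(x + 3) & -4" idiom
 * used above for the AHS size, and below for data padding: it rounds a
 * length up to the next multiple of 4, since iSCSI requires PDU segments
 * to be padded to 4-byte boundaries. The helper name is hypothetical.
 */
static inline int iscsi_pad4_sketch(int len)
{
	return (len + 3) & -4;	/* e.g. 5 -> 8, 8 -> 8, 0 -> 0 */
}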
static struct iscsi_cmnd *iscsi_get_send_cmnd(struct iscsi_conn *conn)
	struct iscsi_cmnd *cmnd = NULL;

	spin_lock_bh(&conn->write_list_lock);
	if (!list_empty(&conn->write_list)) {
		cmnd = list_entry(conn->write_list.next, struct iscsi_cmnd,
			write_list_entry);
		cmd_del_from_write_list(cmnd);
		cmnd->write_processing_started = 1;
	}
	spin_unlock_bh(&conn->write_list_lock);
/* Returns number of bytes left to receive or <0 for error */
static int do_recv(struct iscsi_conn *conn)
	EXTRACHECKS_BUG_ON(conn->read_cmnd == NULL);

	if (unlikely(conn->closing)) {

	 * We suppose that if sock_recvmsg() returned less data than
	 * requested, then next time it will return -EAGAIN, so there's no
	 * point in calling it again.

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = conn->read_msg.msg_iov;
	msg.msg_iovlen = conn->read_msg.msg_iovlen;
	first_len = msg.msg_iov->iov_len;

	res = sock_recvmsg(conn->sock, &msg, conn->read_size,
		MSG_DONTWAIT | MSG_NOSIGNAL);

	 * To save considerable effort and CPU power, we suppose that the TCP
	 * functions adjust conn->read_msg.msg_iov and
	 * conn->read_msg.msg_iovlen according to the amount of copied data.
	 * The sBUG_ON below is intended to catch any future change in that
	 * behavior.

	sBUG_ON((res >= first_len) &&
		(conn->read_msg.msg_iov->iov_len != 0));
	conn->read_size -= res;
	if (conn->read_size != 0) {
		if (res >= first_len) {
			int done = 1 + ((res - first_len) >> PAGE_SHIFT);
			conn->read_msg.msg_iov += done;
			conn->read_msg.msg_iovlen -= done;

	res = conn->read_size;

	TRACE_DBG("EAGAIN received for conn %p", conn);
	res = conn->read_size;

	TRACE_DBG("ERESTARTSYS received for conn %p", conn);

	PRINT_ERROR("sock_recvmsg() failed: %d", res);
	mark_conn_closed(conn);
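
/*
 * An illustrative sketch (an assumption, not driver code) of the iovec
 * accounting that do_recv() performs above after a partial sock_recvmsg():
 * the shortcut "done = 1 + ((res - first_len) >> PAGE_SHIFT)" is only
 * valid if every iovec entry after the first is exactly PAGE_SIZE long.
 * The helper name is hypothetical.
 */
static inline void iscsi_sketch_advance_iov(struct iovec **iov,
	size_t *iovlen, int res, int first_len)
{
	if (res >= first_len) {
		/*
		 * The first entry plus (res - first_len) / PAGE_SIZE
		 * page-sized entries have been fully consumed.
		 */
		int done = 1 + ((res - first_len) >> PAGE_SHIFT);
		*iov += done;
		*iovlen -= done;
	}
}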
static int iscsi_rx_check_ddigest(struct iscsi_conn *conn)
	struct iscsi_cmnd *cmnd = conn->read_cmnd;

	conn->read_state = RX_END;

	if (cmnd->pdu.datasize <= 16*1024) {
		 * It's cache-hot, so let's compute it inline. The choice
		 * here is about what will add more latency: possible cache
		 * misses or the digest calculation.

		TRACE_DBG("cmnd %p, opcode %x: checking RX "
			"ddigest inline", cmnd, cmnd_opcode(cmnd));
		cmnd->ddigest_checked = 1;
		res = digest_rx_data(cmnd);
		if (unlikely(res != 0)) {
			mark_conn_closed(conn);

	} else if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
		cmd_add_on_rx_ddigest_list(cmnd, cmnd);

	} else if (cmnd_opcode(cmnd) != ISCSI_OP_SCSI_DATA_OUT) {
		 * We can get here only for NOP-Out. The iSCSI RFC doesn't
		 * specify how to deal with digest errors in this case.
		 * Is closing the connection correct?

		TRACE_DBG("cmnd %p, opcode %x: checking NOP RX "
			"ddigest", cmnd, cmnd_opcode(cmnd));
		res = digest_rx_data(cmnd);
		if (unlikely(res != 0)) {
			mark_conn_closed(conn);
/* No locks, conn is rd processing */
static void process_read_io(struct iscsi_conn *conn, int *closed)
	struct iscsi_cmnd *cmnd = conn->read_cmnd;

	/* In case of error, cmnd will be freed in close_conn() */

	switch (conn->read_state) {
		EXTRACHECKS_BUG_ON(conn->read_cmnd != NULL);
		cmnd = cmnd_alloc(conn, NULL);
		conn->read_cmnd = cmnd;
		iscsi_conn_init_read(cmnd->conn,
			(void __force __user *)&cmnd->pdu.bhs,
			sizeof(cmnd->pdu.bhs));
		conn->read_state = RX_BHS;

		iscsi_cmnd_get_length(&cmnd->pdu);
		if (cmnd->pdu.ahssize == 0) {
			if ((conn->hdigest_type & DIGEST_NONE) == 0)
				conn->read_state = RX_INIT_HDIGEST;
			else
				conn->read_state = RX_CMD_START;
		} else {
			iscsi_conn_prepare_read_ahs(conn, cmnd);
			conn->read_state = RX_AHS;
		}

		res = cmnd_rx_start(cmnd);
		if (cmnd->pdu.datasize == 0)
			conn->read_state = RX_END;
		else
			conn->read_state = RX_DATA;

		conn->read_state = RX_CMD_CONTINUE;

		sBUG_ON(!conn->closing);

	case RX_CMD_CONTINUE:
		if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD) {
			TRACE_DBG("cmnd %p is still in RX_CMD state",
				cmnd);

		res = cmnd_rx_continue(cmnd);
		if (unlikely(res != 0))
			sBUG_ON(!conn->closing);

		if (cmnd->pdu.datasize == 0)
			conn->read_state = RX_END;
		else
			conn->read_state = RX_DATA;

		int psz = ((cmnd->pdu.datasize + 3) & -4) - cmnd->pdu.datasize;

		TRACE_DBG("padding %d bytes", psz);
		iscsi_conn_init_read(conn,
			(void __force __user *)&conn->rpadding, psz);
		conn->read_state = RX_PADDING;
	} else if ((conn->ddigest_type & DIGEST_NONE) != 0)
		conn->read_state = RX_END;
	else
		conn->read_state = RX_INIT_DDIGEST;

		if (unlikely(conn->read_size != 0)) {
			PRINT_CRIT_ERROR("%d %x %d", res,
				cmnd_opcode(cmnd), conn->read_size);

		conn->read_cmnd = NULL;
		conn->read_state = RX_INIT_BHS;

		EXTRACHECKS_BUG_ON(conn->read_size != 0);

	case RX_INIT_HDIGEST:
		iscsi_conn_init_read(conn,
			(void __force __user *)&cmnd->hdigest, sizeof(u32));
		conn->read_state = RX_CHECK_HDIGEST;

	case RX_CHECK_HDIGEST:
		res = digest_rx_header(cmnd);
		if (unlikely(res != 0)) {
			PRINT_ERROR("rx header digest for "
				"initiator %s failed (%d)",
				conn->session->initiator_name, res);
			mark_conn_closed(conn);

		conn->read_state = RX_CMD_START;

	case RX_INIT_DDIGEST:
		iscsi_conn_init_read(conn,
			(void __force __user *)&cmnd->ddigest,
			sizeof(u32));
		conn->read_state = RX_CHECK_DDIGEST;

	case RX_CHECK_DDIGEST:
		res = iscsi_rx_check_ddigest(conn);

		if ((conn->hdigest_type & DIGEST_NONE) == 0)
			conn->read_state = RX_INIT_HDIGEST;
		else
			conn->read_state = RX_CMD_START;

		if ((conn->ddigest_type & DIGEST_NONE) == 0)
			conn->read_state = RX_INIT_DDIGEST;
		else
			conn->read_state = RX_END;

		PRINT_CRIT_ERROR("%d %x", conn->read_state, cmnd_opcode(cmnd));
		res = -1; /* to keep compiler happy */

	if (unlikely(conn->closing)) {
		start_close_conn(conn);
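
/*
 * Summary of the read state machine driven above (bracketed states are
 * entered only when the corresponding digest is negotiated, an AHS is
 * present, or padding is needed):
 *
 *   RX_INIT_BHS -> RX_BHS -> [RX_INIT_HDIGEST -> RX_CHECK_HDIGEST] ->
 *   [RX_AHS ->] RX_CMD_START [-> RX_CMD_CONTINUE] [-> RX_DATA]
 *   [-> RX_PADDING] [-> RX_INIT_DDIGEST -> RX_CHECK_DDIGEST] -> RX_END ->
 *   back to RX_INIT_BHS for the next PDU.
 */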
 * Called under iscsi_rd_lock and BHs disabled, but will drop it inside,
 * then reacquire it.
static void scst_do_job_rd(void)
	__acquires(&iscsi_rd_lock)
	__releases(&iscsi_rd_lock)

	 * We delete connections from the head of iscsi_rd_list and add them
	 * back at its tail to maintain fairness between them.

	while (!list_empty(&iscsi_rd_list)) {
		struct iscsi_conn *conn = list_entry(iscsi_rd_list.next,
			typeof(*conn), rd_list_entry);

		list_del(&conn->rd_list_entry);

		sBUG_ON(conn->rd_state == ISCSI_CONN_RD_STATE_PROCESSING);
		conn->rd_data_ready = 0;
		conn->rd_state = ISCSI_CONN_RD_STATE_PROCESSING;
#ifdef CONFIG_SCST_EXTRACHECKS
		conn->rd_task = current;
#endif
		spin_unlock_bh(&iscsi_rd_lock);

		process_read_io(conn, &closed);

		spin_lock_bh(&iscsi_rd_lock);

#ifdef CONFIG_SCST_EXTRACHECKS
		conn->rd_task = NULL;
#endif
		if (conn->rd_data_ready) {
			list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
			conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
		} else
			conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
static inline int test_rd_list(void)
	int res = !list_empty(&iscsi_rd_list) ||
		  unlikely(kthread_should_stop());

int istrd(void *arg)
	PRINT_INFO("Read thread started, PID %d", current->pid);

	current->flags |= PF_NOFREEZE;

	spin_lock_bh(&iscsi_rd_lock);
	while (!kthread_should_stop()) {
		init_waitqueue_entry(&wait, current);

		if (!test_rd_list()) {
			add_wait_queue_exclusive_head(&iscsi_rd_waitQ, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&iscsi_rd_lock);
			schedule();
			spin_lock_bh(&iscsi_rd_lock);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&iscsi_rd_waitQ, &wait);

	spin_unlock_bh(&iscsi_rd_lock);

	 * If kthread_should_stop() is true, we are guaranteed to be
	 * in the middle of module unload, so iscsi_rd_list must be empty.

	sBUG_ON(!list_empty(&iscsi_rd_list));

	PRINT_INFO("Read thread PID %d finished", current->pid);
#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
static inline void __iscsi_get_page_callback(struct iscsi_cmnd *cmd)
	TRACE_NET_PAGE("cmd %p, new net_ref_cnt %d",
		cmd, atomic_read(&cmd->net_ref_cnt)+1);

	v = atomic_inc_return(&cmd->net_ref_cnt);

	TRACE_NET_PAGE("getting cmd %p", cmd);

void iscsi_get_page_callback(struct page *page)
	struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;

	TRACE_NET_PAGE("page %p, _count %d", page,
		atomic_read(&page->_count));

	__iscsi_get_page_callback(cmd);

static inline void __iscsi_put_page_callback(struct iscsi_cmnd *cmd)
	TRACE_NET_PAGE("cmd %p, new net_ref_cnt %d", cmd,
		atomic_read(&cmd->net_ref_cnt)-1);

	if (atomic_dec_and_test(&cmd->net_ref_cnt)) {
		int i, sg_cnt = cmd->sg_cnt;
		for (i = 0; i < sg_cnt; i++) {
			struct page *page = sg_page(&cmd->sg[i]);
			TRACE_NET_PAGE("Clearing page %p", page);
			if (page->net_priv == cmd)
				page->net_priv = NULL;

void iscsi_put_page_callback(struct page *page)
	struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;

	TRACE_NET_PAGE("page %p, _count %d", page,
		atomic_read(&page->_count));

	__iscsi_put_page_callback(cmd);
static void check_net_priv(struct iscsi_cmnd *cmd, struct page *page)
	if ((atomic_read(&cmd->net_ref_cnt) == 1) && (page->net_priv == cmd)) {
		TRACE_DBG("sendpage() didn't call get_page(), zeroing "
			"net_priv %p (page %p)", page->net_priv, page);
		page->net_priv = NULL;

#else
static inline void check_net_priv(struct iscsi_cmnd *cmd, struct page *page) {}
static inline void __iscsi_get_page_callback(struct iscsi_cmnd *cmd) {}
static inline void __iscsi_put_page_callback(struct iscsi_cmnd *cmd) {}
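
/*
 * A descriptive note on how the zero-copy accounting above pairs up:
 * while pages are owned by the TCP stack, each page's net_priv points
 * back at the owning cmnd and cmd->net_ref_cnt counts the outstanding
 * network references. The stack balances iscsi_get_page_callback() and
 * iscsi_put_page_callback() per reference, the final put clears all the
 * net_priv back-pointers, and check_net_priv() handles the case where
 * sendpage() returned without having called get_page() at all.
 */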
/* This is partially taken from the Ardis code. */
static int write_data(struct iscsi_conn *conn)
	struct socket *sock;
	ssize_t (*sock_sendpage)(struct socket *, struct page *, int, size_t,
		int);
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
	struct iscsi_cmnd *write_cmnd = conn->write_cmnd;
	struct iscsi_cmnd *ref_cmd;
	struct scatterlist *sg;
	int saved_size, size, sendsize;
	int length, offset, idx;
	int flags, res, count, sg_size;
	bool do_put = false, ref_cmd_to_parent;

	iscsi_extracheck_is_wr_thread(conn);

	if (write_cmnd->own_sg == 0) {
		ref_cmd = write_cmnd->parent_req;
		ref_cmd_to_parent = true;
	} else {
		ref_cmd = write_cmnd;
		ref_cmd_to_parent = false;
	}

	if (!ref_cmd->on_written_list) {
		TRACE_DBG("Adding cmd %p to conn %p written_list", ref_cmd,
			conn);
		spin_lock_bh(&conn->write_list_lock);
		ref_cmd->on_written_list = 1;
		ref_cmd->write_timeout = jiffies + ISCSI_RSP_TIMEOUT;
		list_add_tail(&ref_cmd->written_list_entry,
			&conn->written_list);
		spin_unlock_bh(&conn->write_list_lock);
	}

	if (!timer_pending(&conn->rsp_timer)) {
		sBUG_ON(!ref_cmd->on_written_list);
		spin_lock_bh(&conn->write_list_lock);
		if (likely(!timer_pending(&conn->rsp_timer))) {
			TRACE_DBG("Starting timer on %ld (conn %p)",
				ref_cmd->write_timeout, conn);
			conn->rsp_timer.expires = ref_cmd->write_timeout;
			add_timer(&conn->rsp_timer);
		}
		spin_unlock_bh(&conn->write_list_lock);
	}
	size = conn->write_size;

	iop = conn->write_iop;
	count = conn->write_iop_used;

	sBUG_ON(count > (signed)(sizeof(conn->write_iov) /
				 sizeof(conn->write_iov[0])));

	res = vfs_writev(file,
		(struct iovec __force __user *)iop,

	TRACE_WRITE("sid %#Lx, cid %u, res %d, iov_len %ld",
		(unsigned long long)conn->session->sid,
		conn->cid, res, (long)iop->iov_len);
	if (unlikely(res <= 0)) {
		if (res == -EAGAIN) {
			conn->write_iop = iop;
			conn->write_iop_used = count;
		} else if (res == -EINTR)

	while ((typeof(rest))iop->iov_len <= rest && rest) {
		rest -= iop->iov_len;

	conn->write_iop = NULL;
	conn->write_iop_used = 0;

	sBUG_ON(iop > conn->write_iov + sizeof(conn->write_iov)
		/sizeof(conn->write_iov[0]));
	iop->iov_base += rest;
	iop->iov_len -= rest;
	sg = write_cmnd->sg;
	if (unlikely(sg == NULL)) {
		PRINT_INFO("WARNING: Data missed (cmd %p)!", write_cmnd);

	/* Protect against the race with too early transfer completion */
	__iscsi_get_page_callback(ref_cmd);

#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
	sock_sendpage = sock->ops->sendpage;

	if ((write_cmnd->parent_req->scst_cmd != NULL) &&
	    scst_cmd_get_dh_data_buff_alloced(write_cmnd->parent_req->scst_cmd))
		sock_sendpage = sock_no_sendpage;

	sock_sendpage = sock->ops->sendpage;

	flags = MSG_DONTWAIT;

	if (sg != write_cmnd->rsp_sg) {
		offset = conn->write_offset + sg[0].offset;
		idx = offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;
		length = min(size, (int)PAGE_SIZE - offset);
		TRACE_WRITE("write_offset %d, sg_size %d, idx %d, offset %d, "
			"length %d", conn->write_offset, sg_size, idx, offset,
			length);

		offset = conn->write_offset;
		while (offset >= sg[idx].length) {
			offset -= sg[idx].length;
			idx++;
		}
		length = sg[idx].length - offset;
		offset += sg[idx].offset;
		sock_sendpage = sock_no_sendpage;
		TRACE_WRITE("rsp_sg: write_offset %d, sg_size %d, idx %d, "
			"offset %d, length %d", conn->write_offset, sg_size,
			idx, offset, length);

	page = sg_page(&sg[idx]);

	sendpage = sock_sendpage;

#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
	{
		static DEFINE_SPINLOCK(net_priv_lock);
		spin_lock(&net_priv_lock);
		if (unlikely(page->net_priv != NULL)) {
			if (page->net_priv != ref_cmd) {

				 * This might happen if user space supplies
				 * the same pages to scst_user in different
				 * commands, or in case of zero-copy FILEIO,
				 * when several initiators request the same
				 * data simultaneously.

				TRACE_DBG("net_priv isn't NULL and != "
					"ref_cmd (write_cmnd %p, ref_cmd "
					"%p, sg %p, idx %d, page %p, "
					"net_priv %p)",
					write_cmnd, ref_cmd, sg, idx,
					page, page->net_priv);
				sendpage = sock_no_sendpage;

		page->net_priv = ref_cmd;
		spin_unlock(&net_priv_lock);

	sendsize = min(size, length);
	if (size <= sendsize) {
		res = sendpage(sock, page, offset, size, flags);
		TRACE_WRITE("Final %s sid %#Lx, cid %u, res %d (page "
			"index %lu, offset %u, size %u, cmd %p, "
			"page %p)", (sendpage != sock_no_sendpage) ?
				"sendpage" : "sock_no_sendpage",
			(unsigned long long)conn->session->sid,
			conn->cid, res, page->index,
			offset, size, write_cmnd, page);
		if (unlikely(res <= 0)) {

		check_net_priv(ref_cmd, page);

		conn->write_size = 0;

	res = sendpage(sock, page, offset, sendsize, flags | MSG_MORE);
	TRACE_WRITE("%s sid %#Lx, cid %u, res %d (page index %lu, "
		"offset %u, sendsize %u, size %u, cmd %p, page %p)",
		(sendpage != sock_no_sendpage) ? "sendpage" :
			"sock_no_sendpage",
		(unsigned long long)conn->session->sid, conn->cid,
		res, page->index, offset, sendsize, size,
		write_cmnd, page);
	if (unlikely(res <= 0)) {

	check_net_priv(ref_cmd, page);

	if (res == sendsize) {
		idx++;
		EXTRACHECKS_BUG_ON(idx >= ref_cmd->sg_cnt);
		page = sg_page(&sg[idx]);
		length = sg[idx].length;
		offset = sg[idx].offset;
	/* else fall through */

#ifndef CONFIG_SCST_DEBUG

	PRINT_ERROR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
		(unsigned long long)conn->session->sid,
		conn->cid, conn->write_cmnd);

	if (ref_cmd_to_parent &&
	    ((ref_cmd->scst_cmd != NULL) || (ref_cmd->scst_aen != NULL))) {
		if (ref_cmd->scst_state == ISCSI_CMD_STATE_AEN)
			scst_set_aen_delivery_status(ref_cmd->scst_aen,
				SCST_AEN_RES_FAILED);
		else
			scst_set_delivery_status(ref_cmd->scst_cmd,
				SCST_CMD_DELIVERY_FAILED);
static int exit_tx(struct iscsi_conn *conn, int res)
	iscsi_extracheck_is_wr_thread(conn);

#ifndef CONFIG_SCST_DEBUG

	PRINT_ERROR("Sending data failed: initiator %s, "
		"write_size %d, write_state %d, res %d",
		conn->session->initiator_name,
		conn->write_size,
		conn->write_state, res);

	conn->write_state = TX_END;
	conn->write_size = 0;
	mark_conn_closed(conn);

static int tx_ddigest(struct iscsi_cmnd *cmnd, int state)
	int res, rest = cmnd->conn->write_size;
	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
	struct kvec iov;

	iscsi_extracheck_is_wr_thread(cmnd->conn);

	TRACE_DBG("Sending data digest %x (cmd %p)", cmnd->ddigest, cmnd);

	iov.iov_base = (char *)(&cmnd->ddigest) + (sizeof(u32) - rest);
	iov.iov_len = rest;

	res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
	if (res > 0) {
		cmnd->conn->write_size -= res;
		if (!cmnd->conn->write_size)
			cmnd->conn->write_state = state;
	} else
		res = exit_tx(cmnd->conn, res);

static void init_tx_hdigest(struct iscsi_cmnd *cmnd)
	struct iscsi_conn *conn = cmnd->conn;
	struct iovec *iop;

	iscsi_extracheck_is_wr_thread(conn);

	digest_tx_header(cmnd);

	sBUG_ON(conn->write_iop_used >=
		(signed)(sizeof(conn->write_iov)/sizeof(conn->write_iov[0])));

	iop = &conn->write_iop[conn->write_iop_used];
	conn->write_iop_used++;
	iop->iov_base = (void __force __user *)&(cmnd->hdigest);
	iop->iov_len = sizeof(u32);
	conn->write_size += sizeof(u32);

static int tx_padding(struct iscsi_cmnd *cmnd, int state)
	int res, rest = cmnd->conn->write_size;
	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
	struct kvec iov;
	static const uint32_t padding;

	iscsi_extracheck_is_wr_thread(cmnd->conn);

	TRACE_DBG("Sending %d padding bytes (cmd %p)", rest, cmnd);

	iov.iov_base = (char *)(&padding) + (sizeof(uint32_t) - rest);
	iov.iov_len = rest;

	res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
	if (res > 0) {
		cmnd->conn->write_size -= res;
		if (!cmnd->conn->write_size)
			cmnd->conn->write_state = state;
	} else
		res = exit_tx(cmnd->conn, res);

static int iscsi_do_send(struct iscsi_conn *conn, int state)
	int res;

	iscsi_extracheck_is_wr_thread(conn);

	res = write_data(conn);
	if (res > 0) {
		if (!conn->write_size)
			conn->write_state = state;
	} else
		res = exit_tx(conn, res);

 * No locks, conn is wr processing.
 *
 * IMPORTANT! Connection conn must be protected by an additional conn_get()
 * on entry to this function, because otherwise it could be destroyed
 * inside as a result of cmnd release.
int iscsi_send(struct iscsi_conn *conn)
	struct iscsi_cmnd *cmnd = conn->write_cmnd;
	int ddigest, res = 0;

	TRACE_DBG("conn %p, write_cmnd %p", conn, cmnd);

	iscsi_extracheck_is_wr_thread(conn);

	ddigest = conn->ddigest_type != DIGEST_NONE ? 1 : 0;

	switch (conn->write_state) {
		sBUG_ON(cmnd != NULL);
		cmnd = conn->write_cmnd = iscsi_get_send_cmnd(conn);

		cmnd_tx_start(cmnd);
		if (!(conn->hdigest_type & DIGEST_NONE))
			init_tx_hdigest(cmnd);
		conn->write_state = TX_BHS_DATA;

		res = iscsi_do_send(conn, cmnd->pdu.datasize ?
			TX_INIT_PADDING : TX_END);
		if (res <= 0 || conn->write_state != TX_INIT_PADDING)

	case TX_INIT_PADDING:
		cmnd->conn->write_size = ((cmnd->pdu.datasize + 3) & -4) -
			cmnd->pdu.datasize;
		if (cmnd->conn->write_size != 0)
			conn->write_state = TX_PADDING;
		else if (ddigest)
			conn->write_state = TX_INIT_DDIGEST;
		else
			conn->write_state = TX_END;

		res = tx_padding(cmnd, ddigest ? TX_INIT_DDIGEST : TX_END);
		if (res <= 0 || conn->write_state != TX_INIT_DDIGEST)

	case TX_INIT_DDIGEST:
		cmnd->conn->write_size = sizeof(u32);
		conn->write_state = TX_DDIGEST;

		res = tx_ddigest(cmnd, TX_END);

		PRINT_CRIT_ERROR("%d %d %x", res, conn->write_state,
			cmnd_opcode(cmnd));

	if (conn->write_state != TX_END)

	if (unlikely(conn->write_size)) {
		PRINT_CRIT_ERROR("%d %x %u", res, cmnd_opcode(cmnd),
			conn->write_size);

	rsp_cmnd_release(cmnd);

	conn->write_cmnd = NULL;
	conn->write_state = TX_INIT;

	TRACE_EXIT_RES(res);
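
/*
 * Summary of the write state machine driven above (bracketed states are
 * entered only when padding or a data digest is needed):
 *
 *   TX_INIT -> TX_BHS_DATA -> [TX_INIT_PADDING -> [TX_PADDING] ->
 *   [TX_INIT_DDIGEST -> TX_DDIGEST]] -> TX_END -> back to TX_INIT for
 *   the next response in the write list.
 */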
/* No locks, conn is wr processing.
 *
 * IMPORTANT! Connection conn must be protected by an additional conn_get()
 * on entry to this function, because otherwise it could be destroyed
 * inside as a result of iscsi_send(), which releases sent commands.
 */
static int process_write_queue(struct iscsi_conn *conn)
	if (likely(test_write_ready(conn)))
		res = iscsi_send(conn);

	TRACE_EXIT_RES(res);

 * Called under iscsi_wr_lock and BHs disabled, but will drop it inside,
 * then reacquire it.
static void scst_do_job_wr(void)
	__acquires(&iscsi_wr_lock)
	__releases(&iscsi_wr_lock)

	 * We delete connections from the head of iscsi_wr_list and add them
	 * back at its tail to maintain fairness between them.
	while (!list_empty(&iscsi_wr_list)) {
		struct iscsi_conn *conn = list_entry(iscsi_wr_list.next,
			typeof(*conn), wr_list_entry);

		TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d, "
			"write ready %d", conn, conn->wr_state,
			conn->wr_space_ready, test_write_ready(conn));

		list_del(&conn->wr_list_entry);

		sBUG_ON(conn->wr_state == ISCSI_CONN_WR_STATE_PROCESSING);

		conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
		conn->wr_space_ready = 0;
#ifdef CONFIG_SCST_EXTRACHECKS
		conn->wr_task = current;
#endif
		spin_unlock_bh(&iscsi_wr_lock);

		rc = process_write_queue(conn);

		spin_lock_bh(&iscsi_wr_lock);
#ifdef CONFIG_SCST_EXTRACHECKS
		conn->wr_task = NULL;
#endif
		if ((rc == -EAGAIN) && !conn->wr_space_ready) {
			conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;

		if (test_write_ready(conn)) {
			list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
			conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
		} else
			conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;

static inline int test_wr_list(void)
	int res = !list_empty(&iscsi_wr_list) ||
		  unlikely(kthread_should_stop());

int istwr(void *arg)
	PRINT_INFO("Write thread started, PID %d", current->pid);

	current->flags |= PF_NOFREEZE;

	spin_lock_bh(&iscsi_wr_lock);
	while (!kthread_should_stop()) {
		init_waitqueue_entry(&wait, current);

		if (!test_wr_list()) {
			add_wait_queue_exclusive_head(&iscsi_wr_waitQ, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&iscsi_wr_lock);
			schedule();
			spin_lock_bh(&iscsi_wr_lock);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&iscsi_wr_waitQ, &wait);

	spin_unlock_bh(&iscsi_wr_lock);

	 * If kthread_should_stop() is true, we are guaranteed to be
	 * in the middle of module unload, so iscsi_wr_list must be empty.

	sBUG_ON(!list_empty(&iscsi_wr_list));

	PRINT_INFO("Write thread PID %d finished", current->pid);