TX_END,
};
+#ifdef NET_PAGE_CALLBACKS_DEFINED
+static void iscsi_check_closewait(struct iscsi_conn *conn)
+{
+ struct iscsi_cmnd *cmnd;
+
+ TRACE_ENTRY();
+
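+ /*
+ * Only reclaim pages when the socket is in CLOSE_WAIT or CLOSE;
+ * in any other state data may still be queued for sending.
+ */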
+ if ((conn->sock->sk->sk_state != TCP_CLOSE_WAIT) &&
+ (conn->sock->sk->sk_state != TCP_CLOSE)) {
+ TRACE_CONN_CLOSE_DBG("sk_state %d, skipping",
+ conn->sock->sk->sk_state);
+ goto out;
+ }
+
+ /*
+ * No more data is going to be sent, so all buffers that are still being
+ * sent can be freed now. Strangely, TCP doesn't do that itself.
+ */
+
+again:
+ spin_lock_bh(&conn->cmd_list_lock);
+ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
+ TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, data_waiting %d, "
+ "ref_cnt %d, parent_req %p, net_ref_cnt %d, sg %p",
+ cmnd, cmnd->scst_state, cmnd->data_waiting,
+ atomic_read(&cmnd->ref_cnt), cmnd->parent_req,
+ atomic_read(&cmnd->net_ref_cnt), cmnd->sg);
+ sBUG_ON(cmnd->parent_req != NULL);
+ if (cmnd->sg != NULL) {
+ int sg_cnt, i, restart = 0;
+ sg_cnt = get_pgcnt(cmnd->bufflen,
+ cmnd->sg[0].offset);
+ cmnd_get(cmnd);
+ for(i = 0; i < sg_cnt; i++) {
+ TRACE_CONN_CLOSE_DBG("page %p, net_priv %p, _count %d",
+ cmnd->sg[i].page, cmnd->sg[i].page->net_priv,
+ atomic_read(&cmnd->sg[i].page->_count));
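+ /*
+ * If the network layer still references this page via net_priv,
+ * drop cmd_list_lock before stripping the remaining references
+ * (releasing them can free commands and change the list) and
+ * rescan cmd_list from the beginning.
+ */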
+ if (cmnd->sg[i].page->net_priv != NULL) {
+ if (restart == 0) {
+ spin_unlock_bh(&conn->cmd_list_lock);
+ restart = 1;
+ }
+ while(cmnd->sg[i].page->net_priv != NULL)
+ iscsi_put_page_callback(cmnd->sg[i].page);
+ }
+ }
+ cmnd_put(cmnd);
+ if (restart)
+ goto again;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_list_lock);
+
+out:
+ TRACE_EXIT();
+ return;
+}
+#else
+static inline void iscsi_check_closewait(struct iscsi_conn *conn) {}
+#endif
+
/* No locks */
static void close_conn(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
struct iscsi_target *target = conn->target;
+#ifdef DEBUG
+ unsigned long start_waiting = jiffies;
+#endif
TRACE_ENTRY();
- TRACE_CONN_CLOSE("conn %p, conn_ref_cnt=%d", conn,
+ TRACE_CONN_CLOSE("Closing connection %p (conn_ref_cnt=%d)", conn,
atomic_read(&conn->conn_ref_cnt));
iscsi_extracheck_is_rd_thread(conn);
conn_abort(conn);
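+ /*
+ * A TM response delayed on this connection can no longer be sent;
+ * drop it here before closing.
+ */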
+ mutex_lock(&target->target_mutex);
+ spin_lock(&session->sn_lock);
+ if ((session->tm_rsp != NULL) && (session->tm_rsp->conn == conn)) {
+ struct iscsi_cmnd *tm_rsp = session->tm_rsp;
+ TRACE(TRACE_MGMT_MINOR, "Dropping delayed TM rsp %p", tm_rsp);
+ session->tm_rsp = NULL;
+ session->tm_active = 0;
+ spin_unlock(&session->sn_lock);
+ mutex_unlock(&target->target_mutex);
+
+ rsp_cmnd_release(tm_rsp);
+ } else {
+ spin_unlock(&session->sn_lock);
+ mutex_unlock(&target->target_mutex);
+ }
+
if (conn->read_state != RX_INIT_BHS) {
req_cmnd_release_force(conn->read_cmnd, 0);
conn->read_cmnd = NULL;
struct list_head *pending_list = &session->pending_list;
struct iscsi_cmnd *tmp;
- TRACE_CONN_CLOSE("Disposing pending commands on conn "
- "%p, conn_ref_cnt=%d", conn,
+ TRACE_CONN_CLOSE("Disposing pending commands on "
+ "connection %p (conn_ref_cnt=%d)", conn,
atomic_read(&conn->conn_ref_cnt));
list_for_each_entry_safe(cmnd, tmp, pending_list,
#ifdef NET_PAGE_CALLBACKS_DEFINED
struct iscsi_cmnd *rsp;
#endif
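+ /*
+ * If closing takes longer than 10 seconds, turn on the
+ * connection-close debug traces so the loop below logs the
+ * commands still holding the connection.
+ */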
+ if (time_after(jiffies, start_waiting + 10*HZ))
+ trace_flag |= TRACE_CONN_OC_DBG;
+
spin_lock_bh(&conn->cmd_list_lock);
list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
- TRACE_DBG("cmd %p, scst_state %x, data_waiting "
- "%d, ref_cnt %d, parent_req %p", cmnd,
- cmnd->scst_state, cmnd->data_waiting,
- atomic_read(&cmnd->ref_cnt), cmnd->parent_req);
+ TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, scst_cmd "
+ "state %d, data_waiting %d, ref_cnt %d, "
+ "parent_req %p", cmnd, cmnd->scst_state,
+ (cmnd->scst_cmd != NULL) ? cmnd->scst_cmd->state : -1,
+ cmnd->data_waiting, atomic_read(&cmnd->ref_cnt),
+ cmnd->parent_req);
#ifdef NET_PAGE_CALLBACKS_DEFINED
- TRACE_DBG("net_ref_cnt %d, sg %p",
+ TRACE_CONN_CLOSE_DBG("net_ref_cnt %d, sg %p",
atomic_read(&cmnd->net_ref_cnt), cmnd->sg);
if (cmnd->sg != NULL) {
int sg_cnt, i;
sg_cnt = get_pgcnt(cmnd->bufflen,
cmnd->sg[0].offset);
for(i = 0; i < sg_cnt; i++) {
- TRACE_DBG("page %p, net_priv %p, _count %d",
+ TRACE_CONN_CLOSE_DBG("page %p, net_priv %p, _count %d",
cmnd->sg[i].page, cmnd->sg[i].page->net_priv,
atomic_read(&cmnd->sg[i].page->_count));
}
spin_lock_bh(&cmnd->rsp_cmd_lock);
list_for_each_entry(rsp, &cmnd->rsp_cmd_list, rsp_cmd_list_entry) {
- TRACE_DBG(" rsp %p, ref_cnt %d, net_ref_cnt %d, "
+ TRACE_CONN_CLOSE_DBG(" rsp %p, ref_cnt %d, net_ref_cnt %d, "
"sg %p", rsp, atomic_read(&rsp->ref_cnt),
atomic_read(&rsp->net_ref_cnt), rsp->sg);
if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
rsp->sg[0].offset);
sBUG_ON(rsp->sg_cnt != sg_cnt);
for(i = 0; i < sg_cnt; i++) {
- TRACE_DBG(" page %p, net_priv %p, "
+ TRACE_CONN_CLOSE_DBG(" page %p, net_priv %p, "
"_count %d", rsp->sg[i].page,
rsp->sg[i].page->net_priv,
atomic_read(&rsp->sg[i].page->_count));
spin_unlock_bh(&conn->cmd_list_lock);
}
#endif
+ iscsi_check_closewait(conn);
}
write_lock_bh(&conn->sock->sk->sk_callback_lock);
msleep(50);
}
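+ /*
+ * Notify user space before conn_free() and session_del() below free
+ * the connection and, possibly, the session.
+ */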
+ TRACE_CONN_CLOSE("Notifying user space about closing connection %p", conn);
+ event_send(target->tid, session->sid, conn->cid, E_CONN_CLOSE, 0);
+
mutex_lock(&target->target_mutex);
conn_free(conn);
if (list_empty(&session->conn_list))
session_del(target, session->sid);
mutex_unlock(&target->target_mutex);
- TRACE_CONN_CLOSE("Notifying user space about closing conn %p", conn);
- event_send(target->tid, session->sid, conn->cid, E_CONN_CLOSE, 0);
-
TRACE_EXIT();
return;
}
"conn %p", res, conn);
break;
default:
- PRINT_ERROR_PR("sock_recvmsg() failed: %d", res);
+ PRINT_ERROR("sock_recvmsg() failed: %d", res);
mark_conn_closed(conn);
break;
}
int res = digest_rx_header(cmnd);
if (unlikely(res != 0)) {
- PRINT_ERROR_PR("rx header digest for initiator %s failed "
+ PRINT_ERROR("rx header digest for initiator %s failed "
"(%d)", conn->session->initiator_name, res);
mark_conn_closed(conn);
}
}
break;
default:
- PRINT_ERROR_PR("%d %x", conn->read_state, cmnd_opcode(cmnd));
+ PRINT_ERROR("%d %x", conn->read_state, cmnd_opcode(cmnd));
sBUG();
}
goto out;
if (conn->read_size) {
- PRINT_ERROR_PR("%d %x %d", res, cmnd_opcode(cmnd), conn->read_size);
+ PRINT_ERROR("%d %x %d", res, cmnd_opcode(cmnd), conn->read_size);
sBUG();
}
{
TRACE_ENTRY();
+ PRINT_INFO("Read thread started, PID %d", current->pid);
+
current->flags |= PF_NOFREEZE;
spin_lock_bh(&iscsi_rd_lock);
*/
sBUG_ON(!list_empty(&iscsi_rd_list));
+ PRINT_INFO("Read thread PID %d finished", current->pid);
+
TRACE_EXIT();
return 0;
}
struct iscsi_cmnd *cmd = (struct iscsi_cmnd*)page->net_priv;
int v;
- TRACE_DBG("cmd %p, page %p, _count %d, new net_ref_cnt %d",
+ TRACE_NET_PAGE("cmd %p, page %p, _count %d, new net_ref_cnt %d",
cmd, page, atomic_read(&page->_count),
atomic_read(&cmd->net_ref_cnt)+1);
v = atomic_inc_return(&cmd->net_ref_cnt);
if (v == 1) {
- TRACE_DBG("getting cmd %p for page %p", cmd, page);
+ TRACE_NET_PAGE("getting cmd %p for page %p", cmd, page);
cmnd_get(cmd);
}
}
{
struct iscsi_cmnd *cmd = (struct iscsi_cmnd*)page->net_priv;
- TRACE_DBG("cmd %p, page %p, _count %d, new net_ref_cnt %d",
+ TRACE_NET_PAGE("cmd %p, page %p, _count %d, new net_ref_cnt %d",
cmd, page, atomic_read(&page->_count),
atomic_read(&cmd->net_ref_cnt)-1);
if (atomic_dec_and_test(&cmd->net_ref_cnt)) {
int i, sg_cnt = get_pgcnt(cmd->bufflen, cmd->sg[0].offset);
for(i = 0; i < sg_cnt; i++) {
- TRACE_DBG("Clearing page %p", cmd->sg[i].page);
+ TRACE_NET_PAGE("Clearing page %p", cmd->sg[i].page);
cmd->sg[i].page->net_priv = NULL;
}
cmnd_put(cmd);
}
}
+
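+/*
+ * If sendpage() didn't call iscsi_get_page_callback() for this page
+ * (net_ref_cnt is still 0), clear net_priv so the page doesn't keep a
+ * stale pointer to the command.
+ */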
+static void check_net_priv(struct iscsi_cmnd *cmd, struct page *page)
+{
+ if (atomic_read(&cmd->net_ref_cnt) == 0) {
+ TRACE_DBG("%s", "sendpage() didn't call get_page(), "
+ "zeroing net_priv");
+ page->net_priv = NULL;
+ }
+}
+#else
+static inline void check_net_priv(struct iscsi_cmnd *cmd, struct page *page) {}
#endif
/* This is partially taken from the Ardis code. */
set_fs(KERNEL_DS);
res = vfs_writev(file, (struct iovec __user *)iop, count, &off);
set_fs(oldfs);
- TRACE(TRACE_D_DATA, "%#Lx:%u: %d(%ld)",
+ TRACE(TRACE_D_WRITE, "%#Lx:%u: %d(%ld)",
(unsigned long long) conn->session->sid, conn->cid,
res, (long) iop->iov_len);
if (unlikely(res <= 0)) {
sg = write_cmnd->sg;
if (sg == NULL) {
- PRINT_ERROR_PR("%s", "warning data missing!");
+ PRINT_ERROR("%s", "warning data missing!");
return 0;
}
offset = conn->write_offset;
#ifdef NET_PAGE_CALLBACKS_DEFINED
if (unlikely((sg[idx].page->net_priv != NULL) &&
(sg[idx].page->net_priv != ref_cmd))) {
- PRINT_ERROR_PR("net_priv isn't NULL and != ref_cmd "
+ PRINT_ERROR("net_priv isn't NULL and != ref_cmd "
"(write_cmnd %p, ref_cmd %p, sg %p, idx %d, "
"net_priv %p)", write_cmnd, ref_cmd, sg, idx,
sg[idx].page->net_priv);
if (size <= sendsize) {
retry2:
res = sendpage(sock, sg[idx].page, offset, size, flags);
- TRACE(TRACE_D_DATA, "%s %#Lx:%u: %d(%lu,%u,%u)",
+ TRACE(TRACE_D_WRITE, "%s %#Lx:%u: %d(%lu,%u,%u)",
sock->ops->sendpage ? "sendpage" : "sock_no_sendpage",
(unsigned long long)conn->session->sid, conn->cid,
res, sg[idx].page->index, offset, size);
else
goto out_res;
}
+ check_net_priv(ref_cmd, sg[idx].page);
if (res == size) {
conn->write_size = 0;
return saved_size;
retry1:
res = sendpage(sock, sg[idx].page, offset, sendsize,
flags | MSG_MORE);
- TRACE(TRACE_D_DATA, "%s %#Lx:%u: %d(%lu,%u,%u)",
+ TRACE(TRACE_D_WRITE, "%s %#Lx:%u: %d(%lu,%u,%u)",
sock->ops->sendpage ? "sendpage" : "sock_no_sendpage",
(unsigned long long ) conn->session->sid, conn->cid,
res, sg[idx].page->index, offset, sendsize);
else
goto out_res;
}
+ check_net_priv(ref_cmd, sg[idx].page);
if (res == sendsize) {
idx++;
offset = 0;
return saved_size - size;
out_res:
-#ifdef NET_PAGE_CALLBACKS_DEFINED
- if (atomic_read(&ref_cmd->net_ref_cnt) == 0) {
- TRACE_DBG("sendpage() returned %d, zeroing net_priv", res);
- sg[idx].page->net_priv = NULL;
- }
-#endif
+ check_net_priv(ref_cmd, sg[idx].page);
if (res == -EAGAIN)
goto out;
/* else go through */
if (!conn->closing)
#endif
{
- PRINT_ERROR_PR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
+ PRINT_ERROR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
(unsigned long long)conn->session->sid, conn->cid,
conn->write_cmnd);
}
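+ /*
+ * The data couldn't be sent, so mark the SCST command as not delivered.
+ */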
+ if (ref_cmd->scst_cmd != NULL)
+ scst_set_delivery_status(ref_cmd->scst_cmd,
+ SCST_CMD_DELIVERY_FAILED);
return res;
}
if (!conn->closing)
#endif
{
- PRINT_ERROR_PR("Sending data failed: initiator %s, "
+ PRINT_ERROR("Sending data failed: initiator %s, "
"write_size %d, write_state %d, res %d",
conn->session->initiator_name, conn->write_size,
conn->write_state, res);
res = tx_ddigest(cmnd, TX_END);
break;
default:
- PRINT_ERROR_PR("%d %d %x", res, conn->write_state,
+ PRINT_ERROR("%d %d %x", res, conn->write_state,
cmnd_opcode(cmnd));
sBUG();
}
goto out;
if (conn->write_size) {
- PRINT_ERROR_PR("%d %x %u", res, cmnd_opcode(cmnd),
+ PRINT_ERROR("%d %x %u", res, cmnd_opcode(cmnd),
conn->write_size);
sBUG();
}
{
TRACE_ENTRY();
+ PRINT_INFO("Write thread started, PID %d", current->pid);
+
current->flags |= PF_NOFREEZE;
spin_lock_bh(&iscsi_wr_lock);
*/
sBUG_ON(!list_empty(&iscsi_wr_list));
+ PRINT_INFO("Write thread PID %d finished", current->pid);
+
TRACE_EXIT();
return 0;
}