#include <sys/uio.h>
#endif
-#define ISCSI_VERSION_STRING "0.9.6/0.4.15r147"
+#define ISCSI_VERSION_STRING "0.9.6/0.4.15r148"
/* The maximum length of 223 bytes in the RFC. */
#define ISCSI_NAME_LEN 256
return;
}
-void mark_conn_closed(struct iscsi_conn *conn)
+void __mark_conn_closed(struct iscsi_conn *conn, bool force)
{
spin_lock_bh(&iscsi_rd_lock);
conn->closing = 1;
+ conn->force_close = force;
spin_unlock_bh(&iscsi_rd_lock);
iscsi_make_conn_rd_active(conn);
}
+void mark_conn_closed(struct iscsi_conn *conn)
+{
+ __mark_conn_closed(conn, false);
+}
+
static void iscsi_state_change(struct sock *sk)
{
struct iscsi_conn *conn = sk->sk_user_data;
PRINT_ERROR("Connection with initiator %s (%p) "
"unexpectedly closed!",
conn->session->initiator_name, conn);
- mark_conn_closed(conn);
+ __mark_conn_closed(conn, true);
}
} else
iscsi_make_conn_rd_active(conn);
return;
}
+static void conn_rsp_timer_fn(unsigned long arg)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *)arg;
+
+ TRACE_ENTRY();
+
+ TRACE_DBG("Timer (conn %p)", conn);
+
+ spin_lock_bh(&conn->write_list_lock);
+
+ if (!list_empty(&conn->written_list)) {
+ struct iscsi_cmnd *wr_cmd = list_entry(conn->written_list.next,
+ struct iscsi_cmnd, write_list_entry);
+
+ if (unlikely(time_after_eq(jiffies, wr_cmd->write_timeout))) {
+ if (!conn->closing) {
+ PRINT_ERROR("Timeout sending data to initiator "
+ "%s (SID %Lx), closing connection",
+ conn->session->initiator_name,
+ (unsigned long long)conn->session->sid);
+ __mark_conn_closed(conn, true);
+ }
+ } else {
+ TRACE_DBG("Restarting timer on %ld (conn %p)",
+ wr_cmd->write_timeout, conn);
+ /*
+ * Timer might have been restarted while we were
+ * entering here.
+ */
+ mod_timer(&conn->rsp_timer, wr_cmd->write_timeout);
+ }
+ }
+
+ spin_unlock_bh(&conn->write_list_lock);
+
+ TRACE_EXIT();
+ return;
+}
+
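
For readers unfamiliar with the pre-hrtimer timer API used by
conn_rsp_timer_fn() above, here is a minimal, self-contained sketch of the
same arm/re-arm/teardown pattern; the demo_* names are illustrative and not
part of the patch:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_TIMEOUT (7*HZ)	/* same style as ISCSI_RSP_TIMEOUT */

struct demo_conn {
	struct timer_list rsp_timer;
	unsigned long write_timeout;	/* absolute deadline in jiffies */
};

static struct demo_conn demo;

static void demo_timer_fn(unsigned long arg)
{
	struct demo_conn *conn = (struct demo_conn *)arg;

	/* Deadline reached? If not, re-arm for the stored deadline. */
	if (time_after_eq(jiffies, conn->write_timeout))
		printk(KERN_ERR "demo: response deadline expired\n");
	else
		mod_timer(&conn->rsp_timer, conn->write_timeout);
}

static int __init demo_init(void)
{
	demo.write_timeout = jiffies + DEMO_TIMEOUT;
	setup_timer(&demo.rsp_timer, demo_timer_fn, (unsigned long)&demo);
	mod_timer(&demo.rsp_timer, demo.write_timeout);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * As with the del_timer_sync() call added later in this patch:
	 * guarantee the handler is not running before freeing the object.
	 */
	del_timer_sync(&demo.rsp_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");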
static int iscsi_socket_bind(struct iscsi_conn *conn)
{
int res = 0;
conn->session, (unsigned long long)conn->session->sid,
conn->cid);
+ del_timer_sync(&conn->rsp_timer);
+
sBUG_ON(atomic_read(&conn->conn_ref_cnt) != 0);
sBUG_ON(!list_empty(&conn->cmd_list));
sBUG_ON(!list_empty(&conn->write_list));
INIT_LIST_HEAD(&conn->cmd_list);
spin_lock_init(&conn->write_list_lock);
INIT_LIST_HEAD(&conn->write_list);
+ INIT_LIST_HEAD(&conn->written_list);
+ setup_timer(&conn->rsp_timer, conn_rsp_timer_fn, (unsigned long)conn);
conn->file = fget(info->fd);
kfree(cmnd->pdu.ahs);
- if (unlikely(cmnd->on_write_list)) {
+ if (unlikely(cmnd->on_write_list || cmnd->on_written_list)) {
struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);
PRINT_ERROR("cmnd %p still on some list?, %x, %x, %x, %x, %x, %x, %x",
EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
+ if (cmnd->on_written_list) {
+ struct iscsi_conn *conn = cmnd->conn;
+ TRACE_DBG("Deleting cmd %p from conn %p written_list", cmnd,
+ conn);
+ spin_lock_bh(&conn->write_list_lock);
+ list_del(&cmnd->write_list_entry);
+ cmnd->on_written_list = 0;
+ spin_unlock_bh(&conn->write_list_lock);
+ }
+
if (cmnd->parent_req == NULL) {
struct iscsi_conn *conn = cmnd->conn;
TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
sBUG_ON(req == conn->read_cmnd);
if (flags & ISCSI_FORCE_RELEASE_WRITE) {
- spin_lock(&conn->write_list_lock);
+ spin_lock_bh(&conn->write_list_lock);
list_for_each_entry_safe(rsp, t, &conn->write_list,
write_list_entry) {
if (rsp->parent_req != req)
list_add_tail(&rsp->write_list_entry, &cmds_list);
}
- spin_unlock(&conn->write_list_lock);
+ spin_unlock_bh(&conn->write_list_lock);
list_for_each_entry_safe(rsp, t, &cmds_list, write_list_entry) {
list_del(&rsp->write_list_entry);
spin_unlock_bh(&req->rsp_cmd_lock);
- spin_lock(&conn->write_list_lock);
+ spin_lock_bh(&conn->write_list_lock);
r = rsp->on_write_list || rsp->write_processing_started;
- spin_unlock(&conn->write_list_lock);
+ spin_unlock_bh(&conn->write_list_lock);
cmnd_put(rsp);
if (r)
- continue;
+ goto again_rsp;
/*
* If both on_write_list and write_processing_started not set,
- * we can safely put() cmnd
+ * we can safely put() rsp.
*/
cmnd_put(rsp);
goto again_rsp;
(rsp->parent_req->outstanding_r2t == 0))
cmnd_remove_hash(rsp->parent_req);
+ if (!(conn->ddigest_type & DIGEST_NONE)) {
+ list_for_each(pos, send) {
+ rsp = list_entry(pos, struct iscsi_cmnd,
+ write_list_entry);
+
+ if (rsp->pdu.datasize != 0) {
+ TRACE_DBG("Doing data digest (%p:%x)", rsp,
+ cmnd_opcode(rsp));
+ digest_tx_data(rsp);
+ }
+ }
+ }
+
+ spin_lock_bh(&conn->write_list_lock);
list_for_each_safe(pos, next, send) {
rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);
sBUG_ON(conn != rsp->conn);
- if (!(conn->ddigest_type & DIGEST_NONE) &&
- (rsp->pdu.datasize != 0))
- digest_tx_data(rsp);
-
list_del(&rsp->write_list_entry);
-
- spin_lock(&conn->write_list_lock);
cmd_add_on_write_list(conn, rsp);
- spin_unlock(&conn->write_list_lock);
}
+ spin_unlock_bh(&conn->write_list_lock);
if (flags & ISCSI_INIT_WRITE_WAKE)
iscsi_make_conn_wr_active(conn);
EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
+ if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_READ) {
+ if (!(req->conn->ddigest_type & DIGEST_NONE))
+ scst_set_long_xmit(scst_cmd);
+#ifndef NET_PAGE_CALLBACKS_DEFINED
+ else if (cmnd_hdr(req)->data_length > 8*1024)
+ scst_set_long_xmit(scst_cmd);
+#endif
+ EXTRACHECKS_BUG_ON(!list_empty(&req->rx_ddigest_cmd_list));
+ goto out;
+ }
+
/* If data digest isn't used this list will be empty */
list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
rx_ddigest_cmd_list_entry) {
* upon entrance in this function, because otherwise it could be destroyed
* inside as a result of iscsi_send(), which releases sent commands.
*/
-static void iscsi_try_local_processing(struct iscsi_conn *conn)
+static void iscsi_try_local_processing(struct iscsi_conn *conn,
+ bool single_only)
{
int local;
int rc = 1;
while(test_write_ready(conn)) {
rc = iscsi_send(conn);
- if (rc <= 0) {
+ if ((rc <= 0) || single_only) {
break;
}
}
u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
int old_state = req->scst_state;
+ bool single_only = !scst_get_long_xmit(scst_cmd);
scst_cmd_set_tgt_priv(scst_cmd, NULL);
conn_get_ordered(conn);
req_cmnd_release(req);
- iscsi_try_local_processing(conn);
+ iscsi_try_local_processing(conn, single_only);
conn_put(conn);
out:
scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
int status = iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
- TRACE_MGMT_DBG("req %p, scst_mcmd %p, scst status %d", req, scst_mcmd,
+ TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d",
+ req, scst_mcmd, scst_mgmt_cmd_get_fn(scst_mcmd),
scst_mgmt_cmd_get_status(scst_mcmd));
iscsi_send_task_mgmt_resp(req, status);
scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
+
+ return;
}
static int iscsi_target_detect(struct scst_tgt_template *templ)
/* All 3 protected by sn_lock */
unsigned int tm_active:1;
+ unsigned int shutting_down:1; /* Let's save some cache footprint by putting it here */
u32 tm_sn;
struct iscsi_cmnd *tm_rsp;
struct list_head session_list_entry;
+ struct completion unreg_compl;
+
/* Both don't need any protection */
char *initiator_name;
u64 sid;
spinlock_t write_list_lock;
/* List of data pdus to be sent, protected by write_list_lock */
- struct list_head write_list;
+ struct list_head write_list;
+ /* List of data pdus being sent, protected by write_list_lock */
+ struct list_head written_list;
+
+ struct timer_list rsp_timer;
/* All 2 protected by iscsi_wr_lock */
unsigned short wr_state;
unsigned short rd_state;
unsigned short rd_data_ready:1;
unsigned short closing:1; /* Let's save some cache footprint by putting it here */
+ unsigned short force_close:1; /* Let's save some cache footprint by putting it here */
struct list_head rd_list_entry;
unsigned int force_cleanup_done:1;
unsigned int dec_active_cmnds:1;
unsigned int ddigest_checked:1;
+ unsigned int on_written_list:1;
#ifdef EXTRACHECKS
unsigned int on_rx_digest_list:1;
unsigned int release_called:1;
struct list_head write_list_entry;
};
+ unsigned long write_timeout;
+
/*
* Unprotected, since could be accessed from only a single
* thread at time
/* Flags for req_cmnd_release_force() */
#define ISCSI_FORCE_RELEASE_WRITE 1
+#define ISCSI_RSP_TIMEOUT (7*HZ)
+
extern struct mutex target_mgmt_mutex;
extern struct file_operations ctr_fops;
extern int conn_add(struct iscsi_session *, struct conn_info *);
extern int conn_del(struct iscsi_session *, struct conn_info *);
extern int conn_free(struct iscsi_conn *);
+extern void __mark_conn_closed(struct iscsi_conn *, bool);
extern void mark_conn_closed(struct iscsi_conn *);
extern void iscsi_make_conn_wr_active(struct iscsi_conn *);
extern void conn_info_show(struct seq_file *, struct iscsi_session *);
cmnd_done(cmnd);
}
-/* conn->write_list_lock supposed to be locked */
+/* conn->write_list_lock supposed to be locked and BHs off */
static inline void cmd_add_on_write_list(struct iscsi_conn *conn,
struct iscsi_cmnd *cmnd)
{
cmnd->on_write_list = 1;
}
-/* conn->write_list_lock supposed to be locked */
+/* conn->write_list_lock supposed to be locked and BHs off */
static inline void cmd_del_from_write_list(struct iscsi_cmnd *cmnd)
{
TRACE_DBG("%p", cmnd);
static inline void iscsi_check_closewait(struct iscsi_conn *conn) {};
#endif
+static void iscsi_unreg_cmds_done_fn(struct scst_session *scst_sess)
+{
+ struct iscsi_session *sess =
+ (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
+
+ TRACE_ENTRY();
+
+ TRACE_CONN_CLOSE("sess %p (scst_sess %p)", sess, scst_sess);
+
+ sess->shutting_down = 1;
+ complete_all(&sess->unreg_compl);
+
+ TRACE_EXIT();
+ return;
+}
+
/* No locks */
static void close_conn(struct iscsi_conn *conn)
{
sBUG_ON(!conn->closing);
- /* We want all our already send operations to complete */
- conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);
+ if (conn->force_close) {
+ conn->sock->ops->shutdown(conn->sock,
+ RCV_SHUTDOWN|SEND_SHUTDOWN);
+ } else {
+ /* We want all our already sent operations to complete */
+ conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);
+ }
/*
* We need to call scst_unregister_session() ASAP to make SCST start
*
* ToDo: this is incompatible with MC/S
*/
- scst_unregister_session(session->scst_sess, 0, NULL);
+ scst_unregister_session_ex(session->scst_sess, 0,
+ NULL, iscsi_unreg_cmds_done_fn);
session->scst_sess = NULL;
if (conn->read_state != RX_INIT_BHS) {
} while(req_freed);
spin_unlock(&session->sn_lock);
- if (time_after(jiffies, start_waiting + 5*HZ)) {
- TRACE_CONN_CLOSE("%s", "Wait time expired");
+ if (time_after(jiffies, start_waiting + 10*HZ)) {
+ TRACE_CONN_CLOSE("%s", "Pending wait time expired");
spin_lock(&session->sn_lock);
do {
req_freed = 0;
}
iscsi_make_conn_wr_active(conn);
+
+ if (time_after(jiffies, start_waiting + 7*HZ)) {
+ TRACE_CONN_CLOSE("%s", "Wait time expired");
+ conn->sock->ops->shutdown(conn->sock, SEND_SHUTDOWN);
+ }
+
msleep(200);
TRACE_CONN_CLOSE("conn %p, conn_ref_cnt %d left, wr_state %d, "
TRACE_CONN_CLOSE("Notifying user space about closing connection %p", conn);
event_send(target->tid, session->sid, conn->cid, E_CONN_CLOSE, 0);
+ wait_for_completion(&session->unreg_compl);
+
mutex_lock(&target->target_mutex);
conn_free(conn);
/* ToDo: this is incompatible with MC/S */
{
struct iscsi_cmnd *cmnd = NULL;
- spin_lock(&conn->write_list_lock);
+ spin_lock_bh(&conn->write_list_lock);
if (!list_empty(&conn->write_list)) {
cmnd = list_entry(conn->write_list.next, struct iscsi_cmnd,
write_list_entry);
cmd_del_from_write_list(cmnd);
cmnd->write_processing_started = 1;
}
- spin_unlock(&conn->write_list_lock);
+ spin_unlock_bh(&conn->write_list_lock);
return cmnd;
}
else
ref_cmd = write_cmnd;
+ if (!ref_cmd->on_written_list) {
+ TRACE_DBG("Adding cmd %p to conn %p written_list", ref_cmd,
+ conn);
+ spin_lock_bh(&conn->write_list_lock);
+ ref_cmd->on_written_list = 1;
+ ref_cmd->write_timeout = jiffies + ISCSI_RSP_TIMEOUT;
+ list_add_tail(&ref_cmd->write_list_entry, &conn->written_list);
+ spin_unlock_bh(&conn->write_list_lock);
+ }
+
+ if (!timer_pending(&conn->rsp_timer)) {
+ sBUG_ON(!ref_cmd->write_timeout);
+ spin_lock_bh(&conn->write_list_lock);
+ if (likely(!timer_pending(&conn->rsp_timer))) {
+ TRACE_DBG("Starting timer on %ld (conn %p)",
+ ref_cmd->write_timeout, conn);
+ conn->rsp_timer.expires = ref_cmd->write_timeout;
+ add_timer(&conn->rsp_timer);
+ }
+ spin_unlock_bh(&conn->write_list_lock);
+ }
+
file = conn->file;
saved_size = size = conn->write_size;
iop = conn->write_iop;
struct iscsi_session *session;
list_for_each_entry(session, &target->session_list, session_list_entry) {
- if (session->sid == sid)
+ if ((session->sid == sid) && !session->shutting_down)
return session;
}
return NULL;
struct iscsi_session *session;
char *name = NULL;
- TRACE_MGMT_DBG("Creating session: target %p, tid %u, sid %#Lx",
- target, target->tid, (unsigned long long) info->sid);
-
if (!(session = kzalloc(sizeof(*session), GFP_KERNEL)))
return -ENOMEM;
kfree(name);
+ scst_sess_set_tgt_priv(session->scst_sess, session);
+ init_completion(&session->unreg_compl);
+
list_add(&session->session_list_entry, &target->session_list);
+ TRACE_MGMT_DBG("Session %p created: target %p, tid %u, sid %#Lx",
+ session, target, target->tid, (unsigned long long) info->sid);
+
return 0;
err:
if (session) {
int err = -EEXIST;
session = session_lookup(target, info->sid);
- if (session)
+ if (session) {
+ PRINT_ERROR("Attempt to add session with existing SID %Lx",
+ (unsigned long long)info->sid);
return err;
+ }
err = iscsi_session_alloc(target, info);
mutex_unlock(&target_mgmt_mutex);
+ TRACE_MGMT_DBG("%s", "Deleting all targets finished");
+
TRACE_EXIT();
return;
}
* "change NAME [PATH]" - changes a virtual CD in the VDISK CDROM.
+By default, if neither the BLOCKIO nor the NULLIO option is supplied,
+FILEIO mode is used.
+
For example, "echo "open disk1 /vdisks/disk1" >/proc/scsi_tgt/vdisk/vdisk"
-will open file /vdisks/disk1 as virtual VDISK disk with name "disk1".
+will open file /vdisks/disk1 as virtual FILEIO disk with name "disk1".
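
For illustration only (assuming the mode keyword is passed as a trailing
option to "open", which this fragment does not spell out), a BLOCKIO
device would be created analogously:
"echo "open disk2 /dev/sdb BLOCKIO" >/proc/scsi_tgt/vdisk/vdisk".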
IMPORTANT: By default for performance reasons VDISK FILEIO devices use write
========= back caching policy. This is generally safe from the consistence of
*************************************************************/
/* LUN translation (mcmd->tgt_dev assignment) */
-#define SCST_MGMT_CMD_STATE_INIT 1
+#define SCST_MGMT_CMD_STATE_INIT 0
/* Mgmt cmd is ready for processing */
-#define SCST_MGMT_CMD_STATE_READY 2
+#define SCST_MGMT_CMD_STATE_READY 1
/* Mgmt cmd is being executing */
-#define SCST_MGMT_CMD_STATE_EXECUTING 3
+#define SCST_MGMT_CMD_STATE_EXECUTING 2
+
+/* Reservations are going to be cleared, if necessary */
+#define SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS 3
/* Target driver's task_mgmt_fn_done() is going to be called */
#define SCST_MGMT_CMD_STATE_DONE 4
/*
* Direct cmd's processing (i.e. regular function calls in the current
- * context), sleeping is allowed, no restrictions
+ * context) sleeping is not allowed
*/
-#define SCST_CONTEXT_DIRECT 0
+#define SCST_CONTEXT_DIRECT_ATOMIC 0
/*
* Direct cmd's processing (i.e. regular function calls in the current
- * context) sleeping is not allowed
+ * context), sleeping is allowed, no restrictions
*/
-#define SCST_CONTEXT_DIRECT_ATOMIC 1
+#define SCST_CONTEXT_DIRECT 1
/* Tasklet or thread context required for cmd's processing */
#define SCST_CONTEXT_TASKLET 2
/* Thread context required for cmd's processing */
#define SCST_CONTEXT_THREAD 3
+/*
+ * SCST internal flag, which specifies that the context is processable,
+ * i.e. the next command in the active list will be processed after the
+ * current one.
+ *
+ * Target drivers must never use it!!
+ */
+#define SCST_CONTEXT_PROCESSABLE 0x100
+
/*************************************************************
** Values for status parameter of scst_rx_data()
*************************************************************/
struct scst_dev_type
{
+ /* SCSI type of the supported device. MUST HAVE */
+ int type;
+
/*
* True, if corresponding function supports execution in
* the atomic (non-sleeping) context
/* Set, if no /proc files should be automatically created by SCST */
unsigned no_proc:1;
+ /* Set, if exec() is synchronous */
+ unsigned exec_sync:1;
+
/*
* Called to parse CDB from the cmd and initialize
* cmd->bufflen and cmd->data_direction (both - REQUIRED).
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
+ * If this function provides sync execution, you must set the
+ * exec_sync flag above and should consider setting up dedicated
+ * threads by setting threads_num > 0.
+ *
* !! If this function is implemented, scst_check_local_events() shall !!
* !! be called inside it just before the actual command's execution. !!
*
/* Name of the dev handler. Must be unique. MUST HAVE */
char name[15];
- /* SCSI type of the supported device. MUST HAVE */
- int type;
-
/*
* Number of dedicated threads. If 0 - no dedicated threads will
* be created, if <0 - creation of dedicated threads is prohibited.
void (*init_result_fn) (struct scst_session *sess, void *data,
int result);
void (*unreg_done_fn) (struct scst_session *sess);
+ void (*unreg_cmds_done_fn) (struct scst_session *sess);
-#ifdef MEASURE_LATENCY
+#ifdef MEASURE_LATENCY /* must be last */
spinlock_t meas_lock;
uint64_t scst_time, processing_time;
unsigned int processed_cmds;
/* Set if cmd is being processed in atomic context */
unsigned int atomic:1;
+ /*
+ * Set if the cmd is being processed in the processable context. See
+ * the comment for SCST_CONTEXT_PROCESSABLE for what it means.
+ */
+ unsigned int context_processable:1;
+
/* Set if cmd is internally generated */
unsigned int internal:1;
/* Set if the cmd was done or aborted out of its SN */
unsigned int out_of_sn:1;
- /* Set if the cmd is deferred HEAD OF QUEUE */
- unsigned int hq_deferred:1;
+ /* Set if increment expected_sn in cmd->scst_cmd_done() */
+ unsigned int inc_expected_sn_on_done:1;
/*
- * Set if increment expected_sn in cmd->scst_cmd_done() (to save
- * extra dereferences)
+ * Set if xmit_response() is going to need considerable processing
+ * time, i.e. longer than a context switch (about 1 usec on modern
+ * systems). It's needed to trigger other threads to start processing
+ * other outstanding commands without waiting for the current one's
+ * XMIT to finish. E.g., it should be set if the iSCSI data digest is
+ * used and the cmd has READ direction.
*/
- unsigned int inc_expected_sn_on_done:1;
+ unsigned int long_xmit:1;
/* Set if tgt_sn field is valid */
unsigned int tgt_sn_set:1;
+ /* Set if cmd is done */
+ unsigned int done:1;
+
/* Set if cmd is finished */
unsigned int finished:1;
lun_t lun; /* LUN for this cmd */
- /* The corresponding mgmt cmd, if any, protected by sess_list_lock */
- struct scst_mgmt_cmd *mgmt_cmnd;
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
struct scsi_request *scsi_req; /* SCSI request */
#endif
*/
int orig_sg_cnt, orig_sg_entry, orig_entry_len;
+ /* List of corresponding mgmt cmds, if any, protected by sess_list_lock */
+ struct list_head mgmt_cmd_list;
+
/* List entry for dev's blocked_cmd_list */
struct list_head blocked_cmd_list_entry;
struct scst_cmd *orig_cmd; /* Used to issue REQUEST SENSE */
-#ifdef MEASURE_LATENCY
+#ifdef MEASURE_LATENCY /* must be last */
uint64_t start, pre_exec_finish, post_exec_start;
#endif
};
unsigned char cmd_sn_set;
};
+struct scst_mgmt_cmd_stub
+{
+ struct scst_mgmt_cmd *mcmd;
+
+ /* List entry in cmd->mgmt_cmd_list */
+ struct list_head cmd_mgmt_cmd_list_entry;
+};
+
struct scst_mgmt_cmd
{
/* List entry for *_mgmt_cmd_list */
unsigned int needs_unblocking:1;
unsigned int lun_set:1; /* set, if lun field is valid */
unsigned int cmd_sn_set:1; /* set, if cmd_sn field is valid */
+ unsigned int nexus_loss_check_active:1; /* set, if nexus loss check is active */
+ unsigned int nexus_loss_check_done:1; /* set, if nexus loss check is done */
/*
- * Number of commands to complete before sending response,
+ * Number of commands to finish before sending response,
* protected by scst_mcmd_lock
*/
- int cmd_wait_count;
+ int cmd_finish_wait_count;
+
+ /*
+ * Number of commands to complete (done) before resetting reservation,
+ * protected by scst_mcmd_lock
+ */
+ int cmd_done_wait_count;
/* Number of completed commands, protected by scst_mcmd_lock */
int completed_cmd_count;
* the session is about to be completely freed. Can be NULL.
* Parameter:
* - sess - session
+ * unreg_cmds_done_fn - pointer to the function that will be
+ * asynchronously called when the session's last command completes,
+ * i.e. reaches the XMIT stage. Can be NULL.
+ * Parameter:
+ * - sess - session
*
* Notes:
*
* but it also starts recovering stuck commands, if there are any.
* Otherwise, your target driver could wait for those commands forever.
*/
-void scst_unregister_session(struct scst_session *sess, int wait,
- void (*unreg_done_fn) (struct scst_session *sess));
+void scst_unregister_session_ex(struct scst_session *sess, int wait,
+ void (*unreg_done_fn) (struct scst_session *sess),
+ void (*unreg_cmds_done_fn) (struct scst_session *sess));
+
+static inline void scst_unregister_session(struct scst_session *sess, int wait,
+ void (*unreg_done_fn) (struct scst_session *sess))
+{
+ scst_unregister_session_ex(sess, wait, unreg_done_fn, NULL);
+}
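
A minimal usage sketch of the extended unregister API, modeled on the
iscsi-scst hunks earlier in this patch; struct my_sess and the my_* names
are illustrative, not taken from any real driver:

#include <linux/completion.h>

struct my_sess {
	struct scst_session *scst_sess;
	struct completion unreg_compl;	/* init_completion() at session setup */
};

/* Called by SCST once the session's last command reaches the XMIT stage */
static void my_unreg_cmds_done(struct scst_session *scst_sess)
{
	struct my_sess *sess =
		(struct my_sess *)scst_sess_get_tgt_priv(scst_sess);

	complete_all(&sess->unreg_compl);
}

static void my_close_session(struct my_sess *sess)
{
	scst_unregister_session_ex(sess->scst_sess, 0, NULL,
		my_unreg_cmds_done);
	/* Per-command resources can be torn down once this completes */
	wait_for_completion(&sess->unreg_compl);
}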
/*
* Registers dev handler driver
cmd->delivery_status = delivery_status;
}
+/*
+ * Get/set/clear functions for cmd's long XMIT flag.
+ */
+static inline int scst_get_long_xmit(struct scst_cmd *cmd)
+{
+ return cmd->long_xmit;
+}
+
+static inline void scst_set_long_xmit(struct scst_cmd *cmd)
+{
+ cmd->long_xmit = 1;
+}
+
+static inline void scst_clear_long_xmit(struct scst_cmd *cmd)
+{
+ cmd->long_xmit = 0;
+}
+
/*
* Get/Set function for mgmt cmd's target private data
*/
return mcmd->status;
}
+/*
+ * Returns mgmt cmd's TM fn
+ */
+static inline int scst_mgmt_cmd_get_fn(struct scst_mgmt_cmd *mcmd)
+{
+ return mcmd->fn;
+}
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
static inline struct page *sg_page(struct scatterlist *sg)
/*
* Main SCST commands processing routing. Must be used only by dev handlers.
* Argument context sets the execution context, only SCST_CONTEXT_DIRECT and
- * SCST_CONTEXT_DIRECT_ATOMIC are allowed.
+ * SCST_CONTEXT_DIRECT_ATOMIC with optional SCST_CONTEXT_PROCESSABLE flag
+ * are allowed.
*/
void scst_process_active_cmd(struct scst_cmd *cmd, int context);
/* Aborts all tasks in all sessions of the tgt */
#define SCST_ABORT_ALL_TASKS 9
+/*
+ * Internal TM command issued by SCST in scst_unregister_session_ex(). It
+ * is the same as SCST_NEXUS_LOSS_SESS, except it calls unreg_cmds_done_fn().
+ *
+ * Target driver shall NEVER use it!!
+ */
+#define SCST_UNREG_SESS_TM 10
+
/*************************************************************
** Values for mgmt cmd's status field. Codes taken from iSCSI
*************************************************************/
#ifdef DEBUG
//# define LOG_FLAG KERN_DEBUG
# define LOG_FLAG KERN_INFO
-
# define INFO_FLAG KERN_INFO
# define ERROR_FLAG KERN_INFO
#else
# define ERROR_FLAG KERN_ERR
#endif
+#define CRIT_FLAG KERN_CRIT
+
#define NO_FLAG ""
#define TRACE_NULL 0x00000000
PRINT_LOG_FLAG(ERROR_FLAG, "***ERROR*** " format, args); \
} while(0)
+#define PRINT_CRIT_ERROR(format, args...) \
+do { \
+ if (strcmp(CRIT_FLAG, LOG_FLAG)) \
+ { \
+ PRINT_LOG_FLAG(LOG_FLAG, "***CRITICAL ERROR*** " format, args); \
+ } \
+ PRINT_LOG_FLAG(CRIT_FLAG, "***CRITICAL ERROR*** " format, args); \
+} while(0)
+
#define PRINT_INFO(format, args...) \
do { \
if (strcmp(INFO_FLAG, LOG_FLAG)) \
format, LOG_PREFIX, args); \
} while(0)
+#define PRINT_CRIT_ERROR(format, args...) \
+do { \
+ PRINT(CRIT_FLAG, "%s: ***CRITICAL ERROR*** " \
+ format, LOG_PREFIX, args); \
+} while(0)
+
#else
#define PRINT_INFO(format, args...) \
format, args); \
} while(0)
+#define PRINT_CRIT_ERROR(format, args...) \
+do { \
+ PRINT(CRIT_FLAG, "***CRITICAL ERROR*** " \
+ format, args); \
+} while(0)
+
#endif /* LOG_PREFIX */
#endif /* DEBUG */
case READ_12:
case READ_16:
cmd->completed = 1;
- break;
+ goto out_done;
}
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+
out_done:
res = SCST_EXEC_COMPLETED;
cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
-
- TRACE_EXIT_RES(res);
- return res;
+ goto out;
}
MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
case READ_12:
case READ_16:
cmd->completed = 1;
- break;
+ goto out_done;
}
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+
out_done:
res = SCST_EXEC_COMPLETED;
cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
-
- TRACE_EXIT_RES(res);
- return res;
+ goto out;
}
MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
case WRITE_6:
case READ_6:
cmd->completed = 1;
- break;
+ goto out_done;
}
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+
out_done:
res = SCST_EXEC_COMPLETED;
cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
-
- TRACE_EXIT_RES(res);
- return res;
+ goto out;
}
MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
list_del(&cmd->cmd_list_entry);
spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
- scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);
+ scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT |
+ SCST_CONTEXT_PROCESSABLE);
spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
res++;
}
dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
if (IS_ERR(dev_user_sysfs_class)) {
- printk(KERN_ERR "Unable create sysfs class for SCST user "
- "space handler\n");
+ PRINT_ERROR("%s", "Unable create sysfs class for SCST user "
+ "space handler");
res = PTR_ERR(dev_user_sysfs_class);
goto out_proc;
}
res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
if (res) {
- printk(KERN_ERR "Unable to get major %d for SCSI tapes\n",
- DEV_USER_MAJOR);
+ PRINT_ERROR("Unable to get major %d for SCSI tapes", DEV_USER_MAJOR);
goto out_class;
}
#define VDISK_TYPE { \
name: VDISK_NAME, \
type: TYPE_DISK, \
+ exec_sync: 1, \
threads_num: -1, \
parse_atomic: 1, \
exec_atomic: 0, \
#define VCDROM_TYPE { \
name: VCDROM_NAME, \
type: TYPE_ROM, \
+ exec_sync: 1, \
threads_num: -1, \
parse_atomic: 1, \
exec_atomic: 0, \
}
if (!(cmd->cdb[1] & PF) || (cmd->cdb[1] & SP)) {
- PRINT_ERROR("MODE SELECT: PF and/or SP are wrongly set "
- "(cdb[1]=%x)", cmd->cdb[1]);
+ TRACE(TRACE_MINOR|TRACE_SCSI, "MODE SELECT: Unsupported "
+ "value(s) of PF and/or SP bits (cdb[1]=%x)",
+ cmd->cdb[1]);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
goto out_put;
TRACE_DBG("PERSIST/PREVENT 0x%02x", cmd->cdb[4]);
- spin_lock(&virt_dev->flags_lock);
- if (cmd->dev->handler->type == TYPE_ROM)
+ if (cmd->dev->handler->type == TYPE_ROM) {
+ spin_lock(&virt_dev->flags_lock);
virt_dev->prevent_allow_medium_removal =
cmd->cdb[4] & 0x01 ? 1 : 0;
- else {
- PRINT_ERROR("%s", "Prevent allow medium removal for "
- "non-CDROM device");
- scst_set_cmd_error(cmd,
- SCST_LOAD_SENSE(scst_sense_invalid_opcode));
+ spin_unlock(&virt_dev->flags_lock);
}
- spin_unlock(&virt_dev->flags_lock);
return;
}
cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
if (cmd->sense == NULL) {
- PRINT_ERROR("FATAL!!! Sense memory allocation failed (op %x). "
+ PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
"The sense data will be lost!!", cmd->cdb[0]);
res = -ENOMEM;
goto out;
return;
}
-static void scst_send_release(struct scst_tgt_dev *tgt_dev)
+static void scst_send_release(struct scst_device *dev)
{
struct scsi_request *req;
struct scsi_device *scsi_dev;
TRACE_ENTRY();
- if (tgt_dev->dev->scsi_dev == NULL)
+ if (dev->scsi_dev == NULL)
goto out;
- scsi_dev = tgt_dev->dev->scsi_dev;
+ scsi_dev = dev->scsi_dev;
req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
if (req == NULL) {
req->sr_use_sg = 0;
req->sr_bufflen = 0;
req->sr_buffer = NULL;
- req->sr_request->rq_disk = tgt_dev->dev->rq_disk;
+ req->sr_request->rq_disk = dev->rq_disk;
req->sr_sense_buffer[0] = 0;
TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
-static void scst_send_release(struct scst_tgt_dev *tgt_dev)
+static void scst_send_release(struct scst_device *dev)
{
struct scsi_device *scsi_dev;
unsigned char cdb[6];
TRACE_ENTRY();
- if (tgt_dev->dev->scsi_dev == NULL)
+ if (dev->scsi_dev == NULL)
goto out;
/* We can't afford missing RELEASE due to memory shortage */
sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);
- scsi_dev = tgt_dev->dev->scsi_dev;
+ scsi_dev = dev->scsi_dev;
for(i = 0; i < 5; i++) {
memset(cdb, 0, sizeof(cdb));
PRINT_ERROR("RELEASE failed: %d", rc);
PRINT_BUFFER("RELEASE sense", sense,
SCST_SENSE_BUFFERSIZE);
- scst_check_internal_sense(tgt_dev->dev, rc,
+ scst_check_internal_sense(dev, rc,
sense, SCST_SENSE_BUFFERSIZE);
}
}
&tgt_dev_tmp->tgt_dev_flags);
}
dev->dev_reserved = 0;
+ release = 1;
}
spin_unlock_bh(&dev->dev_lock);
if (release)
- scst_send_release(tgt_dev);
+ scst_send_release(dev);
TRACE_EXIT();
return;
cmd->state = SCST_CMD_STATE_INIT_WAIT;
atomic_set(&cmd->cmd_ref, 1);
cmd->cmd_lists = &scst_main_cmd_lists;
+ INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
cmd->timeout = SCST_DEFAULT_TIMEOUT;
cmd->retries = 0;
mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
if (mcmd == NULL) {
- PRINT_ERROR("%s", "Allocation of management command "
+ PRINT_CRIT_ERROR("%s", "Allocation of management command "
"failed, some commands and their data could leak");
goto out;
}
int atomic = scst_cmd_atomic(cmd);
int flags;
struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+ int bufflen = cmd->bufflen;
TRACE_ENTRY();
flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
if (cmd->no_sgv)
flags |= SCST_POOL_ALLOC_NO_CACHED;
- cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
+
+ if (unlikely(cmd->bufflen == 0)) {
+ TRACE(TRACE_MGMT_MINOR, "Data direction %d or/and zero buffer "
+ "length. Opcode 0x%x, handler %s, target %s",
+ cmd->data_direction, cmd->cdb[0],
+ cmd->dev->handler->name, cmd->tgtt->name);
+ /*
+ * Be on the safe side and allocate a stub buffer. Neither
+ * target drivers nor user space will touch it, since bufflen
+ * remains 0.
+ */
+ bufflen = PAGE_SIZE;
+ }
+
+ cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
&cmd->sg_cnt, &cmd->sgv, NULL);
if (cmd->sg == NULL)
goto out;
*op_flags = ptr->flags;
*transfer_len = (*ptr->get_trans_len)(cdb_p, ptr->off);
-#ifdef EXTRACHECKS
- if (unlikely((*transfer_len == 0) &&
- (*direction != SCST_DATA_NONE) &&
- ((*op_flags & SCST_UNKNOWN_LENGTH) == 0))) {
- PRINT_ERROR("transfer_len 0, direction %d, flags %x, changing "
- "direction on NONE", *direction, *op_flags);
- *direction = SCST_DATA_NONE;
- }
-#endif
-
out:
TRACE_EXIT();
return res;
UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
if (UA_entry == NULL) {
- PRINT_ERROR("%s", "UNIT ATTENTION memory "
+ PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
"allocation failed. The UNIT ATTENTION "
"on some sessions will be missed");
+ PRINT_BUFFER("Lost UA", sense, sense_len);
goto out;
}
memset(UA_entry, 0, sizeof(*UA_entry));
spin_lock_irq(&tgt_dev->sn_lock);
+ if (unlikely(tgt_dev->hq_cmd_count != 0))
+ goto out_unlock;
+
restart:
list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
sn_cmd_list_entry) {
return;
}
-static struct scst_cmd *__scst_unblock_deferred(
- struct scst_tgt_dev *tgt_dev, struct scst_cmd *out_of_sn_cmd)
+static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
+ struct scst_cmd *out_of_sn_cmd)
{
- struct scst_cmd *res = NULL;
-
EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
- res = scst_check_deferred_commands(tgt_dev);
+ scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
} else {
out_of_sn_cmd->out_of_sn = 1;
spin_lock_irq(&tgt_dev->sn_lock);
spin_unlock_irq(&tgt_dev->sn_lock);
}
- return res;
+ return;
}
void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
struct scst_cmd *out_of_sn_cmd)
{
- struct scst_cmd *cmd;
-
TRACE_ENTRY();
if (!out_of_sn_cmd->sn_set) {
goto out;
}
- cmd = __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
- if (cmd != NULL) {
- unsigned long flags;
- spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
- TRACE_SN("cmd %p with sn %ld added to the head of active cmd "
- "list", cmd, cmd->sn);
- list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
- wake_up(&cmd->cmd_lists->cmd_list_waitQ);
- spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
- }
+ __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
out:
TRACE_EXIT();
* non-locked state. In the worst case we will only have
* unneeded run of the deferred commands.
*/
- if (tgt_dev->hq_cmd_count == 0) {
- struct scst_cmd *c =
- scst_check_deferred_commands(tgt_dev);
- if (c != NULL) {
- spin_lock_irq(&c->cmd_lists->cmd_list_lock);
- TRACE_SN("Adding cmd %p to active cmd list", c);
- list_add_tail(&c->cmd_list_entry,
- &c->cmd_lists->active_cmd_list);
- wake_up(&c->cmd_lists->cmd_list_waitQ);
- spin_unlock_irq(&c->cmd_lists->cmd_list_lock);
- }
- }
+ if (tgt_dev->hq_cmd_count == 0)
+ scst_make_deferred_commands_active(tgt_dev, cmd);
TRACE_EXIT();
return;
{
TRACE_ENTRY();
+ TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
+ "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
+ atomic_read(&scst_cmd_count));
+
+ scst_done_cmd_mgmt(cmd);
+
smp_rmb();
if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
if (cmd->completed) {
struct kmem_cache *scst_mgmt_cachep;
mempool_t *scst_mgmt_mempool;
+struct kmem_cache *scst_mgmt_stub_cachep;
+mempool_t *scst_mgmt_stub_mempool;
struct kmem_cache *scst_ua_cachep;
mempool_t *scst_ua_mempool;
struct kmem_cache *scst_sense_cachep;
} while (0)
INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
- INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA, out_destroy_mgmt_cache);
+ INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
+ out_destroy_mgmt_cache);
+ INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
+ out_destroy_mgmt_stub_cache);
{
struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
INIT_CACHEP(scst_sense_cachep, scst_sense, out_destroy_ua_cache);
INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
- scst_mgmt_mempool = mempool_create(10, mempool_alloc_slab,
+ scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
mempool_free_slab, scst_mgmt_cachep);
if (scst_mgmt_mempool == NULL) {
res = -ENOMEM;
goto out_destroy_acg_cache;
}
- scst_ua_mempool = mempool_create(25, mempool_alloc_slab,
+ scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
+ mempool_free_slab, scst_mgmt_stub_cachep);
+ if (scst_mgmt_stub_mempool == NULL) {
+ res = -ENOMEM;
+ goto out_destroy_mgmt_mempool;
+ }
+
+ scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
mempool_free_slab, scst_ua_cachep);
if (scst_ua_mempool == NULL) {
res = -ENOMEM;
- goto out_destroy_mgmt_mempool;
+ goto out_destroy_mgmt_stub_mempool;
}
/* Losing sense may have fatal consequences, so let's have a big pool */
out_destroy_ua_mempool:
mempool_destroy(scst_ua_mempool);
+out_destroy_mgmt_stub_mempool:
+ mempool_destroy(scst_mgmt_stub_mempool);
+
out_destroy_mgmt_mempool:
mempool_destroy(scst_mgmt_mempool);
out_destroy_ua_cache:
kmem_cache_destroy(scst_ua_cachep);
+out_destroy_mgmt_stub_cache:
+ kmem_cache_destroy(scst_mgmt_stub_cachep);
+
out_destroy_mgmt_cache:
kmem_cache_destroy(scst_mgmt_cachep);
goto out;
} while (0)
mempool_destroy(scst_mgmt_mempool);
+ mempool_destroy(scst_mgmt_stub_mempool);
mempool_destroy(scst_ua_mempool);
mempool_destroy(scst_sense_mempool);
DEINIT_CACHEP(scst_mgmt_cachep);
+ DEINIT_CACHEP(scst_mgmt_stub_cachep);
DEINIT_CACHEP(scst_ua_cachep);
DEINIT_CACHEP(scst_sense_cachep);
DEINIT_CACHEP(scst_cmd_cachep);
* Target Driver Side (i.e. HBA)
*/
EXPORT_SYMBOL(scst_register_session);
-EXPORT_SYMBOL(scst_unregister_session);
+EXPORT_SYMBOL(scst_unregister_session_ex);
EXPORT_SYMBOL(__scst_register_target_template);
EXPORT_SYMBOL(scst_unregister_target_template);
{
struct sgv_pool_obj *obj;
int order, pages, cnt;
- struct scatterlist *res;
+ struct scatterlist *res = NULL;
int pages_to_alloc;
struct kmem_cache *cache;
int no_cached = flags & SCST_POOL_ALLOC_NO_CACHED;
TRACE_ENTRY();
- sBUG_ON(size == 0);
+ if (unlikely(size == 0))
+ goto out;
pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
order = get_order(size);
/* Set if new commands initialization is suspended for a while */
#define SCST_FLAG_SUSPENDED 1
-/* Set if a TM command is being performed */
-#define SCST_FLAG_TM_ACTIVE 2
-
/**
** Return codes for cmd state process functions
**/
extern unsigned long scst_max_cmd_mem;
extern mempool_t *scst_mgmt_mempool;
+extern mempool_t *scst_mgmt_stub_mempool;
extern mempool_t *scst_ua_mempool;
extern mempool_t *scst_sense_mempool;
return __scst_check_deferred_commands(tgt_dev);
}
+static inline void scst_make_deferred_commands_active(
+ struct scst_tgt_dev *tgt_dev, struct scst_cmd *curr_cmd)
+{
+ struct scst_cmd *c;
+
+ c = __scst_check_deferred_commands(tgt_dev);
+ if (c != NULL) {
+ TRACE_SN("Adding cmd %p to active cmd list", c);
+
+ EXTRACHECKS_BUG_ON(c->cmd_lists != curr_cmd->cmd_lists);
+
+ spin_lock_irq(&c->cmd_lists->cmd_list_lock);
+ list_add_tail(&c->cmd_list_entry,
+ &c->cmd_lists->active_cmd_list);
+ if (!curr_cmd->context_processable || curr_cmd->long_xmit)
+ wake_up(&c->cmd_lists->cmd_list_waitQ);
+ spin_unlock_irq(&c->cmd_lists->cmd_list_lock);
+ }
+
+ return;
+}
+
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot);
int scst_check_hq_cmd(struct scst_cmd *cmd);
struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask);
void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd);
-void scst_complete_cmd_mgmt(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd);
+void scst_done_cmd_mgmt(struct scst_cmd *cmd);
/* /proc support */
int scst_proc_init_module(void);
static void scst_cmd_set_sn(struct scst_cmd *cmd);
static int __scst_init_cmd(struct scst_cmd *cmd);
+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
{
}
/*
- * Must not be called in parallel with scst_unregister_session() for the
+ * Must not be called in parallel with scst_unregister_session_ex() for the
* same sess
*/
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
TRACE_ENTRY();
- cmd->inc_expected_sn_on_done = !dev->has_own_order_mgmt &&
- (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER);
+ cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
+ (!dev->has_own_order_mgmt &&
+ ((dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) ||
+ (cmd->queue_type == SCST_CMD_QUEUE_ORDERED)));
sBUG_ON(cmd->internal);
struct scst_device *dev = cmd->dev;
int atomic = scst_cmd_atomic(cmd);
int orig_bufflen = cmd->bufflen;
- scst_data_direction orig_data_direction = cmd->data_direction;
TRACE_ENTRY();
cmd->data_direction = cmd->expected_data_direction;
cmd->bufflen = cmd->expected_transfer_len;
#else
- if (unlikely(cmd->data_direction != orig_data_direction)) {
+ if (unlikely(cmd->data_direction != cmd->expected_data_direction)) {
PRINT_ERROR("Expected data direction %d for opcode "
"0x%02x (handler %s, target %s) doesn't match "
"decoded value %d", cmd->data_direction,
cmd->cdb[0], dev->handler->name,
- cmd->tgtt->name, orig_data_direction);
+ cmd->tgtt->name, cmd->expected_data_direction);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_invalid_message));
goto out_dev_done;
}
if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
- PRINT_INFO("Warning: expected transfer length %d for "
- "opcode 0x%02x (handler %s, target %s) doesn't "
- "match decoded value %d. Faulty initiator "
- "(e.g. VMware is known to be such) or "
+ TRACE(TRACE_MINOR, "Warning: expected transfer length "
+ "%d for opcode 0x%02x (handler %s, target %s) "
+ "doesn't match decoded value %d. Faulty "
+ "initiator (e.g. VMware is known to be such) or "
"scst_scsi_op_table should be updated?",
cmd->expected_transfer_len, cmd->cdb[0],
dev->handler->name, cmd->tgtt->name,
cmd->bufflen);
- PRINT_BUFFER("Suspicious CDB", cmd->cdb, cmd->cdb_len);
+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB", cmd->cdb,
+ cmd->cdb_len);
}
#endif
}
- if ((cmd->data_direction == SCST_DATA_UNKNOWN) ||
- ((cmd->bufflen == 0) && (cmd->data_direction != SCST_DATA_NONE))) {
- PRINT_ERROR("Wrong data direction (%d) or/and buffer "
- "length (%d). Opcode 0x%x, handler %s, target %s",
- cmd->data_direction, cmd->bufflen, cmd->cdb[0],
- dev->handler->name, cmd->tgtt->name);
+ if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
+ PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
+ "target %s", cmd->cdb[0], dev->handler->name,
+ cmd->tgtt->name);
goto out_error;
}
if (r > 0)
goto alloc;
else if (r == 0) {
+ if (unlikely(cmd->bufflen == 0)) {
+ /* See comment in scst_alloc_space() */
+ if (cmd->sg == NULL)
+ goto alloc;
+ }
cmd->data_buf_alloced = 1;
if (unlikely(orig_bufflen < cmd->bufflen)) {
PRINT_ERROR("Target driver allocated data "
cmd->bufflen);
goto out_error;
}
- } else
- goto check;
+ TRACE_MEM("%s", "data_buf_alloced, returning");
+ }
+ goto check;
}
alloc:
- if (!cmd->data_buf_alloced) {
- r = scst_alloc_space(cmd);
- } else {
- TRACE_MEM("%s", "data_buf_alloced set, returning");
- }
+ r = scst_alloc_space(cmd);
check:
if (r != 0) {
TRACE_ENTRY();
- TRACE_DBG("Context: %d", context);
+ TRACE_DBG("Context: %x", context);
- switch(context) {
- case SCST_CONTEXT_DIRECT:
+ switch(context & ~SCST_CONTEXT_PROCESSABLE) {
case SCST_CONTEXT_DIRECT_ATOMIC:
+ context &= ~SCST_CONTEXT_PROCESSABLE;
+ /* go through */
+ case SCST_CONTEXT_DIRECT:
if (check_retries)
scst_check_retries(cmd->tgt);
scst_process_active_cmd(cmd, context);
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
{
+ int context;
+
TRACE_ENTRY();
#ifdef MEASURE_LATENCY
#endif
cmd->state = next_state;
- scst_proccess_redirect_cmd(cmd,
- scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
+ context = scst_optimize_post_exec_context(cmd, scst_get_context());
+ if (cmd->context_processable)
+ context |= SCST_CONTEXT_PROCESSABLE;
+ scst_proccess_redirect_cmd(cmd, context, 0);
TRACE_EXIT();
return;
static int scst_do_send_to_midlev(struct scst_cmd *cmd)
{
int rc = SCST_EXEC_NOT_COMPLETED;
+ struct scst_device *dev = cmd->dev;
+ struct scst_dev_type *handler = dev->handler;
TRACE_ENTRY();
/* Check here to let an out of SN cmd be queued w/o context switch */
- if (scst_cmd_atomic(cmd) && !cmd->dev->handler->exec_atomic) {
+ if (scst_cmd_atomic(cmd) && !handler->exec_atomic) {
TRACE_DBG("Dev handler %s exec() can not be "
"called in atomic context, rescheduling to the thread",
- cmd->dev->handler->name);
+ handler->name);
rc = SCST_EXEC_NEED_THREAD;
goto out;
}
goto out_rc_error;
}
- if (cmd->dev->handler->exec) {
- struct scst_device *dev = cmd->dev;
+ if (!handler->exec_sync)
+ cmd->context_processable = 0;
+
+ if (handler->exec) {
TRACE_DBG("Calling dev handler %s exec(%p)",
- dev->handler->name, cmd);
+ handler->name, cmd);
TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
cmd->scst_cmd_done = scst_cmd_done_local;
- rc = dev->handler->exec(cmd);
+ rc = handler->exec(cmd);
TRACE_DBG("Dev handler %s exec() returned %d",
- dev->handler->name, rc);
+ handler->name, rc);
if (rc == SCST_EXEC_COMPLETED)
goto out;
else if (rc == SCST_EXEC_NEED_THREAD)
TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
- if (unlikely(cmd->dev->scsi_dev == NULL)) {
+ if (unlikely(dev->scsi_dev == NULL)) {
PRINT_ERROR("Command for virtual device must be "
"processed by device handler (lun %Ld)!",
(uint64_t)cmd->lun);
cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
cmd->retries);
#else
- rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
+ rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
cmd->timeout, cmd->retries, cmd, scst_cmd_done,
scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
out_rc_error:
PRINT_ERROR("Dev handler %s exec() or scst_local_exec() returned "
- "invalid code %d", cmd->dev->handler->name, rc);
+ "invalid code %d", handler->name, rc);
/* go through */
out_error:
count = 0;
while(1) {
atomic_t *slot = cmd->sn_slot;
+ /* For HQ commands SN is not set */
int inc_expected_sn = !cmd->inc_expected_sn_on_done &&
cmd->sn_set;
static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
{
- struct scst_cmd *c;
-
if (likely(cmd->sn_set))
scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
- c = scst_check_deferred_commands(cmd->tgt_dev);
- if (c != NULL) {
- unsigned long flags;
- spin_lock_irqsave(&c->cmd_lists->cmd_list_lock, flags);
- TRACE_SN("Adding cmd %p to active cmd list", c);
- list_add_tail(&c->cmd_list_entry,
- &c->cmd_lists->active_cmd_list);
- wake_up(&c->cmd_lists->cmd_list_waitQ);
- spin_unlock_irqrestore(&c->cmd_lists->cmd_list_lock, flags);
- }
+ scst_make_deferred_commands_active(cmd->tgt_dev, cmd);
}
static int scst_dev_done(struct scst_cmd *cmd)
list_del(&cmd->search_cmd_list_entry);
spin_unlock_irq(&cmd->sess->sess_list_lock);
+ cmd->done = 1;
+ smp_mb(); /* to sync with scst_abort_cmd() */
+
if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
scst_xmit_process_aborted_cmd(cmd);
smp_mb(); /* to sync with scst_abort_cmd() */
if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
- unsigned long flags;
-
TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
"scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
atomic_read(&scst_cmd_count));
- spin_lock_irqsave(&scst_mcmd_lock, flags);
- if (cmd->mgmt_cmnd)
- scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
- spin_unlock_irqrestore(&scst_mcmd_lock, flags);
+ scst_finish_cmd_mgmt(cmd);
}
if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
cmd->sn_set = 1;
+
out:
return;
}
EXTRACHECKS_BUG_ON(in_irq());
+ cmd->context_processable = ((context & SCST_CONTEXT_PROCESSABLE) != 0);
+ context &= ~SCST_CONTEXT_PROCESSABLE;
cmd->atomic = (context == SCST_CONTEXT_DIRECT_ATOMIC);
+ TRACE_DBG("cmd %p, context_processable %d, atomic %d", cmd,
+ cmd->context_processable, cmd->atomic);
+
do {
switch (cmd->state) {
case SCST_CMD_STATE_PRE_PARSE:
TRACE_ENTRY();
#ifdef EXTRACHECKS
- WARN_ON((context != SCST_CONTEXT_DIRECT_ATOMIC) &&
- (context != SCST_CONTEXT_DIRECT));
+ {
+ int c = context & ~SCST_CONTEXT_PROCESSABLE;
+ sBUG_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) &&
+ (c != SCST_CONTEXT_DIRECT));
+ }
#endif
while (!list_empty(cmd_list)) {
}
scst_do_job_active(&p_cmd_lists->active_cmd_list,
- &p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT);
+ &p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT |
+ SCST_CONTEXT_PROCESSABLE);
}
spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
return res;
}
-/* scst_mcmd_lock supposed to be held and IRQ off */
-void scst_complete_cmd_mgmt(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd)
+/* No locks */
+void scst_done_cmd_mgmt(struct scst_cmd *cmd)
{
+ struct scst_mgmt_cmd_stub *mstb;
+ bool wake = false;
+ unsigned long flags;
+
TRACE_ENTRY();
- TRACE_MGMT_DBG("cmd %p completed (tag %llu, mcmd %p, "
- "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
- mcmd->cmd_wait_count);
+ TRACE_MGMT_DBG("cmd %p done (tag %llu)", cmd, cmd->tag);
- cmd->mgmt_cmnd = NULL;
+ spin_lock_irqsave(&scst_mcmd_lock, flags);
- if (cmd->completed)
- mcmd->completed_cmd_count++;
+ list_for_each_entry(mstb, &cmd->mgmt_cmd_list,
+ cmd_mgmt_cmd_list_entry) {
+ struct scst_mgmt_cmd *mcmd = mstb->mcmd;
- mcmd->cmd_wait_count--;
- if (mcmd->cmd_wait_count > 0) {
- TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
- mcmd->cmd_wait_count);
- goto out_unlock;
- }
+ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
+ mcmd, mcmd->cmd_done_wait_count);
- mcmd->state = SCST_MGMT_CMD_STATE_DONE;
+ mcmd->cmd_done_wait_count--;
+ if (mcmd->cmd_done_wait_count > 0) {
+ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
+ "skipping", mcmd->cmd_done_wait_count);
+ continue;
+ }
- if (mcmd->completed) {
- TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list",
- mcmd);
- list_add_tail(&mcmd->mgmt_cmd_list_entry,
- &scst_active_mgmt_cmd_list);
+ if (mcmd->completed) {
+ sBUG_ON(mcmd->nexus_loss_check_done);
+ mcmd->nexus_loss_check_active = 1;
+ mcmd->state = SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS;
+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
+ "list", mcmd);
+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
+ &scst_active_mgmt_cmd_list);
+ wake = 1;
+ }
}
- wake_up(&scst_mgmt_cmd_list_waitQ);
+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
+
+ if (wake)
+ wake_up(&scst_mgmt_cmd_list_waitQ);
-out:
TRACE_EXIT();
return;
+}
-out_unlock:
- spin_unlock_irq(&scst_mcmd_lock);
- goto out;
+/* No locks */
+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
+{
+ struct scst_mgmt_cmd_stub *mstb, *t;
+ bool wake = false;
+ unsigned long flags;
+
+ TRACE_ENTRY();
+
+ TRACE_MGMT_DBG("cmd %p finished (tag %llu)", cmd, cmd->tag);
+
+ spin_lock_irqsave(&scst_mcmd_lock, flags);
+
+ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
+ cmd_mgmt_cmd_list_entry) {
+ struct scst_mgmt_cmd *mcmd = mstb->mcmd;
+
+ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d",
+ mcmd, mcmd->cmd_finish_wait_count);
+
+ list_del(&mstb->cmd_mgmt_cmd_list_entry);
+ mempool_free(mstb, scst_mgmt_stub_mempool);
+
+ if (cmd->completed)
+ mcmd->completed_cmd_count++;
+
+ mcmd->cmd_finish_wait_count--;
+ if (mcmd->cmd_finish_wait_count > 0) {
+ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
+ "skipping", mcmd->cmd_finish_wait_count);
+ continue;
+ }
+
+ if (mcmd->completed && !mcmd->nexus_loss_check_active) {
+ mcmd->state = SCST_MGMT_CMD_STATE_DONE;
+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
+ "list", mcmd);
+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
+ &scst_active_mgmt_cmd_list);
+ wake = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
+
+ if (wake)
+ wake_up(&scst_mgmt_cmd_list_waitQ);
+
+ TRACE_EXIT();
+ return;
}
static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
}
set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
- /* To sync with cmd->finished set in scst_finish_cmd() */
+ /*
+ * To sync with cmd->finished/done set in
+ * scst_finish_cmd()/scst_pre_xmit_response()
+ */
smp_mb__after_set_bit();
if (cmd->tgt_dev == NULL) {
spin_lock_irqsave(&scst_mcmd_lock, flags);
if ((mcmd != NULL) && !cmd->finished) {
+ struct scst_mgmt_cmd_stub *mstb;
+
+ mstb = mempool_alloc(scst_mgmt_stub_mempool, GFP_ATOMIC);
+ if (mstb == NULL) {
+ PRINT_CRIT_ERROR("Allocation of management command "
+ "stub failed (mcmd %p, cmd %p)", mcmd, cmd);
+ goto unlock;
+ }
+ mstb->mcmd = mcmd;
+
+ /*
+ * cmd can't die here, because sess_list_lock is already taken
+ * and cmd is still in the search list
+ */
+ list_add_tail(&mstb->cmd_mgmt_cmd_list_entry,
+ &cmd->mgmt_cmd_list);
+
/*
* Delay the response until the command's finish in
* order to guarantee that "no further responses from
* we must wait here to be sure that we won't receive
* double commands with the same tag.
*/
- TRACE_MGMT_DBG("cmd %p (tag %llu) being executed/xmitted "
- "(state %d), deferring ABORT...", cmd, cmd->tag,
- cmd->state);
-#ifdef EXTRACHECKS
- if (cmd->mgmt_cmnd) {
- printk(KERN_ALERT "cmd %p (tag %llu, state %d) "
- "has non-NULL mgmt_cmnd %p!!! Current "
- "mcmd %p\n", cmd, cmd->tag, cmd->state,
- cmd->mgmt_cmnd, mcmd);
- }
-#endif
- sBUG_ON(cmd->mgmt_cmnd);
+ TRACE_MGMT_DBG("cmd %p (tag %llu) being executed/"
+ "xmitted (state %d), deferring ABORT...",
+ cmd, cmd->tag, cmd->state);
- mcmd->cmd_wait_count++;
+ mcmd->cmd_finish_wait_count++;
- /*
- * cmd can't die here or sess_list_lock already taken and cmd is
- * in the search list
- */
- cmd->mgmt_cmnd = mcmd;
+ if (!cmd->done) {
+ TRACE_MGMT_DBG("cmd %p (tag %llu) not done yet",
+ cmd, cmd->tag);
+ mcmd->cmd_done_wait_count++;
+ }
}
+unlock:
spin_unlock_irqrestore(&scst_mcmd_lock, flags);
tm_dbg_release_cmd(cmd);
static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
{
int res;
+
spin_lock_irq(&scst_mcmd_lock);
- if (mcmd->cmd_wait_count != 0) {
- TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
- "wait", mcmd->cmd_wait_count);
+
+ if (mcmd->cmd_finish_wait_count == 0) {
+ if (!mcmd->nexus_loss_check_done)
+ mcmd->state = SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS;
+ else
+ mcmd->state = SCST_MGMT_CMD_STATE_DONE;
+ res = 0;
+ } else if ((mcmd->cmd_done_wait_count == 0) &&
+ (!mcmd->nexus_loss_check_done)) {
+ mcmd->state = SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS;
+ res = 0;
+ goto out_unlock;
+ } else {
+ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, preparing to "
+ "wait", mcmd->cmd_finish_wait_count);
mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
res = -1;
- } else {
- mcmd->state = SCST_MGMT_CMD_STATE_DONE;
- res = 0;
}
+
+ mcmd->nexus_loss_check_active = 0;
mcmd->completed = 1;
+
+out_unlock:
spin_unlock_irq(&scst_mcmd_lock);
return res;
}
}
static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
- struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
+ struct scst_tgt_dev *tgt_dev, int other_ini)
{
struct scst_cmd *cmd;
struct scst_session *sess = tgt_dev->sess;
}
spin_unlock_irq(&sess->sess_list_lock);
- scst_unblock_aborted_cmds(scst_mutex_held);
-
TRACE_EXIT();
return;
}
__scst_block_dev(dev);
spin_unlock_bh(&dev->dev_lock);
- __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
+ __scst_abort_task_set(mcmd, tgt_dev, 0);
+
+ scst_unblock_aborted_cmds(0);
+
scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
res = scst_set_mcmd_next_state(mcmd);
__scst_block_dev(dev);
spin_unlock_bh(&dev->dev_lock);
- __scst_abort_task_set(mcmd, mcmd->mcmd_tgt_dev, 0, 0);
+ __scst_abort_task_set(mcmd, mcmd->mcmd_tgt_dev, 0);
mutex_lock(&scst_mutex);
&UA_tgt_devs);
}
- mutex_unlock(&scst_mutex);
+ scst_unblock_aborted_cmds(1);
- scst_unblock_aborted_cmds(0);
+ mutex_unlock(&scst_mutex);
if (!dev->tas) {
list_for_each_entry(tgt_dev, &UA_tgt_devs, extra_tgt_dev_list_entry) {
return res;
}
-static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
-{
- if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags) && !mcmd->active) {
- TRACE_MGMT_DBG("Adding mgmt cmd %p to delayed mgmt cmd list",
- mcmd);
- spin_lock_irq(&scst_mcmd_lock);
- list_add_tail(&mcmd->mgmt_cmd_list_entry,
- &scst_delayed_mgmt_cmd_list);
- spin_unlock_irq(&scst_mcmd_lock);
- return -1;
- } else {
- mcmd->active = 1;
- set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
- return 0;
- }
-}
-
/* Returns 0 if the command processing should be continued,
* >0, if it should be requeued, <0 otherwise */
static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
TRACE_ENTRY();
- res = scst_check_delay_mgmt_cmd(mcmd);
- if (res != 0)
- goto out;
-
mcmd->state = SCST_MGMT_CMD_STATE_READY;
switch (mcmd->fn) {
tm_dbg_task_mgmt(dev, "TARGET RESET", 0);
}
+ scst_unblock_aborted_cmds(1);
+
/*
* We suppose here that for all commands that already on devices
* on/after scsi_reset_provider() completion callbacks will be called.
dev->scsi_dev->was_reset = 0;
}
+ scst_unblock_aborted_cmds(0);
+
out_tm_dbg:
tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "LUN RESET", 0);
return res;
}
+/* scst_mutex supposed to be held */
+static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
+{
+ int i;
+ struct scst_session *sess = mcmd->sess;
+ struct scst_tgt_dev *tgt_dev;
+
+ TRACE_ENTRY();
+
+ for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
+ struct list_head *sess_tgt_dev_list_head =
+ &sess->sess_tgt_dev_list_hash[i];
+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
+ sess_tgt_dev_list_entry) {
+ scst_nexus_loss(tgt_dev);
+ }
+ }
+
+ TRACE_EXIT();
+ return;
+}
+
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
int nexus_loss)
mcmd);
}
- mcmd->needs_unblocking = 1;
+ if (mcmd->fn != SCST_UNREG_SESS_TM)
+ mcmd->needs_unblocking = 1;
mutex_lock(&scst_mutex);
+
for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
struct list_head *sess_tgt_dev_list_head =
&sess->sess_tgt_dev_list_hash[i];
sess_tgt_dev_list_entry) {
struct scst_device *dev = tgt_dev->dev;
int rc;
+
+ if (mcmd->fn != SCST_UNREG_SESS_TM) {
+ spin_lock_bh(&dev->dev_lock);
+ __scst_block_dev(dev);
+ spin_unlock_bh(&dev->dev_lock);
+ }
- spin_lock_bh(&dev->dev_lock);
- __scst_block_dev(dev);
- spin_unlock_bh(&dev->dev_lock);
-
- __scst_abort_task_set(mcmd, tgt_dev, 0, 1);
- if (nexus_loss)
- scst_nexus_loss(tgt_dev);
+ __scst_abort_task_set(mcmd, tgt_dev, 0);
rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
if ((rc < 0) && (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
mcmd->status = rc;
}
}
+
+ scst_unblock_aborted_cmds(1);
+
mutex_unlock(&scst_mutex);
res = scst_set_mcmd_next_state(mcmd);
return res;
}
-/* Returns 0 if the command processing should be continued, <0 otherwise */
+/* scst_mutex supposed to be held */
+static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
+{
+ int i;
+ struct scst_tgt *tgt = mcmd->sess->tgt;
+ struct scst_session *sess;
+
+ TRACE_ENTRY();
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
+ struct list_head *sess_tgt_dev_list_head =
+ &sess->sess_tgt_dev_list_hash[i];
+ struct scst_tgt_dev *tgt_dev;
+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
+ sess_tgt_dev_list_entry) {
+ scst_nexus_loss(tgt_dev);
+ }
+ }
+ }
+
+ TRACE_EXIT();
+ return;
+}
+
static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
int nexus_loss)
{
sess_tgt_dev_list_entry) {
int rc;
- __scst_abort_task_set(mcmd, tgt_dev, 0, 1);
+ __scst_abort_task_set(mcmd, tgt_dev, 0);
if (nexus_loss)
scst_nexus_loss(tgt_dev);
}
}
+ scst_unblock_aborted_cmds(1);
+
mutex_unlock(&scst_mutex);
res = scst_set_mcmd_next_state(mcmd);
break;
case SCST_NEXUS_LOSS_SESS:
+ case SCST_UNREG_SESS_TM:
res = scst_abort_all_nexus_loss_sess(mcmd, 1);
break;
goto out;
}
+static int scst_mgmt_cmd_check_nexus_loss(struct scst_mgmt_cmd *mcmd)
+{
+ int res;
+
+ TRACE_ENTRY();
+
+ mutex_lock(&scst_mutex);
+
+ switch (mcmd->fn) {
+ case SCST_NEXUS_LOSS_SESS:
+ case SCST_UNREG_SESS_TM:
+ scst_do_nexus_loss_sess(mcmd);
+ break;
+
+ case SCST_NEXUS_LOSS:
+ scst_do_nexus_loss_tgt(mcmd);
+ break;
+ }
+
+ mutex_unlock(&scst_mutex);
+
+ if ((mcmd->fn == SCST_UNREG_SESS_TM) &&
+ (mcmd->sess->unreg_cmds_done_fn != NULL)) {
+ struct scst_session *sess = mcmd->sess;
+ TRACE_MGMT_DBG("Calling unreg_cmds_done_fn(%p)", sess);
+ sess->unreg_cmds_done_fn(sess);
+ TRACE_MGMT_DBG("task_mgmt_all_cmds_done(%p) returned", sess);
+ }
+
+ mcmd->nexus_loss_check_done = 1;
+
+ res = scst_set_mcmd_next_state(mcmd);
+
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
{
struct scst_device *dev;
TRACE_ENTRY();
- clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
- spin_lock_irq(&scst_mcmd_lock);
- if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
- struct scst_mgmt_cmd *m;
- m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
- mgmt_cmd_list_entry);
- TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
- "mgmt cmd list", m);
- list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
- }
- spin_unlock_irq(&scst_mcmd_lock);
-
mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
TRACE(TRACE_MGMT_MINOR, "TM command fn %d finished, status %x",
mcmd->fn, mcmd->status);
- if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
+ if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done &&
+ (mcmd->fn != SCST_UNREG_SESS_TM)) {
TRACE_DBG("Calling target %s task_mgmt_fn_done()",
mcmd->sess->tgt->tgtt->name);
mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
goto out;
break;
+ case SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS:
+ if (scst_mgmt_cmd_check_nexus_loss(mcmd))
+ goto out;
+ break;
+
case SCST_MGMT_CMD_STATE_DONE:
scst_mgmt_cmd_send_done(mcmd);
break;
}
mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (mcmd == NULL)
+ if (mcmd == NULL) {
+ PRINT_CRIT_ERROR("Lost TM fn %x, initiator %s", fn,
+ sess->initiator_name);
goto out;
+ }
mcmd->sess = sess;
mcmd->fn = fn;
scst_sess_get(sess);
+ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
+ PRINT_ERROR("New mgmt cmd while shutting down the session %p "
+ "shut_phase %ld", sess, sess->shut_phase);
+ sBUG();
+ }
+
local_irq_save(flags);
spin_lock(&sess->sess_list_lock);
atomic_inc(&sess->sess_cmd_count);
-#ifdef EXTRACHECKS
- if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
- PRINT_ERROR("%s",
- "New mgmt cmd while shutting down the session");
- sBUG();
- }
-#endif
-
if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
switch(sess->init_phase) {
case SCST_SESS_IPH_INITING:
}
/*
- * Must not be called in parallel with scst_unregister_session() for the
+ * Must not be called in parallel with scst_unregister_session_ex() for the
* same sess
*/
int scst_rx_mgmt_fn(struct scst_session *sess,
"TM fn %x", params->fn);
TRACE_MGMT_DBG("sess=%p, tag_set %d, tag %Ld, lun_set %d, "
- "lun=%Ld, cmd_sn_set %d, cmd_sn %d", sess,
+ "lun=%Ld, cmd_sn_set %d, cmd_sn %d, priv %p", sess,
params->tag_set, params->tag, params->lun_set,
- (uint64_t)mcmd->lun, params->cmd_sn_set, params->cmd_sn);
+ (uint64_t)mcmd->lun, params->cmd_sn_set, params->cmd_sn,
+ params->tgt_priv);
if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
goto out_free;
* Must not be called in parallel with scst_rx_cmd() or
* scst_rx_mgmt_fn_*() for the same sess
*/
-void scst_unregister_session(struct scst_session *sess, int wait,
- void (*unreg_done_fn) (struct scst_session *sess))
+void scst_unregister_session_ex(struct scst_session *sess, int wait,
+ void (*unreg_done_fn) (struct scst_session *sess),
+ void (*unreg_cmds_done_fn) (struct scst_session *sess))
{
unsigned long flags;
struct completion *pc;
#ifndef CONFIG_LOCKDEP
DECLARE_COMPLETION(c);
#endif
+ int rc, lun;
TRACE_ENTRY();
pc = &c;
#endif
+ sess->unreg_done_fn = unreg_done_fn;
+ sess->unreg_cmds_done_fn = unreg_cmds_done_fn;
+
+ /* Abort all outstanding commands and clear reservation, if necessary */
+ lun = 0;
+ rc = scst_rx_mgmt_fn_lun(sess, SCST_UNREG_SESS_TM,
+ (uint8_t*)&lun, sizeof(lun), SCST_ATOMIC, NULL);
+ if (rc != 0) {
+ PRINT_ERROR("SCST_UNREG_SESS_TM failed %d (sess %p)",
+ rc, sess);
+ }
+
sess->shut_phase = SCST_SESS_SPH_PRE_UNREG;
spin_lock_irqsave(&scst_mgmt_lock, flags);
- sess->unreg_done_fn = unreg_done_fn;
if (wait)
sess->shutdown_compl = pc;
#ifdef CONFIG_LOCKDEP