"Default_iqn.2007-05.com.example:storage.disk1.sys1.xyz", and add there
all necessary LUNs. Check SCST README file for details.
+If under high load you experience I/O stalls or see in the kernel log
+abort or reset messages, then try to reduce the QueuedCommands parameter
+in the iscsi-scstd.conf file for the corresponding target. Particularly,
+it is known that the default value 32 is sometimes too high if you do
+intensive writes from VMware on a target disk which uses LVM in snapshot
+mode. In this case a value like 16 or even 10, depending on your
+backstorage speed, could be more appropriate.
+
Compilation options
-------------------
char initiator_name[ISCSI_NAME_LEN];
char user_name[ISCSI_NAME_LEN];
u32 exp_cmd_sn;
- u32 max_cmd_sn;
};
#define DIGEST_ALL (DIGEST_NONE | DIGEST_CRC32C)
return -ENOENT;
info.exp_cmd_sn = session->exp_cmd_sn;
- info.max_cmd_sn = session->max_cmd_sn;
if (copy_to_user((void *) ptr, &info, sizeof(info)))
return -EFAULT;
static void cmnd_remove_hash(struct iscsi_cmnd *cmnd);
static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
static void cmnd_prepare_skip_pdu(struct iscsi_cmnd *cmnd);
+static void iscsi_cond_send_tm_resp(struct iscsi_cmnd *rsp, int force);
static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
{
if (parent == NULL) {
conn_get(conn);
+
#ifdef NET_PAGE_CALLBACKS_DEFINED
atomic_set(&cmnd->net_ref_cnt, 0);
#endif
#endif
}
+ if (cmnd->dec_active_cmnds) {
+ struct iscsi_session *sess = cmnd->conn->session;
+ TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
+ "new value %d)", cmnd, sess,
+ atomic_read(&sess->active_cmds)-1);
+ atomic_dec(&sess->active_cmds);
+ }
+
cmnd_free(cmnd);
return;
}
if (req->hashed)
cmnd_remove_hash(req);
+ if (req->dec_active_cmnds) {
+ struct iscsi_session *sess = req->conn->session;
+ TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
+ "new value %d)", req, sess,
+ atomic_read(&sess->active_cmds)-1);
+ atomic_dec(&sess->active_cmds);
+ req->dec_active_cmnds = 0;
+ }
+
cmnd_put(req);
TRACE_EXIT();
req->pdu.bhs.opcode = ISCSI_OP_PDU_REJECT;
}
+/*
+ * Returns how many more SCSI commands this session is allowed to queue
+ * before reaching its max_queued_cmnds limit. The result is used as the
+ * MaxCmdSN offset advertised to the initiator (MaxCmdSN = ExpCmdSN + res),
+ * so it is floored at -1: MaxCmdSN = ExpCmdSN - 1 legitimately tells the
+ * initiator the command window is closed, but the window must never be
+ * pushed further backwards.
+ *
+ * NOTE(review): active_cmds is read twice without sn_lock, so the value is
+ * only a snapshot and the TRACE line may report a slightly different count
+ * than the one used in the computation — presumably acceptable for a flow
+ * control hint; confirm against callers.
+ */
+static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
+{
+	int res = max(-1, (int)sess->max_queued_cmnds -
+		atomic_read(&sess->active_cmds)-1);
+	TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
+		sess, atomic_read(&sess->active_cmds));
+	return res;
+}
+
static u32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
{
struct iscsi_conn *conn = cmnd->conn;
if (set_stat_sn)
cmnd->pdu.bhs.sn = cpu_to_be32(conn->stat_sn++);
cmnd->pdu.bhs.exp_sn = cpu_to_be32(sess->exp_cmd_sn);
- cmnd->pdu.bhs.max_sn = cpu_to_be32(sess->exp_cmd_sn + sess->max_queued_cmnds);
+ cmnd->pdu.bhs.max_sn = cpu_to_be32(sess->exp_cmd_sn +
+ iscsi_get_allowed_cmds(sess));
res = cpu_to_be32(conn->stat_sn);
TRACE_DBG("%p:%x", cmnd, itt);
if (itt == ISCSI_RESERVED_TAG) {
+ PRINT_ERROR("%s", "ITT is RESERVED_TAG");
err = -ISCSI_REASON_PROTOCOL_ERROR;
goto out;
}
TRACE_DBG("scsi command: %02x", req_hdr->scb[0]);
+ TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
+ "new value %d)", req, session,
+ atomic_read(&session->active_cmds)+1);
+ atomic_inc(&session->active_cmds);
+ req->dec_active_cmnds = 1;
+
scst_cmd = scst_rx_cmd(session->scst_sess,
(uint8_t*)&req_hdr->lun, sizeof(req_hdr->lun),
req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
	memset(&params, 0, sizeof(params));
params.atomic = SCST_NON_ATOMIC;
params.tgt_priv = req;
-
+
+ if (conn->session->tm_rsp != NULL) {
+ struct iscsi_task_rsp_hdr *rsp_hdr =
+ (struct iscsi_task_rsp_hdr *)&conn->session->tm_rsp->pdu.bhs;
+ rsp_hdr->response = ISCSI_RESPONSE_FUNCTION_REJECTED;
+ iscsi_cond_send_tm_resp(conn->session->tm_rsp, 1);
+ }
+
if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
(req_hdr->rtt != ISCSI_RESERVED_TAG)) {
PRINT_ERROR("Invalid RTT %x (TM fn %x)", req_hdr->rtt,
spin_unlock(&session->sn_lock);
+ if (unlikely(session->tm_rsp != NULL))
+ iscsi_cond_send_tm_resp(session->tm_rsp, 0);
+
iscsi_cmnd_exec(cmnd);
if (list_empty(&session->pending_list))
session->exp_cmd_sn);
}
- if (after(cmd_sn, session->exp_cmd_sn + session->max_queued_cmnds)) {
- PRINT_ERROR("too large cmd_sn (%u,%u)", cmd_sn,
- session->exp_cmd_sn);
+ if (after(cmd_sn, session->exp_cmd_sn + iscsi_get_allowed_cmds(session))) {
+ PRINT_ERROR("too large cmd_sn %u (exp_cmd_sn %u, "
+ "max_sn %u)", cmd_sn, session->exp_cmd_sn,
+ iscsi_get_allowed_cmds(session));
}
spin_unlock(&session->sn_lock);
void cmnd_rx_end(struct iscsi_cmnd *cmnd)
{
- if (unlikely(cmnd->tmfabort)) {
- TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
- cmnd->scst_cmd);
- req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
- return;
- }
-
TRACE_DBG("%p:%x", cmnd, cmnd_opcode(cmnd));
+
switch (cmnd_opcode(cmnd)) {
case ISCSI_OP_SCSI_REJECT:
case ISCSI_OP_NOOP_OUT:
req_cmnd_release(cmnd);
break;
}
+ return;
}
#ifndef NET_PAGE_CALLBACKS_DEFINED
return SCST_TGT_RES_SUCCESS;
}
+/*
+ * Sends a Task Management response, possibly delaying it.
+ *
+ * For ABORT TASK SET / CLEAR TASK SET / CLEAR ACA the response may not be
+ * sent while commands ordered before the TM request are still outstanding:
+ * if the TM request's CmdSN is still ahead of the session's exp_cmd_sn,
+ * the rsp is parked in sess->tm_rsp (under sn_lock) and sent later, when a
+ * caller re-invokes this function after exp_cmd_sn has advanced.
+ *
+ * With force != 0 the delay check is skipped and the response is sent
+ * immediately (used when a newly arrived TM request rejects a pending,
+ * parked one).
+ */
+static void iscsi_cond_send_tm_resp(struct iscsi_cmnd *rsp, int force)
+{
+	struct iscsi_task_mgt_hdr *req_hdr =
+		(struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
+	int function = req_hdr->function & ISCSI_FUNCTION_MASK;
+	struct iscsi_session *sess = rsp->conn->session;
+
+	TRACE_ENTRY();
+
+	if (!force) {
+		spin_lock(&sess->sn_lock);
+		switch(function) {
+		case ISCSI_FUNCTION_ABORT_TASK_SET:
+		case ISCSI_FUNCTION_CLEAR_TASK_SET:
+		case ISCSI_FUNCTION_CLEAR_ACA:
+			/* Not all affected commands received yet: park the
+			 * response and leave sn_lock released on exit. */
+			if (after(req_hdr->cmd_sn, sess->exp_cmd_sn)) {
+				TRACE_MGMT_DBG("Delaying TM fn %x response, "
+					"because not all affected commands "
+					"received (rsp %p, cmd sn %x, exp sn "
+					"%x)", function, rsp, req_hdr->cmd_sn,
+					sess->exp_cmd_sn);
+				sess->tm_rsp = rsp;
+				spin_unlock(&sess->sn_lock);
+				goto out;
+			}
+			break;
+		default:
+			break;
+		}
+		spin_unlock(&sess->sn_lock);
+	}
+
+	/* If this rsp was previously parked, clear the parking slot before
+	 * sending it. NOTE(review): read/cleared without sn_lock here;
+	 * presumably serialized by the caller — confirm. */
+	if (rsp == sess->tm_rsp) {
+		TRACE_MGMT_DBG("Sending delayed rsp %p (fn %x)", rsp, function);
+		sess->tm_rsp = NULL;
+	}
+	iscsi_cmnd_init_write(rsp,
+		ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
+
+out:
+	TRACE_EXIT();
+	return;
+}
+
static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
{
struct iscsi_cmnd *rsp;
(struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
struct iscsi_task_rsp_hdr *rsp_hdr;
+ TRACE_ENTRY();
+
TRACE((req_hdr->function == ISCSI_FUNCTION_ABORT_TASK) ?
TRACE_MGMT_MINOR : TRACE_MGMT,
"TM req %p finished, status %d", req, status);
ISCSI_FUNCTION_TARGET_COLD_RESET)
rsp->should_close_conn = 1;
- iscsi_cmnd_init_write(rsp,
- ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
+ iscsi_cond_send_tm_resp(rsp, 0);
+
req_cmnd_release(req);
+
+ TRACE_EXIT();
+ return;
}
static inline int iscsi_get_mgmt_response(int status)
struct list_head pending_list;
u32 next_ttt;
- /* Both unprotected, since read-only */
- u32 max_queued_cmnds;
- u32 max_cmd_sn; /* Not used ??, ToDo */
+ u32 max_queued_cmnds; /* unprotected, since read-only */
+ atomic_t active_cmds;
spinlock_t sn_lock;
u32 exp_cmd_sn; /* protected by sn_lock */
+ struct iscsi_cmnd *tm_rsp;
+
/* read only, if there are connection(s) */
struct iscsi_sess_param sess_param;
unsigned int write_processing_started:1;
unsigned int data_waiting:1;
unsigned int force_cleanup_done:1;
+ unsigned int dec_active_cmnds:1;
#ifdef EXTRACHECKS
unsigned int release_called:1;
#endif
memcpy(&session->sess_param, &target->trgt_sess_param,
sizeof(session->sess_param));
session->max_queued_cmnds = target->trgt_param.queued_cmnds;
+ atomic_set(&session->active_cmds, 0);
session->exp_cmd_sn = info->exp_cmd_sn;
- session->max_cmd_sn = info->max_cmd_sn;
session->initiator_name = kstrdup(info->initiator_name, GFP_KERNEL);
if (!session->initiator_name) {
(unsigned long long)session->sid);
sBUG_ON(!list_empty(&session->conn_list));
+ if (unlikely(atomic_read(&session->active_cmds) != 0)) {
+ PRINT_ERROR("active_cmds not 0 (%d)!!",
+ atomic_read(&session->active_cmds));
+ sBUG();
+ }
for (i = 0; i < ARRAY_SIZE(session->cmnd_hash); i++)
sBUG_ON(!list_empty(&session->cmnd_hash[i]));
}
static int iscsi_session_create(u32 tid, u64 sid, u32 exp_cmd_sn,
- u32 max_cmd_sn, char *name, char *user)
+ char *name, char *user)
{
struct session_info info;
info.tid = tid;
info.sid = sid;
info.exp_cmd_sn = exp_cmd_sn;
- info.max_cmd_sn = max_cmd_sn;
strncpy(info.initiator_name, name, sizeof(info.initiator_name) - 1);
strncpy(info.user_name, user, sizeof(info.user_name) - 1);
if (from.ss_family == AF_INET) {
struct sockaddr_in *in = (struct sockaddr_in *)&from;
- log_info("Connect from %s:%hd", inet_ntoa(in->sin_addr),
+ log_info("Connect from %s:%hu", inet_ntoa(in->sin_addr),
ntohs(in->sin_port));
} else if (from.ss_family == AF_INET6) {
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&from;
- log_info("Connect from %x:%x:%x:%x:%x:%x:%x:%x.%hd",
+ log_info("Connect from %x:%x:%x:%x:%x:%x:%x:%x.%hu",
in6->sin6_addr.s6_addr16[7], in6->sin6_addr.s6_addr16[6],
in6->sin6_addr.s6_addr16[5], in6->sin6_addr.s6_addr16[4],
in6->sin6_addr.s6_addr16[3], in6->sin6_addr.s6_addr16[2],
rsp->sid = conn->sid;
rsp->stat_sn = cpu_to_be32(conn->stat_sn++);
rsp->exp_cmd_sn = cpu_to_be32(conn->exp_cmd_sn);
- conn->max_cmd_sn = conn->exp_cmd_sn + 1;
- rsp->max_cmd_sn = cpu_to_be32(conn->max_cmd_sn);
+ rsp->max_cmd_sn = cpu_to_be32(conn->exp_cmd_sn + 1);
return;
init_err:
rsp->flags = 0;
rsp->stat_sn = cpu_to_be32(conn->stat_sn++);
rsp->exp_cmd_sn = cpu_to_be32(conn->exp_cmd_sn);
- conn->max_cmd_sn = conn->exp_cmd_sn + 1;
- rsp->max_cmd_sn = cpu_to_be32(conn->max_cmd_sn);
+ rsp->max_cmd_sn = cpu_to_be32(conn->exp_cmd_sn + 1);
}
static void cmnd_exec_logout(struct connection *conn)
rsp->stat_sn = cpu_to_be32(conn->stat_sn++);
rsp->exp_cmd_sn = cpu_to_be32(conn->exp_cmd_sn);
- conn->max_cmd_sn = conn->exp_cmd_sn + 1;
- rsp->max_cmd_sn = cpu_to_be32(conn->max_cmd_sn);
+ rsp->max_cmd_sn = cpu_to_be32(conn->exp_cmd_sn + 1);
}
int cmnd_execute(struct connection *conn)
u32 cmd_sn;
u32 exp_cmd_sn;
- u32 max_cmd_sn;
struct PDU req;
void *req_buffer;
int (*param_set) (u32, u64, int, u32, struct iscsi_param *, int);
int (*target_create) (u32 *, char *);
int (*target_destroy) (u32);
- int (*session_create) (u32, u64, u32, u32, char *, char *);
+ int (*session_create) (u32, u64, u32, char *, char *);
int (*session_destroy) (u32, u64);
int (*conn_create) (u32, u64, u32, u32, u32, int, u32, u32);
int (*conn_destroy) (u32 tid, u64 sid, u32 cid);
user = "";
ki->session_create(conn->tid, session->sid.id64, conn->exp_cmd_sn,
- conn->max_cmd_sn, session->initiator, user);
+ session->initiator, user);
ki->param_set(conn->tid, session->sid.id64, key_session, 0,
conn->session_param, 0);
}
- Support for per-target default security groups added.
- - Updateed to work on 2.6.22.x kernels.
+ - Updated to work on 2.6.22.x kernels.
- Updated to work with SCST 0.9.6.
and eases CPU load, but could create a security hole (information
leakage), so enable it, if you have strict security requirements.
+ - ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING - if defined, in case
+ when TASK MANAGEMENT function ABORT TASK is trying to abort a
+ command, which has already finished, remote initiator, which sent the
+ ABORT TASK request, will receive TASK NOT EXIST (or ABORT FAILED)
+   response for the ABORT TASK request. This is a more logical response,
+ since, because the command finished, attempt to abort it failed, but
+ some initiators, particularly VMware iSCSI initiator, consider TASK
+ NOT EXIST response as if the target got crazy and try to RESET it.
+   Then it sometimes gets crazy itself. So, this option is disabled by
+ default.
+
HIGHMEM kernel configurations are fully supported, but not recommended
for performance reasons, except for scst_user, where they are not
supported, because this module deals with user supplied memory on a
int scst_tape_generic_dev_done(struct scst_cmd *cmd,
void (*set_block_size)(struct scst_cmd *cmd, int block_size));
+/*
+ * Issues a MODE SENSE for control mode page data and sets the corresponding
+ * dev's parameter from it. Returns 0 on success and not 0 otherwise.
+ */
+int scst_obtain_device_parameters(struct scst_device *dev);
+
#endif /* __SCST_H */
#EXTRA_CFLAGS += -DUSE_EXPECTED_VALUES
#EXTRA_CFLAGS += -DALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
+#EXTRA_CFLAGS += -DABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
#EXTRA_CFLAGS += -fno-inline
goto out_free_buf;
}
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out_free_buf;
+
out_free_buf:
kfree(buffer);
CHANGER_RETRIES);
TRACE_DBG("TEST_UNIT_READY done: %x", res);
} while ((--retries > 0) && res);
- if (res)
+ if (res) {
res = -ENODEV;
+ goto out;
+ }
+
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out;
out:
TRACE_EXIT_HRES(res);
goto out_free_buf;
}
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out_free_buf;
+
out_free_buf:
kfree(buffer);
goto out_free_buf;
}
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out_free_buf;
+
out_free_buf:
kfree(buffer);
PROCESSOR_RETRIES);
TRACE_DBG("TEST_UNIT_READY done: %x", res);
} while ((--retries > 0) && res);
- if (res)
+ if (res) {
res = -ENODEV;
+ goto out;
+ }
+
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out;
out:
TRACE_EXIT();
RAID_RETRIES);
TRACE_DBG("TEST_UNIT_READY done: %x", res);
} while ((--retries > 0) && res);
- if (res)
+ if (res) {
res = -ENODEV;
+ goto out;
+ }
+
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out;
out:
TRACE_EXIT();
goto out_free_buf;
}
+ res = scst_obtain_device_parameters(dev);
+ if (res != 0)
+ goto out_free_buf;
+
out_free_buf:
kfree(buffer);
#include "scst_cdbprobe.h"
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
-int scst_check_internal_sense(struct scst_device *dev, int result,
+static void scst_check_internal_sense(struct scst_device *dev, int result,
uint8_t *sense, int sense_len);
void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
init_waitqueue_head(&dev->on_dev_waitQ);
dev->dev_double_ua_possible = 1;
dev->dev_serialized = 1;
+ dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
dev->dev_num = dev_num++;
*out_dev = dev;
PRINT_ERROR("RELEASE failed: %d", rc);
TRACE_BUFFER("RELEASE sense", sense,
SCST_SENSE_BUFFERSIZE);
- if (scst_check_internal_sense(tgt_dev->dev, rc,
- sense, SCST_SENSE_BUFFERSIZE) != 0)
- break;
+ scst_check_internal_sense(tgt_dev->dev, rc,
+ sense, SCST_SENSE_BUFFERSIZE);
}
}
return res;
}
-int scst_check_internal_sense(struct scst_device *dev, int result,
+static void scst_check_internal_sense(struct scst_device *dev, int result,
uint8_t *sense, int sense_len)
{
TRACE_ENTRY();
if (host_byte(result) == DID_RESET) {
+ TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
+ "reset UA");
scst_set_sense(sense, sense_len,
SCST_LOAD_SENSE(scst_sense_reset_UA));
scst_dev_check_set_UA(dev, NULL, sense, sense_len);
- } else if (SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
+ } else if ((status_byte(result) == CHECK_CONDITION) &&
+ SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
scst_dev_check_set_UA(dev, NULL, sense, sense_len);
TRACE_EXIT();
- return 0;
+ return;
}
int scst_obtain_device_parameters(struct scst_device *dev)
goto out;
} else {
- PRINT_ERROR("Internal MODE_SENSE failed: %d", res);
- TRACE_BUFFER("MODE_SENSE sense", sense_buffer,
- sizeof(sense_buffer));
- if (scst_check_internal_sense(dev, res, sense_buffer,
- sizeof(sense_buffer)) != 0)
- break;
+ TRACE(TRACE_MGMT_MINOR, "Internal MODE SENSE to device "
+ "%d:%d:%d:%d failed: %x", dev->scsi_dev->host->host_no,
+ dev->scsi_dev->channel, dev->scsi_dev->id,
+ dev->scsi_dev->lun, res);
+ TRACE_BUFF_FLAG(TRACE_MGMT_MINOR, "MODE SENSE sense",
+ sense_buffer, sizeof(sense_buffer));
+ if ((status_byte(res) == CHECK_CONDITION) &&
+ SCST_SENSE_VALID(sense_buffer) &&
+ (sense_buffer[2] == ILLEGAL_REQUEST)) {
+ res = 0;
+ goto out;
+ }
+ scst_check_internal_sense(dev, res, sense_buffer,
+ sizeof(sense_buffer));
}
}
res = -ENODEV;
list_add_tail(&dev->dev_list_entry, &scst_dev_list);
- res = scst_obtain_device_parameters(dev);
- if (res != 0)
- goto out_free;
-
list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
if (dt->type == scsidp->type) {
res = scst_assign_dev_handler(dev, dt);
EXPORT_SYMBOL(scst_get_cdb_info);
EXPORT_SYMBOL(scst_cmd_get_tgt_priv_lock);
EXPORT_SYMBOL(scst_cmd_set_tgt_priv_lock);
+EXPORT_SYMBOL(scst_obtain_device_parameters);
#ifdef DEBUG
EXPORT_SYMBOL(scst_random);
int scst_get_cdb_len(const uint8_t *cdb);
-int scst_obtain_device_parameters(struct scst_device *dev);
-
void __scst_dev_check_set_UA(struct scst_device *dev, struct scst_cmd *exclude,
const uint8_t *sense, int sense_len);
static inline void scst_dev_check_set_UA(struct scst_device *dev,
static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
{
switch(mgmt_fn) {
+#ifdef ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
case SCST_ABORT_TASK:
+#endif
#if 0
case SCST_ABORT_TASK_SET:
case SCST_CLEAR_TASK_SET: