- Timer-based retries for targets after SCST_TGT_RES_QUEUE_FULL status
implemented.
+ - More intelligent IO flow control implemented.
+
- Fixed broken CDROM FILEIO. Before that it always reported
"No medium found"
- Fixed READ(6)/WRITE(6) CDB decoding for block devices.
- This bug prevented FreeBSD initiator from working.
+ This bug prevented FreeBSD initiators from working.
- Implemented sgv_pool. It is a mempool-like interface that caches
built SG-vectors in order not to rebuild them again for every
- Exported symbols are now not GPL'ed
- - Various cleanups and bug fixes.
+ - Various cleanups and a lot of bug fixes.
Summary of changes between versions 0.9.3 and 0.9.4
---------------------------------------------------
are seen remotely. There must be a LUN 0 in each security group, i.e.,
LU numbering must not start from, e.g., 1.
-Module "scst_target" supports parameter "scst_threads", which allows to
-set count of SCST's threads (CPU count by default).
-
IMPORTANT: without loading an appropriate device handler, corresponding devices
========= will be invisible to remote initiators, which could lead to holes
in the LUN addressing, so automatic device scanning by remote SCSI
and eases CPU load, but could create a security hole (information
leakage), so enable it if you have strict security requirements.
+Module parameters
+-----------------
+
+Module scsi_tgt supports the following parameters:
+
+ - scst_threads - sets the number of SCST threads. By default it is the
+ number of CPUs.
+
+ - scst_max_cmd_mem - sets the maximum amount of memory in Mb that SCST
+ commands may consume for data buffers at any given time. By default it
+ is approximately TotalMem/4.
+
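+For example, a module load with both parameters set explicitly (the values
+below are purely illustrative) could look like:
+
+   modprobe scsi_tgt scst_threads=4 scst_max_cmd_mem=256
+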
SCST "/proc" commands
---------------------
To enable it, set SCST_HIGHMEM to 1 in scst_priv.h. HIGHMEM is not
supported on 2.4 and is not going to be.
- - More intelligent IO-throttling.
-
- Small ToDo's spread all over the code.
- Investigate possible missed emulated UA cases.
/* Set if the cmd is dead and can be destroyed at any time */
#define SCST_CMD_CAN_BE_DESTROYED 6
-/*
- * Set if the cmd is throtteled, ie put on hold since there
- * are too many pending commands.
- */
-#define SCST_CMD_THROTTELED 7
-
/*************************************************************
** Tgt_dev's flags
*************************************************************/
*/
unsigned int sg_buff_modified:1;
+ /*
+ * Set if the cmd's memory requirements have been checked and found
+ * acceptable
+ */
+ unsigned int mem_checked:1;
+
/**************************************************************/
unsigned long cmd_flags; /* cmd's async flags */
struct scst_tgt *tgt; /* to save extra dereferences */
struct scst_device *dev; /* to save extra dereferences */
- lun_t lun; /* LUN for this cmd */
+ lun_t lun; /* LUN for this cmd */
- struct scst_tgt_dev *tgt_dev; /* corresponding device for this cmd */
+ struct scst_tgt_dev *tgt_dev; /* corresponding device for this cmd */
struct scsi_request *scsi_req; /* SCSI request */
*/
int cmd_count;
- /* Throttled commands, protected by scst_list_lock */
- struct list_head thr_cmd_list;
-
spinlock_t tgt_dev_lock; /* per-session device lock */
/* List of UA's for this device, protected by tgt_dev_lock */
*/
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len);
+/*
+ * Checks if the total memory allocated by commands is less than the
+ * defined limit (scst_cur_max_cmd_mem) and returns 0 if so. Otherwise,
+ * returns 1 and sets the QUEUE FULL or BUSY status on the cmd as well
+ * as the SCST_CMD_STATE_XMIT_RESP state. Target drivers and dev handlers
+ * are required to call this function if they allocate data buffers on
+ * their own.
+ */
+int scst_check_mem(struct scst_cmd *cmd);
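+
+/*
+ * Illustrative usage sketch (not taken from the SCST code): a target
+ * driver or dev handler that allocates data buffers on its own would
+ * check the limit before the allocation. On failure the QUEUE FULL/BUSY
+ * status and the SCST_CMD_STATE_XMIT_RESP state are already set, so the
+ * caller only has to let SCST deliver the response:
+ *
+ *	if (scst_check_mem(cmd) != 0)
+ *		return;
+ *	buf = kmalloc(cmd->bufflen, GFP_KERNEL);
+ */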
+
+
#endif /* __SCST_H */
LIST_HEAD(scst_init_cmd_list);
LIST_HEAD(scst_cmd_list);
DECLARE_WAIT_QUEUE_HEAD(scst_list_waitQ);
+
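+/* Protects scst_cur_cmd_mem and scst_cur_max_cmd_mem below */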
+spinlock_t scst_cmd_mem_lock = SPIN_LOCK_UNLOCKED;
+unsigned long scst_cur_cmd_mem, scst_cur_max_cmd_mem;
+
struct tasklet_struct scst_tasklets[NR_CPUS];
struct scst_sgv_pools scst_sgv;
+DECLARE_WORK(scst_cmd_mem_work, scst_cmd_mem_work_fn, 0);
+
+unsigned long scst_max_cmd_mem;
+
LIST_HEAD(scst_mgmt_cmd_list);
LIST_HEAD(scst_active_mgmt_cmd_list);
LIST_HEAD(scst_delayed_mgmt_cmd_list);
module_param_named(scst_threads, scst_threads, int, 0);
MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
+module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, long, 0);
+MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
+ "the SCST commands at any given time in Mb");
+
int scst_register_target_template(struct scst_tgt_template *vtt)
{
int res = 0;
}
atomic_inc(&scst_threads_count);
- PRINT_INFO_PR("SCST version %s loaded successfully",
- SCST_VERSION_STRING);
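+ /*
+  * If scst_max_cmd_mem was not set as a module parameter, default it
+  * to 1/4 of the total RAM (capped at 1 GB on 32-bit systems);
+  * otherwise convert the supplied value from Mb to bytes.
+  */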
+ if (scst_max_cmd_mem == 0) {
+ struct sysinfo si;
+ si_meminfo(&si);
+#if BITS_PER_LONG == 32
+ scst_max_cmd_mem = min(((uint64_t)si.totalram << PAGE_SHIFT) >> 2,
+ (uint64_t)1 << 30);
+#else
+ scst_max_cmd_mem = (si.totalram << PAGE_SHIFT) >> 2;
+#endif
+ } else
+ scst_max_cmd_mem <<= 20;
+
+ scst_cur_max_cmd_mem = scst_max_cmd_mem;
+
+ PRINT_INFO_PR("SCST version %s loaded successfully (max mem for "
+ "commands %ld Mb)", SCST_VERSION_STRING, scst_max_cmd_mem >> 20);
out:
TRACE_EXIT_RES(res);
}
}
+ if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
+ cancel_delayed_work(&scst_cmd_mem_work);
+ flush_scheduled_work();
+ }
+
scst_proc_cleanup_module();
scsi_unregister_interface(&scst_interface);
scst_destroy_acg(scst_default_acg);
#endif
EXPORT_SYMBOL(__scst_get_buf);
+EXPORT_SYMBOL(scst_check_mem);
/*
* Other Commands
{
scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
TRACE_MGMT_DBG("Sending BUSY status to initiator %s "
- "(cmds count %d, queue_type %x, sess->init_phase %d), "
- "probably the system is overloaded",
+ "(cmds count %d, queue_type %x, sess->init_phase %d)",
cmd->sess->initiator_name, cmd->sess->sess_cmd_count,
cmd->queue_type, cmd->sess->init_phase);
} else {
scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
TRACE_MGMT_DBG("Sending QUEUE_FULL status to initiator %s "
- "(cmds count %d, queue_type %x, sess->init_phase %d), "
- "probably the system is overloaded",
+ "(cmds count %d, queue_type %x, sess->init_phase %d)",
cmd->sess->initiator_name, cmd->sess->sess_cmd_count,
cmd->queue_type, cmd->sess->init_phase);
}
(uint64_t)tgt_dev->acg_dev->lun);
}
- INIT_LIST_HEAD(&tgt_dev->thr_cmd_list);
spin_lock_init(&tgt_dev->tgt_dev_lock);
INIT_LIST_HEAD(&tgt_dev->UA_list);
spin_lock_init(&tgt_dev->sn_lock);
return;
}
-/* Called under scst_list_lock and IRQs disabled */
-void scst_throttle_cmd(struct scst_cmd *cmd)
-{
- struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-
- TRACE(TRACE_RETRY, "Too many pending commands in session, initiator "
- "\"%s\". Moving cmd %p to thr cmd list",
- (cmd->sess->initiator_name[0] == '\0') ? "Anonymous" :
- cmd->sess->initiator_name, cmd);
- list_move_tail(&cmd->cmd_list_entry, &tgt_dev->thr_cmd_list);
- set_bit(SCST_CMD_THROTTELED, &cmd->cmd_flags);
-}
-
-/* Called under scst_list_lock and IRQs disabled */
-void scst_unthrottle_cmd(struct scst_cmd *cmd)
-{
- TRACE(TRACE_RETRY|TRACE_DEBUG, "Moving cmd %p from "
- "thr cmd list to active cmd list", cmd);
- list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
- clear_bit(SCST_CMD_THROTTELED, &cmd->cmd_flags);
-}
-
static struct scst_cmd *scst_inc_expected_sn(
struct scst_tgt_dev *tgt_dev, struct scst_cmd *out_of_sn_cmd)
{
obj->sg_count = 0;
for (pg = 0; pg < pages; pg++) {
#ifdef DEBUG_OOM
- if (((scst_random() % 100) == 55))
+ if ((scst_random() % 10000) == 55)
obj->entries[obj->sg_count].page = NULL;
else
#endif
/* Set if a TM command is being performed */
#define SCST_FLAG_TM_ACTIVE 2
+/* Set if scst_cmd_mem_work is scheduled */
+#define SCST_FLAG_CMD_MEM_WORK_SCHEDULED 3
+
/**
** Return codes for cmd state process functions
**/
#define SCST_THREAD_FLAGS CLONE_KERNEL
#define SCST_TGT_RETRY_TIMEOUT (3*HZ/2)
+#define SCST_CMD_MEM_TIMEOUT (120*HZ)
static inline int scst_get_context(void) {
/* Be overinsured */
extern struct list_head scst_init_cmd_list;
extern struct list_head scst_cmd_list;
+extern spinlock_t scst_cmd_mem_lock;
+extern unsigned long scst_max_cmd_mem, scst_cur_max_cmd_mem, scst_cur_cmd_mem;
+extern struct work_struct scst_cmd_mem_work;
+
+/* The following lists are protected by scst_list_lock as well */
extern struct list_head scst_mgmt_cmd_list;
extern struct list_head scst_active_mgmt_cmd_list;
extern struct list_head scst_delayed_mgmt_cmd_list;
void scst_cmd_tasklet(long p);
int scst_mgmt_cmd_thread(void *arg);
int scst_mgmt_thread(void *arg);
+void scst_cmd_mem_work_fn(void *p);
struct scst_device *scst_alloc_device(int gfp_mask);
void scst_free_device(struct scst_device *tgt_dev);
goto out;
}
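+/*
+ * Delayed work that gradually (by 1/8 per run) raises scst_cur_max_cmd_mem
+ * back to scst_max_cmd_mem after it has been lowered by
+ * scst_low_cur_max_cmd_mem().
+ */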
+void scst_cmd_mem_work_fn(void *p)
+{
+ TRACE_ENTRY();
+
+ spin_lock_bh(&scst_cmd_mem_lock);
+
+ scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
+ if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
+ TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
+ schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
+ } else {
+ scst_cur_max_cmd_mem = scst_max_cmd_mem;
+ clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
+ }
+ TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
+
+ spin_unlock_bh(&scst_cmd_mem_lock);
+
+ TRACE_EXIT();
+ return;
+}
+
+int scst_check_mem(struct scst_cmd *cmd)
+{
+ int res = 0;
+
+ TRACE_ENTRY();
+
+ if (cmd->mem_checked)
+ goto out;
+
+ spin_lock_bh(&scst_cmd_mem_lock);
+
+ scst_cur_cmd_mem += cmd->bufflen;
+ cmd->mem_checked = 1;
+ if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
+ goto out_unlock;
+
+ TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
+ "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
+ "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
+ (cmd->sess->initiator_name[0] == '\0') ?
+ "Anonymous" : cmd->sess->initiator_name,
+ scst_cur_max_cmd_mem >> 10);
+
+ scst_cur_cmd_mem -= cmd->bufflen;
+ cmd->mem_checked = 0;
+ scst_set_busy(cmd);
+ cmd->state = SCST_CMD_STATE_XMIT_RESP;
+ res = 1;
+
+out_unlock:
+ spin_unlock_bh(&scst_cmd_mem_lock);
+
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
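+/*
+ * Lowers scst_cur_max_cmd_mem to 3/4 of the currently allocated amount
+ * (but not below 16 Mb) and (re)schedules scst_cmd_mem_work to gradually
+ * restore it. Called when a data buffer allocation fails.
+ */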
+static void scst_low_cur_max_cmd_mem(void)
+{
+ TRACE_ENTRY();
+
+ if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
+ cancel_delayed_work(&scst_cmd_mem_work);
+ flush_scheduled_work();
+ clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
+ }
+
+ spin_lock_bh(&scst_cmd_mem_lock);
+
+ scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) +
+ (scst_cur_cmd_mem >> 2);
+ if (scst_cur_max_cmd_mem < 16*1024*1024)
+ scst_cur_max_cmd_mem = 16*1024*1024;
+
+ if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
+ TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
+ schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
+ set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
+ }
+
+ spin_unlock_bh(&scst_cmd_mem_lock);
+
+ TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
+
+ TRACE_EXIT();
+ return;
+}
+
static int scst_prepare_space(struct scst_cmd *cmd)
{
int r, res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
+ r = scst_check_mem(cmd);
+ if (unlikely(r != 0))
+ goto out;
+
if (cmd->data_buf_tgt_alloc) {
TRACE_MEM("%s", "Custom tgt data buf allocation requested");
r = cmd->tgtt->alloc_data_buf(cmd);
out_no_space:
TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
- "(size %zd), sending BUSY status", cmd->bufflen);
+ "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
+ scst_low_cur_max_cmd_mem();
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_DEV_DONE;
res = SCST_CMD_STATE_RES_CONT_SAME;
TRACE_ENTRY();
+ if (cmd->mem_checked) {
+ spin_lock_bh(&scst_cmd_mem_lock);
+ scst_cur_cmd_mem -= cmd->bufflen;
+ spin_unlock_bh(&scst_cmd_mem_lock);
+ }
+
spin_lock_irq(&scst_list_lock);
TRACE_DBG("Deleting cmd %p from cmd list", cmd);
if (cmd->mgmt_cmnd)
scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
- if (likely(cmd->tgt_dev != NULL)) {
- struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
- tgt_dev->cmd_count--;
- if (!list_empty(&tgt_dev->thr_cmd_list)) {
- struct scst_cmd *t =
- list_entry(tgt_dev->thr_cmd_list.next,
- typeof(*t), cmd_list_entry);
- scst_unthrottle_cmd(t);
- if (!cmd->processible_env)
- wake_up(&scst_list_waitQ);
- }
- }
+ if (likely(cmd->tgt_dev != NULL))
+ cmd->tgt_dev->cmd_count--;
cmd->sess->sess_cmd_count--;
res = scst_translate_lun(cmd);
if (likely(res == 0)) {
cmd->state = SCST_CMD_STATE_DEV_PARSE;
- if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS)
-#if 0 /* don't know how it's better */
- {
- scst_throttle_cmd(cmd);
- } else {
- BUG_ON(!list_empty(&cmd->tgt_dev->thr_cmd_list));
- TRACE_DBG("Moving cmd %p to active cmd list", cmd);
- list_move_tail(&cmd->cmd_list_entry,
- &scst_active_cmd_list);
- }
-#else
- {
+ if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
TRACE(TRACE_RETRY, "Too many pending commands in "
"session, returning BUSY to initiator \"%s\"",
(cmd->sess->initiator_name[0] == '\0') ?
}
TRACE_DBG("Moving cmd %p to active cmd list", cmd);
list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
-#endif
} else if (res < 0) {
TRACE_DBG("Finishing cmd %p", cmd);
scst_set_cmd_error(cmd,
set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
smp_mb__after_set_bit();
- if (test_bit(SCST_CMD_THROTTELED, &cmd->cmd_flags))
- scst_unthrottle_cmd(cmd);
-
if (call_dev_task_mgmt_fn && cmd->tgt_dev)
scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);