/*
 * Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __SCST_PRIV_H
#define __SCST_PRIV_H

#include <linux/version.h>
#include <linux/types.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#include <scsi/scsi_request.h>
#endif

#define SCST_MAJOR		177

#define TRACE_RETRY		0x80000000
#define TRACE_SCSI_SERIALIZING	0x40000000
#define TRACE_SEND_TOP		0x20000000 /* "top" is the edge away from the interrupt */
#define TRACE_RECV_TOP		0x01000000
#define TRACE_SEND_BOT		0x08000000 /* "bottom" is the edge toward the interrupt */
#define TRACE_RECV_BOT		0x04000000

#define LOG_PREFIX "scst"

#ifdef DEBUG
/*
#define SCST_DEFAULT_LOG_FLAGS (TRACE_ALL & ~TRACE_MEMORY & ~TRACE_BUFF)
#define SCST_DEFAULT_LOG_FLAGS (TRACE_ALL & ~TRACE_MEMORY & ~TRACE_BUFF & \
	~TRACE_SCSI & ~TRACE_SCSI_SERIALIZING & ~TRACE_DEBUG)
*/
#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR | TRACE_PID | \
	TRACE_FUNCTION | TRACE_SPECIAL | TRACE_MGMT | TRACE_MGMT_DEBUG)
#elif defined(TRACING)
#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR | TRACE_PID)
#else
#define SCST_DEFAULT_LOG_FLAGS 0
#endif

/*************************************************************
 ** Bits for scst_flags
 *************************************************************/

/* Set if initialization of new commands should be suspended for a while */
#define SCST_FLAG_SUSPENDED			0

/*
 * If set, SCST's threads exit immediately without performing any
 * sessions' shutdown tasks, so at this point all the sessions
 * must already be shut down.
 */
#define SCST_FLAG_SHUTDOWN			1

/* Set if a TM (task management) command is being performed */
#define SCST_FLAG_TM_ACTIVE			2

/* Set if scst_cmd_mem_work is scheduled */
#define SCST_FLAG_CMD_MEM_WORK_SCHEDULED	3

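/*
 * Usage sketch (illustration only, not part of the interface): these values
 * are bit numbers in the scst_flags bitmap above, so they are intended to be
 * used with the atomic bitops, e.g.:
 *
 *	set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
 *	if (test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
 *		(defer initialization of new commands)
 *	clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
 */
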
/*************************************************************
 ** Return codes for cmd state processing functions
 *************************************************************/
#define SCST_CMD_STATE_RES_CONT_SAME	0
#define SCST_CMD_STATE_RES_CONT_NEXT	1
#define SCST_CMD_STATE_RES_NEED_THREAD	2
#define SCST_CMD_STATE_RES_RESTART	3

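/*
 * Illustration only (example_process_state() and need_to_sleep are
 * hypothetical, not SCST symbols): a state processing routine reports through
 * these codes whether the state machine should keep working on the same
 * command, move on to the next one, or be rescheduled to thread context:
 *
 *	static int example_process_state(struct scst_cmd *cmd, int context)
 *	{
 *		if ((context == SCST_CONTEXT_DIRECT_ATOMIC) && need_to_sleep)
 *			return SCST_CMD_STATE_RES_NEED_THREAD;
 *		(do the work, possibly advancing cmd->state)
 *		return SCST_CMD_STATE_RES_CONT_SAME;
 *	}
 */
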
/* Name of the "default" security group */
#define SCST_DEFAULT_ACG_NAME	"Default"

/*
 * Maximum count of uncompleted commands that an initiator can queue
 * on any device. Above this limit it will receive QUEUE FULL
 * (TASK SET FULL) status.
 */
#define SCST_MAX_DEVICE_COMMANDS 128

#define SCST_THREAD_FLAGS	CLONE_KERNEL

#define SCST_TGT_RETRY_TIMEOUT	(3*HZ/2)	/* 1.5 seconds */
#define SCST_CMD_MEM_TIMEOUT	(120*HZ)

static inline int scst_get_context(void)
{
	/* Treat any atomic or interrupt context as atomic, to be safe */
	return (in_atomic() || in_interrupt()) ? SCST_CONTEXT_DIRECT_ATOMIC :
		SCST_CONTEXT_DIRECT;
}

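/*
 * Illustration only: a target driver that can be entered both from IRQ and
 * from process context may use this helper to pick the preferred context
 * when kicking SCST, e.g. (assuming the usual scst_cmd_init_done() entry
 * point):
 *
 *	scst_cmd_init_done(cmd, scst_get_context());
 */
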
#define SCST_MGMT_CMD_CACHE_STRING "scst_mgmt_cmd"
extern kmem_cache_t *scst_mgmt_cachep;
extern mempool_t *scst_mgmt_mempool;

#define SCST_UA_CACHE_STRING "scst_ua"
extern kmem_cache_t *scst_ua_cachep;
extern mempool_t *scst_ua_mempool;

#define SCST_CMD_CACHE_STRING "scst_cmd"
extern kmem_cache_t *scst_cmd_cachep;

#define SCST_SESSION_CACHE_STRING "scst_session"
extern kmem_cache_t *scst_sess_cachep;

#define SCST_TGT_DEV_CACHE_STRING "scst_tgt_dev"
extern kmem_cache_t *scst_tgtd_cachep;

#define SCST_ACG_DEV_CACHE_STRING "scst_acg_dev"
extern kmem_cache_t *scst_acgd_cachep;

extern struct scst_sgv_pools scst_sgv;

extern int scst_num_cpus;
extern unsigned long scst_flags;
extern struct semaphore scst_mutex;
extern atomic_t scst_cmd_count;
extern spinlock_t scst_list_lock;
extern struct list_head scst_dev_wait_sess_list; /* protected by scst_list_lock */
extern struct list_head scst_template_list;	 /* protected by scst_mutex */
extern struct list_head scst_dev_list;		 /* protected by scst_mutex */
extern struct list_head scst_dev_type_list;	 /* protected by scst_mutex */
extern wait_queue_head_t scst_dev_cmd_waitQ;

extern struct list_head scst_acg_list;
extern struct scst_acg *scst_default_acg;

/* The following lists are protected by scst_list_lock */
extern struct list_head scst_active_cmd_list;
extern struct list_head scst_init_cmd_list;
extern struct list_head scst_cmd_list;

extern spinlock_t scst_cmd_mem_lock;
extern unsigned long scst_max_cmd_mem, scst_cur_max_cmd_mem, scst_cur_cmd_mem;
extern struct work_struct scst_cmd_mem_work;

/* The following lists are protected by scst_list_lock as well */
extern struct list_head scst_mgmt_cmd_list;
extern struct list_head scst_active_mgmt_cmd_list;
extern struct list_head scst_delayed_mgmt_cmd_list;

extern struct tasklet_struct scst_tasklets[NR_CPUS];
extern wait_queue_head_t scst_list_waitQ;

extern wait_queue_head_t scst_mgmt_cmd_list_waitQ;

extern wait_queue_head_t scst_mgmt_waitQ;
extern spinlock_t scst_mgmt_lock;
extern struct list_head scst_sess_mgmt_list;

extern int scst_threads;
extern int scst_shut_threads_count;
extern atomic_t scst_threads_count;
extern int scst_thread_num;

extern struct semaphore *scst_shutdown_mutex;

extern spinlock_t scst_temp_UA_lock;
extern uint8_t scst_temp_UA[SCSI_SENSE_BUFFERSIZE];

extern struct scst_cmd *__scst_check_deferred_commands(
	struct scst_tgt_dev *tgt_dev, int expected_sn);

/* Inline wrapper used to save a function call on the fast path */
static inline struct scst_cmd *scst_check_deferred_commands(
	struct scst_tgt_dev *tgt_dev, int expected_sn)
{
	if (tgt_dev->def_cmd_count == 0)
		return NULL;
	else
		return __scst_check_deferred_commands(tgt_dev, expected_sn);
}

static inline int __scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev)
{
	/*
	 * No locks are needed, because only one thread at a time can
	 * call this function (calls are serialized by sn). It is also
	 * assumed that the increment cannot be observed half-done.
	 */
	typeof(tgt_dev->expected_sn) e;

	e = tgt_dev->expected_sn;
	tgt_dev->expected_sn++;
	smp_mb(); /* the write must happen before the def_cmd_count read */
	e++; /* e now holds the new (next) expected_sn */

	TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/, "Next expected_sn: %d", e);
	return e;
}

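/*
 * Illustration only (a sketch, not an actual call site): once a command's
 * slot in the SN order has been consumed, the expected SN is advanced and
 * any command deferred while waiting for it can be picked up:
 *
 *	expected_sn = __scst_inc_expected_sn(tgt_dev);
 *	cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
 *	if (cmd != NULL)
 *		(re-activate the deferred cmd)
 */
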
void scst_inc_expected_sn_unblock(struct scst_tgt_dev *tgt_dev,
	struct scst_cmd *cmd_sn, int locked);

int scst_cmd_thread(void *arg);
void scst_cmd_tasklet(long p);
int scst_mgmt_cmd_thread(void *arg);
int scst_mgmt_thread(void *arg);
void scst_cmd_mem_work_fn(void *p);

struct scst_device *scst_alloc_device(int gfp_mask);
void scst_free_device(struct scst_device *tgt_dev);

struct scst_acg *scst_alloc_add_acg(const char *acg_name);
int scst_destroy_acg(struct scst_acg *acg);

int scst_sess_alloc_tgt_devs(struct scst_session *sess);
void scst_sess_free_tgt_devs(struct scst_session *sess);
void scst_reset_tgt_dev(struct scst_tgt_dev *tgt_dev, int nexus_loss);

int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
	int read_only);
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev);

int scst_acg_add_name(struct scst_acg *acg, const char *name);
int scst_acg_remove_name(struct scst_acg *acg, const char *name);

int scst_assign_dev_handler(struct scst_device *dev,
	struct scst_dev_type *handler);

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
	const char *initiator_name);
void scst_free_session(struct scst_session *sess);
void scst_free_session_callback(struct scst_session *sess);

struct scst_cmd *scst_alloc_cmd(int gfp_mask);
void scst_free_cmd(struct scst_cmd *cmd);
static inline void scst_destroy_cmd(struct scst_cmd *cmd)
{
	TRACE_MEM("kfree for cmd: %p", cmd);
	kmem_cache_free(scst_cmd_cachep, cmd);
}

void scst_check_retries(struct scst_tgt *tgt, int processible_env);
void scst_tgt_retry_timer_fn(unsigned long arg);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
int scst_alloc_request(struct scst_cmd *cmd);
void scst_release_request(struct scst_cmd *cmd);

static inline void scst_do_req(struct scsi_request *sreq,
	const void *cmnd, void *buffer, unsigned bufflen,
	void (*done)(struct scsi_cmnd *), int timeout, int retries)
{
#ifdef STRICT_SERIALIZING
	scsi_do_req(sreq, cmnd, buffer, bufflen, done, timeout, retries);
#elif defined(FILEIO_ONLY)
	BUG(); /* pass-through is not used in FILEIO_ONLY builds */
#else
	scsi_do_req_fifo(sreq, cmnd, buffer, bufflen, done, timeout, retries);
#endif
}
#else
static inline int scst_exec_req(struct scsi_device *sdev,
	const unsigned char *cmd, int cmd_len, int data_direction,
	void *buffer, unsigned bufflen, int use_sg, int timeout, int retries,
	void *privdata, void (*done)(void *, char *, int, int), gfp_t gfp)
{
#ifdef STRICT_SERIALIZING
	return scsi_execute_async(sdev, cmd, cmd_len, data_direction, buffer,
		bufflen, use_sg, timeout, retries, privdata, done, gfp);
#elif defined(FILEIO_ONLY)
	BUG(); /* pass-through is not used in FILEIO_ONLY builds */
	return -1;
#else
	return scsi_execute_async_fifo(sdev, cmd, cmd_len, data_direction,
		buffer, bufflen, use_sg, timeout, retries, privdata, done, gfp);
#endif
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */

int scst_alloc_space(struct scst_cmd *cmd);
void scst_release_space(struct scst_cmd *cmd);
void scst_scsi_op_list_init(void);

lun_t scst_unpack_lun(const uint8_t *lun, int len);

struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
	uint32_t tag);

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask);
void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int del);

int scst_proc_init_module(void);
void scst_proc_cleanup_module(void);
int scst_build_proc_target_dir_entries(struct scst_tgt_template *vtt);
void scst_cleanup_proc_target_dir_entries(struct scst_tgt_template *vtt);
int scst_build_proc_target_entries(struct scst_tgt *vtt);
void scst_cleanup_proc_target_entries(struct scst_tgt *vtt);
int scst_build_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type);
void scst_cleanup_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type);

int scst_get_cdb_len(const uint8_t *cdb);

void __scst_process_UA(struct scst_device *dev, struct scst_cmd *exclude,
	const uint8_t *sense, int sense_len, int internal);
static inline void scst_process_UA(struct scst_device *dev,
	struct scst_cmd *exclude, const uint8_t *sense, int sense_len,
	int internal)
{
	spin_lock_bh(&dev->dev_lock);
	__scst_process_UA(dev, exclude, sense, sense_len, internal);
	spin_unlock_bh(&dev->dev_lock);
}

void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev, const uint8_t *sense,
	int sense_len);
void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len);
int scst_set_pending_UA(struct scst_cmd *cmd);
void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);

void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
	int other_ini, int call_dev_task_mgmt_fn);
void scst_process_reset(struct scst_device *dev,
	struct scst_session *originator, struct scst_cmd *exclude_cmd,
	struct scst_mgmt_cmd *mcmd);

static inline int scst_is_ua_command(struct scst_cmd *cmd)
{
	return ((cmd->cdb[0] != INQUIRY) &&
		(cmd->cdb[0] != REQUEST_SENSE) &&
		(cmd->cdb[0] != REPORT_LUNS));
}

/*
 * Returns 1 if cmd's CDB is handled locally by SCST, 0 otherwise.
 * The dev handler's parse() and dev_done() are not called for such commands.
 */
static inline int scst_is_cmd_local(struct scst_cmd *cmd)
{
	int res = 0;
	switch (cmd->cdb[0]) {
	case REPORT_LUNS:
		res = 1;
		break;
	}
	return res;
}

/*
 * Some notes on device "blocking". Blocking means that no new commands
 * will go from SCST to the underlying SCSI device until it is unblocked.
 * Commands that are already on the device are not affected.
 */

extern int scst_inc_on_dev_cmd(struct scst_cmd *cmd);
extern void scst_unblock_cmds(struct scst_device *dev);

static inline void __scst_block_dev(struct scst_device *dev)
{
	dev->block_count++;
	TRACE_MGMT_DBG("Device BLOCK(%d), dev %p", dev->block_count, dev);
}

static inline void scst_block_dev(struct scst_device *dev,
	unsigned int outstanding)
{
	spin_lock_bh(&dev->dev_lock);
	__scst_block_dev(dev);
	spin_unlock_bh(&dev->dev_lock);

	TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
		"%d)", outstanding, atomic_read(&dev->on_dev_count));
	wait_event(dev->on_dev_waitQ,
		atomic_read(&dev->on_dev_count) <= outstanding);
	TRACE_MGMT_DBG("%s", "wait_event() returned");
}

static inline void scst_unblock_dev(struct scst_device *dev)
{
	spin_lock_bh(&dev->dev_lock);
	TRACE_MGMT_DBG("Device UNBLOCK(%d), dev %p",
		dev->block_count-1, dev);
	if (--dev->block_count == 0)
		scst_unblock_cmds(dev);
	spin_unlock_bh(&dev->dev_lock);
}

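/*
 * Illustration only: block/unblock are used in pairs around work that must
 * not race with commands being sent to the device, e.g. (sketch):
 *
 *	scst_block_dev(dev, 0);		(wait until nothing is on the device)
 *	(do the management work)
 *	scst_unblock_dev(dev);		(let commands flow again)
 */
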
static inline void scst_dec_on_dev_cmd(struct scst_cmd *cmd)
{
	if (cmd->blocking) {
		TRACE_MGMT_DBG("cmd %p (tag %d): unblocking dev %p", cmd,
			cmd->tag, cmd->dev);
		cmd->blocking = 0;
		scst_unblock_dev(cmd->dev);
	}
	atomic_dec(&cmd->dev->on_dev_count);
	smp_mb__after_atomic_dec();
	if (unlikely(cmd->dev->block_count != 0))
		wake_up_all(&cmd->dev->on_dev_waitQ);
}

static inline void scst_inc_cmd_count(void)
{
	atomic_inc(&scst_cmd_count);
	smp_mb__after_atomic_inc();
	TRACE_DBG("Incrementing scst_cmd_count(%d)",
		atomic_read(&scst_cmd_count));
}

static inline void scst_dec_cmd_count(void)
{
	int f;

	f = atomic_dec_and_test(&scst_cmd_count);
	smp_mb__after_atomic_dec();
	if (f && unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags)))
		wake_up_all(&scst_dev_cmd_waitQ);
	TRACE_DBG("Decrementing scst_cmd_count(%d)",
		atomic_read(&scst_cmd_count));
}

void scst_sched_session_free(struct scst_session *sess);

static inline void scst_sess_get(struct scst_session *sess)
{
	atomic_inc(&sess->refcnt);
}

static inline void scst_sess_put(struct scst_session *sess)
{
	if (atomic_dec_and_test(&sess->refcnt))
		scst_sched_session_free(sess);
}

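/*
 * Illustration only: session references pair a get with a put; the final put
 * schedules the actual freeing via scst_sched_session_free():
 *
 *	scst_sess_get(sess);
 *	(use sess from a context that may outlive the caller)
 *	scst_sess_put(sess);
 */
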
void __scst_suspend_activity(void);
void __scst_resume_activity(void);

extern void scst_throttle_cmd(struct scst_cmd *cmd);
extern void scst_unthrottle_cmd(struct scst_cmd *cmd);

static inline void scst_set_sense(uint8_t *buffer, int len, int key,
	int asc, int ascq)
{
	memset(buffer, 0, len);
	buffer[0] = 0x70;	/* Error Code: current, fixed format */
	buffer[2] = key;	/* Sense Key */
	buffer[7] = 0x0a;	/* Additional Sense Length */
	buffer[12] = asc;	/* ASC */
	buffer[13] = ascq;	/* ASCQ */
	TRACE_BUFFER("Sense set", buffer, len);
}

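/*
 * Illustration only: this builds fixed-format (0x70) sense data, so a
 * typical ILLEGAL REQUEST / INVALID FIELD IN CDB response could be produced
 * with the standard 0x24/0x00 ASC/ASCQ pair:
 *
 *	uint8_t sense[SCSI_SENSE_BUFFERSIZE];
 *	scst_set_sense(sense, sizeof(sense), ILLEGAL_REQUEST, 0x24, 0x00);
 */
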
static inline void scst_check_restore_sg_buff(struct scst_cmd *cmd)
{
	if (cmd->sg_buff_modified) {
		cmd->sg[cmd->orig_sg_entry].length = cmd->orig_entry_len;
		cmd->sg_cnt = cmd->orig_sg_cnt;
	}
}

#ifdef DEBUG_TM
extern void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev);
extern void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
extern void tm_dbg_check_released_cmds(void);
extern int tm_dbg_check_cmd(struct scst_cmd *cmd);
extern void tm_dbg_release_cmd(struct scst_cmd *cmd);
extern void tm_dbg_task_mgmt(const char *fn);
extern int tm_dbg_is_release(void);
#else /* DEBUG_TM */
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
static inline void tm_dbg_check_released_cmds(void) {}
static inline int tm_dbg_check_cmd(struct scst_cmd *cmd)
{
	return 0;
}
static inline void tm_dbg_release_cmd(struct scst_cmd *cmd) {}
static inline void tm_dbg_task_mgmt(const char *fn) {}
static inline int tm_dbg_is_release(void)
{
	return 0;
}
#endif /* DEBUG_TM */

#endif /* __SCST_PRIV_H */