/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int head);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);
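
/*
 * Illustrative sketch, not part of SCST itself: a dev handler that hits an
 * unrecoverable hardware failure would typically complete the command with
 * CHECK CONDITION using one of the predefined sense triples, e.g.:
 *
 *      if (unlikely(hw_failed))   // hypothetical condition, for illustration
 *              scst_set_cmd_error(cmd,
 *                      SCST_LOAD_SENSE(scst_sense_hardw_error));
 *
 * SCST_LOAD_SENSE() expands such a triple into the key, asc and ascq
 * arguments of scst_set_cmd_error().
 */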

void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);
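
/*
 * Note on the branch above (SAM semantics, stated as background): TASK SET
 * FULL is only meaningful when the initiator already has at least one other
 * command queued, so scst_set_busy() falls back to plain BUSY when this is
 * the only outstanding command or the session has not finished initializing.
 */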

int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        default:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
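
/*
 * Rationale, inferred from the checks below: commands that fail before or
 * during parsing may not have a tgt_dev assigned yet, so they are routed
 * straight to PRE_XMIT_RESP; commands in any later state still pass through
 * PRE_DEV_DONE so the dev handler gets a chance to clean up.
 */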

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
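
/*
 * Worked example for scst_set_resp_data_len() (illustrative values): with an
 * 8 KB buffer split over two 4 KB SG entries, shrinking the response to 6000
 * bytes walks the list until the running total covers resp_data_len and
 * trims the last entry that is still needed:
 *
 *      sg[0].length = 4096, sg[1].length = 4096, bufflen = 8192
 *      scst_set_resp_data_len(cmd, 6000);
 *      i = 1: l = 8192 >= 6000, left = 6000 - 4096 = 1904
 *      sg[1].length becomes 1904, sg_cnt stays 2
 *
 * The original values are saved in the orig_* fields so that
 * scst_check_restore_sg_buff() can undo the truncation later.
 */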

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
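
/*
 * The shift arithmetic above converts scst_max_dev_cmd_mem (which the
 * arithmetic implies is specified in MB) into pages: << 10 turns MB into KB,
 * and >> (PAGE_SHIFT - 10) divides by the page size expressed in KB.
 * Illustrative numbers, assuming 4 KB pages (PAGE_SHIFT = 12):
 *
 *      scst_max_dev_cmd_mem = 64
 *      max_allowed_pages = (64 << 10) >> (12 - 10) = 65536 >> 2 = 16384
 */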

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, scst_mutex - held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %lld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%lld, rd_only %d)",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

static void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = sbuf_size;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}
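
/*
 * Flow note (descriptive only, based on the surrounding functions): when a
 * device returns CHECK CONDITION without valid sense, SCST queues the
 * internal REQUEST SENSE built above at the head of the active command list
 * so it runs before other commands, and scst_complete_request_sense() below
 * copies the returned sense into the original command, or reports HARDWARE
 * ERROR if no sense could be retrieved.
 */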

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request)) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, 15, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, 15, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
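
/*
 * Note on the variant above (rationale inferred from the code): the sense
 * buffer is allocated with __GFP_NOFAIL and the RELEASE is retried up to
 * five times because a lost RELEASE would leave a stale reservation on the
 * device after the nexus that held it is gone.
 */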

/* scst_mutex supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
                PRINT_CRIT_ERROR("session %p is going to shut down with "
                        "unknown shut phase %lx", sess, sess->shut_phase);
                sBUG();
        }

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);

struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        cmd->start_time = jiffies;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

        cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
        cmd->dbl_ua_orig_resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /*
         * At this point tgt_dev can be dead, but the pointer remains non-NULL
         */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %llu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        /*
         * Target driver can already free sg buffer before calling
         * scst_tgt_cmd_done(). E.g., scst_local has to do that.
         */
        if (!cmd->tgt_data_buf_alloced)
                scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
                if (unlikely(!cmd->sent_for_exec)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                            "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
                            cmd, cmd->cdb[0], cmd->tgtt->name,
                            (long long unsigned int)cmd->lun,
                            cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change.
         */
        atomic_inc(&tgt->finished_cmds);
        /* See comment in scst_queue_retry_cmd() */
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)",
                                c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                  &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_CRIT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
1629         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1630
1631         TRACE_ENTRY();
1632
1633         /* cmd->dev->scsi_dev must be non-NULL here */
1634         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1635         if (req == NULL) {
1636                 TRACE(TRACE_OUT_OF_MEM, "%s",
1637                       "Allocation of scsi_request failed");
1638                 res = -ENOMEM;
1639                 goto out;
1640         }
1641
1642         cmd->scsi_req = req;
1643
1644         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1645         req->sr_cmd_len = cmd->cdb_len;
1646         req->sr_data_direction = cmd->data_direction;
1647         req->sr_use_sg = cmd->sg_cnt;
1648         req->sr_bufflen = cmd->bufflen;
1649         req->sr_buffer = cmd->sg;
1650         req->sr_request->rq_disk = cmd->dev->rq_disk;
1651         req->sr_sense_buffer[0] = 0;
1652
1653         cmd->scsi_req->upper_private_data = cmd;
1654
1655 out:
1656         TRACE_EXIT();
1657         return res;
1658 }
1659
1660 void scst_release_request(struct scst_cmd *cmd)
1661 {
1662         scsi_release_request(cmd->scsi_req);
1663         cmd->scsi_req = NULL;
1664 }
1665 #endif
1666
1667 int scst_alloc_space(struct scst_cmd *cmd)
1668 {
1669         gfp_t gfp_mask;
1670         int res = -ENOMEM;
1671         int atomic = scst_cmd_atomic(cmd);
1672         int flags;
1673         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1674
1675         TRACE_ENTRY();
1676
1677         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1678
1679         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1680         if (cmd->no_sgv)
1681                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1682
1683         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1684                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1685         if (cmd->sg == NULL)
1686                 goto out;
1687
1688         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1689                 static int ll;
1690                 if (ll < 10) {
1691                         PRINT_INFO("Unable to complete command due to "
1692                                 "SG IO count limitation (requested %d, "
1693                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1694                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1695                         ll++;
1696                 }
1697                 goto out_sg_free;
1698         }
1699
1700         res = 0;
1701
1702 out:
1703         TRACE_EXIT();
1704         return res;
1705
1706 out_sg_free:
1707         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1708         cmd->sgv = NULL;
1709         cmd->sg = NULL;
1710         cmd->sg_cnt = 0;
1711         goto out;
1712 }
1713
1714 static void scst_release_space(struct scst_cmd *cmd)
1715 {
1716         TRACE_ENTRY();
1717
1718         if (cmd->sgv == NULL)
1719                 goto out;
1720
1721         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
1722                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
1723                 goto out;
1724         }
1725
1726         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1727
1728         cmd->sgv = NULL;
1729         cmd->sg_cnt = 0;
1730         cmd->sg = NULL;
1731         cmd->bufflen = 0;
1732         cmd->data_len = 0;
1733
1734 out:
1735         TRACE_EXIT();
1736         return;
1737 }
1738
1739 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
1740 {
1741         struct scatterlist *src_sg, *dst_sg;
1742         unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
1743         struct page *src, *dst;
1744         unsigned int s, d, to_copy;
1745
1746         TRACE_ENTRY();
1747
1748         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
1749                 src_sg = cmd->tgt_sg;
1750                 src_sg_cnt = cmd->tgt_sg_cnt;
1751                 dst_sg = cmd->sg;
1752                 to_copy = cmd->bufflen;
1753         } else {
1754                 src_sg = cmd->sg;
1755                 src_sg_cnt = cmd->sg_cnt;
1756                 dst_sg = cmd->tgt_sg;
1757                 to_copy = cmd->resp_data_len;
1758         }
1759
1760         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
1761                 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
1762                 to_copy);
1763
1764         dst = sg_page(dst_sg);
1765         dst_len = dst_sg->length;
1766         dst_offs = dst_sg->offset;
1767
1768         s = 0;
1769         d = 0;
1770         src_offs = 0;
1771         while (s < src_sg_cnt) {
1772                 src = sg_page(&src_sg[s]);
1773                 src_len = src_sg[s].length;
1774                 src_offs += src_sg[s].offset;
1775
1776                 do {
1777                         unsigned int n;
1778
1779                         /*
1780                          * Highmem pages are not allowed here, see the
1781                          * corresponding #warning in scst_main.c. Fix
1782                          * your target driver or dev handler so that it
1783                          * does not allocate such pages!
1784                          */
1785                         EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
1786                                            PageHighMem(src));
1787
1788                         TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
1789                                 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
1790                                 cmd, to_copy, src, src_len, src_offs, dst,
1791                                 dst_len, dst_offs);
1792
1793                         if ((src_offs == 0) && (dst_offs == 0) &&
1794                             (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
1795                                 copy_page(page_address(dst), page_address(src));
1796                                 n = PAGE_SIZE;
1797                         } else {
1798                                 n = min(PAGE_SIZE - dst_offs,
1799                                         PAGE_SIZE - src_offs);
1800                                 n = min(n, src_len);
1801                                 n = min(n, dst_len);
1802                                 memcpy(page_address(dst) + dst_offs,
1803                                        page_address(src) + src_offs, n);
1804                                 dst_offs -= min(n, dst_offs);
1805                                 src_offs -= min(n, src_offs);
1806                         }
1807
1808                         TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
1809
1810                         to_copy -= n;
1811                         if (to_copy <= 0)
1812                                 goto out;
1813
1814                         src_len -= n;
1815                         dst_len -= n;
1816                         if (dst_len == 0) {
1817                                 d++;
1818                                 dst = sg_page(&dst_sg[d]);
1819                                 dst_len = dst_sg[d].length;
1820                                 dst_offs += dst_sg[d].offset;
1821                         }
1822                 } while (src_len > 0);
1823
1824                 s++;
1825         }
1826
1827 out:
1828         TRACE_EXIT();
1829         return;
1830 }
1831
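/*
 * Note (illustrative, not from the original source): scst_copy_sg() is used
 * when the target driver supplied its own data buffer (cmd->tgt_sg) separate
 * from the one SCST allocated (cmd->sg), e.g.:
 *
 *        scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);  // WRITE data in
 *        scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);    // READ data out
 *
 * (SCST_SG_COPY_TO_TARGET is assumed to be the other enum value declared in
 * the SCST headers.)  The FROM_TARGET direction copies cmd->bufflen bytes,
 * while TO_TARGET copies only cmd->resp_data_len, which may be smaller than
 * the allocated buffer.
 */
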
1832 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1833
1834 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1835 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1836
1837 int scst_get_cdb_len(const uint8_t *cdb)
1838 {
1839         return SCST_GET_CDB_LEN(cdb[0]);
1840 }
1841
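/*
 * Worked example (illustrative, not from the original source): the CDB group
 * code in bits 7..5 of the opcode selects the CDB length from
 * SCST_CDB_LENGTH[] above.  READ(10) has opcode 0x28, so
 * SCST_CDB_GROUP(0x28) == 1 and scst_get_cdb_len() returns 10.  Group 3
 * (reserved) and the vendor-specific groups 6 and 7 yield -1, i.e. the
 * length is unknown:
 *
 *        uint8_t cdb[10] = { 0x28 };        // READ(10)
 *        int len = scst_get_cdb_len(cdb);   // len == 10
 */
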
1842 /* get_trans_len_x() extracts an x-byte transfer length from the CDB, starting at offset off */
1843
1844 /* for special commands */
1845 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1846 {
1847         cmd->bufflen = 6;
1848         return 0;
1849 }
1850
1851 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1852 {
1853         cmd->bufflen = READ_CAP_LEN;
1854         return 0;
1855 }
1856
1857 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1858 {
1859         cmd->bufflen = 1;
1860         return 0;
1861 }
1862
1863 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1864 {
1865         uint8_t *p = (uint8_t *)cmd->cdb + off;
1866         int res = 0;
1867
1868         cmd->bufflen = 0;
1869         cmd->bufflen |= ((u32)p[0]) << 8;
1870         cmd->bufflen |= ((u32)p[1]);
1871
1872         switch (cmd->cdb[1] & 0x1f) {
1873         case 0:
1874         case 1:
1875         case 6:
1876                 if (cmd->bufflen != 0) {
1877                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1878                                 "allocation length for service action %x",
1879                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1880                         goto out_inval;
1881                 }
1882                 break;
1883         }
1884
1885         switch (cmd->cdb[1] & 0x1f) {
1886         case 0:
1887         case 1:
1888                 cmd->bufflen = 20;
1889                 break;
1890         case 6:
1891                 cmd->bufflen = 32;
1892                 break;
1893         case 8:
1894                 cmd->bufflen = max(28, cmd->bufflen);
1895                 break;
1896         default:
1897                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1898                         cmd->cdb[1] & 0x1f);
1899                 goto out_inval;
1900         }
1901
1902 out:
1903         return res;
1904
1905 out_inval:
1906         scst_set_cmd_error(cmd,
1907                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1908         res = 1;
1909         goto out;
1910 }
1911
1912 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1913 {
1914         cmd->bufflen = (u32)cmd->cdb[off];
1915         return 0;
1916 }
1917
1918 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1919 {
1920         cmd->bufflen = (u32)cmd->cdb[off];
1921         if (cmd->bufflen == 0)
1922                 cmd->bufflen = 256;
1923         return 0;
1924 }
1925
1926 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1927 {
1928         const uint8_t *p = cmd->cdb + off;
1929
1930         cmd->bufflen = 0;
1931         cmd->bufflen |= ((u32)p[0]) << 8;
1932         cmd->bufflen |= ((u32)p[1]);
1933
1934         return 0;
1935 }
1936
1937 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1938 {
1939         const uint8_t *p = cmd->cdb + off;
1940
1941         cmd->bufflen = 0;
1942         cmd->bufflen |= ((u32)p[0]) << 16;
1943         cmd->bufflen |= ((u32)p[1]) << 8;
1944         cmd->bufflen |= ((u32)p[2]);
1945
1946         return 0;
1947 }
1948
1949 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1950 {
1951         const uint8_t *p = cmd->cdb + off;
1952
1953         cmd->bufflen = 0;
1954         cmd->bufflen |= ((u32)p[0]) << 24;
1955         cmd->bufflen |= ((u32)p[1]) << 16;
1956         cmd->bufflen |= ((u32)p[2]) << 8;
1957         cmd->bufflen |= ((u32)p[3]);
1958
1959         return 0;
1960 }
1961
1962 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1963 {
1964         cmd->bufflen = 0;
1965         return 0;
1966 }
1967
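/*
 * Worked example (illustrative): for READ(10) the transfer length is a
 * 2-byte big-endian field at CDB offset 7, so the op table entry would use
 * get_trans_len_2() with off == 7.  Given the CDB
 * 28 00 00 00 00 08 00 00 10 00 (read 0x10 blocks starting at LBA 8), this
 * sets cmd->bufflen = (0x00 << 8) | 0x10 = 16.  Since READ(10) carries the
 * SCST_TRANSFER_LEN_TYPE_FIXED flag, the length is in blocks and the generic
 * parse routines below later scale bufflen by the device block size.
 */
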
1968 int scst_get_cdb_info(struct scst_cmd *cmd)
1969 {
1970         int dev_type = cmd->dev->handler->type;
1971         int i, res = 0;
1972         uint8_t op;
1973         const struct scst_sdbops *ptr = NULL;
1974
1975         TRACE_ENTRY();
1976
1977         op = cmd->cdb[0];       /* get the opcode */
1978
1979         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1980                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1981                 dev_type);
1982
1983         i = scst_scsi_op_list[op];
1984         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1985                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1986                         ptr = &scst_scsi_op_table[i];
1987                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1988                               ptr->ops, ptr->devkey[0], /* disk     */
1989                               ptr->devkey[1],   /* tape     */
1990                               ptr->devkey[2],   /* printer */
1991                               ptr->devkey[3],   /* cpu      */
1992                               ptr->devkey[4],   /* cdr      */
1993                               ptr->devkey[5],   /* cdrom    */
1994                               ptr->devkey[6],   /* scanner */
1995                               ptr->devkey[7],   /* worm     */
1996                               ptr->devkey[8],   /* changer */
1997                               ptr->devkey[9],   /* commdev */
1998                               ptr->op_name);
1999                         TRACE_DBG("direction=%d flags=%d off=%d",
2000                               ptr->direction,
2001                               ptr->flags,
2002                               ptr->off);
2003                         break;
2004                 }
2005                 i++;
2006         }
2007
2008         if (ptr == NULL) {
2009                 /* opcode not found or not supported for this device type */
2010                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
2011                       dev_type);
2012                 res = -1;
2013                 cmd->op_flags = SCST_INFO_INVALID;
2014                 goto out;
2015         }
2016
2017         cmd->cdb_len = SCST_GET_CDB_LEN(op);
2018         cmd->op_name = ptr->op_name;
2019         cmd->data_direction = ptr->direction;
2020         cmd->op_flags = ptr->flags;
2021         res = (*ptr->get_trans_len)(cmd, ptr->off);
2022
2023 out:
2024         TRACE_EXIT();
2025         return res;
2026 }
2027 EXPORT_SYMBOL(scst_get_cdb_info);
2028
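/*
 * Usage sketch (illustrative): the SCST core calls scst_get_cdb_info() while
 * initializing a command.  A negative return means the opcode is unknown for
 * the device type (cmd->op_flags is set to SCST_INFO_INVALID); a positive
 * return means the get_trans_len() routine has already set an error, e.g.
 * invalid field in CDB, in the command:
 *
 *        if (scst_get_cdb_info(cmd) < 0)
 *                TRACE_DBG("Opcode 0x%x unknown for this device type",
 *                        cmd->cdb[0]);
 */
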
2029 /*
2030  * Routine to extract a LUN number from an up to 8-byte LUN structure
2031  * in network byte order (big endian); see SAM-2, Section 4.12.3, page 40.
2032  * Supports three LUN addressing methods: peripheral, flat space and
2033  * logical unit.
2034  */
2035 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2036 {
2037         uint64_t res = NO_SUCH_LUN;
2038         int address_method;
2039
2040         TRACE_ENTRY();
2041
2042         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2043
2044         if (unlikely(len < 2)) {
2045                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2046                         "more", len);
2047                 goto out;
2048         }
2049
2050         if (len > 2) {
2051                 switch (len) {
2052                 case 8:
2053                         if ((*((uint64_t *)lun) &
2054                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2055                                 goto out_err;
2056                         break;
2057                 case 4:
2058                         if (*((uint16_t *)&lun[2]) != 0)
2059                                 goto out_err;
2060                         break;
2061                 case 6:
2062                         if (*((uint32_t *)&lun[2]) != 0)
2063                                 goto out_err;
2064                         break;
2065                 default:
2066                         goto out_err;
2067                 }
2068         }
2069
2070         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
2071         switch (address_method) {
2072         case 0: /* peripheral device addressing method */
2073 #if 0
2074                 if (*lun) {
2075                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
2076                              "peripheral device addressing method 0x%02x, "
2077                              "expected 0", *lun);
2078                         break;
2079                 }
2080                 res = *(lun + 1);
2081                 break;
2082 #else
2083                 /*
2084                  * It appears to be legal to treat this as the flat space
2085                  * addressing method as well.
2086                  */
2087 
2088                 /* fall through */
2089 #endif
2090
2091         case 1: /* flat space addressing method */
2092                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2093                 break;
2094
2095         case 2: /* logical unit addressing method */
2096                 if (*lun & 0x3f) {
2097                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2098                                     "addressing method 0x%02x, expected 0",
2099                                     *lun & 0x3f);
2100                         break;
2101                 }
2102                 if (*(lun + 1) & 0xe0) {
2103                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2104                                     "addressing method 0x%02x, expected 0",
2105                                     (*(lun + 1) & 0xe0) >> 5);
2106                         break;
2107                 }
2108                 res = *(lun + 1) & 0x1f;
2109                 break;
2110
2111         case 3: /* extended logical unit addressing method */
2112         default:
2113                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2114                             address_method);
2115                 break;
2116         }
2117
2118 out:
2119         TRACE_EXIT_RES((int)res);
2120         return res;
2121
2122 out_err:
2123         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2124         goto out;
2125 }
2126
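/*
 * Worked example (illustrative): for the 8-byte LUN
 * 40 05 00 00 00 00 00 00 the two high bits of byte 0 are 01b, i.e. the
 * flat space addressing method, so res = 0x05 | ((0x40 & 0x3f) << 8) == 5.
 * A 2-byte peripheral-style LUN such as 00 03 decodes the same way
 * (method 0 falls through to the flat space case) to LUN 3.
 */
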
2127 int scst_calc_block_shift(int sector_size)
2128 {
2129         int block_shift = 0;
2130         int t;
2131
2132         if (sector_size == 0)
2133                 sector_size = 512;
2134
2135         t = sector_size;
2136         while (1) {
2137                 if ((t & 1) != 0)
2138                         break;
2139                 t >>= 1;
2140                 block_shift++;
2141         }
2142         if (block_shift < 9) {
2143                 PRINT_ERROR("Wrong sector size %d", sector_size);
2144                 block_shift = -1;
2145         }
2146
2147         TRACE_EXIT_RES(block_shift);
2148         return block_shift;
2149 }
2150 EXPORT_SYMBOL(scst_calc_block_shift);
2151
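/*
 * Worked examples (illustrative): scst_calc_block_shift(512) == 9 and
 * scst_calc_block_shift(4096) == 12, since 512 == 1 << 9 and
 * 4096 == 1 << 12.  A sector size with a low set bit, e.g. 520
 * (0x208, lowest set bit 3), produces a shift smaller than 9 and is
 * rejected with -1.
 */
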
2152 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2153         int (*get_block_shift)(struct scst_cmd *cmd))
2154 {
2155         int res = 0;
2156
2157         TRACE_ENTRY();
2158
2159         /*
2160          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2161          * therefore change them only if necessary
2162          */
2163
2164         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2165               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2166
2167         switch (cmd->cdb[0]) {
2168         case SERVICE_ACTION_IN:
2169                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2170                         cmd->bufflen = READ_CAP16_LEN;
2171                         cmd->data_direction = SCST_DATA_READ;
2172                 }
2173                 break;
2174         case VERIFY_6:
2175         case VERIFY:
2176         case VERIFY_12:
2177         case VERIFY_16:
2178                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2179                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2180                         cmd->bufflen = 0;
2181                         goto set_timeout;
2182                 } else
2183                         cmd->data_len = 0;
2184                 break;
2185         default:
2186                 /* It's all good */
2187                 break;
2188         }
2189
2190         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2191                 /*
2192                  * No need for locks here, since *_detach() cannot be
2193                  * called while there are outstanding commands.
2194                  */
2195                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2196         }
2197
2198 set_timeout:
2199         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2200                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2201         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2202                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2203         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2204                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2205
2206         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2207               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2208
2209         TRACE_EXIT_RES(res);
2210         return res;
2211 }
2212 EXPORT_SYMBOL(scst_sbc_generic_parse);
2213
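/*
 * Usage sketch (illustrative; disk_get_block_shift() is a hypothetical
 * helper of the dev handler, not part of this file): a disk-type dev
 * handler's parse() callback would typically be a thin wrapper:
 *
 *        static int disk_parse(struct scst_cmd *cmd)
 *        {
 *                return scst_sbc_generic_parse(cmd, disk_get_block_shift);
 *        }
 *
 * where disk_get_block_shift() returns the per-device block shift saved
 * earlier, e.g. computed by scst_calc_block_shift() from READ CAPACITY data.
 */
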
2214 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2215         int (*get_block_shift)(struct scst_cmd *cmd))
2216 {
2217         int res = 0;
2218
2219         TRACE_ENTRY();
2220
2221         /*
2222          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2223          * therefore change them only if necessary
2224          */
2225
2226         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2227               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2228
2229         cmd->cdb[1] &= 0x1f;
2230
2231         switch (cmd->cdb[0]) {
2232         case VERIFY_6:
2233         case VERIFY:
2234         case VERIFY_12:
2235         case VERIFY_16:
2236                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2237                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2238                         cmd->bufflen = 0;
2239                         goto set_timeout;
2240                 }
2241                 break;
2242         default:
2243                 /* It's all good */
2244                 break;
2245         }
2246
2247         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2248                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2249
2250 set_timeout:
2251         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2252                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2253         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2254                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2255         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2256                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2257
2258         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2259                 cmd->data_direction);
2260
2261         TRACE_EXIT_RES(res);
2262         return res;
2263 }
2264 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2265
2266 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2267         int (*get_block_shift)(struct scst_cmd *cmd))
2268 {
2269         int res = 0;
2270
2271         TRACE_ENTRY();
2272
2273         /*
2274          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2275          * therefore change them only if necessary
2276          */
2277
2278         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2279               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2280
2281         cmd->cdb[1] &= 0x1f;
2282
2283         switch (cmd->cdb[0]) {
2284         case VERIFY_6:
2285         case VERIFY:
2286         case VERIFY_12:
2287         case VERIFY_16:
2288                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2289                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2290                         cmd->bufflen = 0;
2291                         goto set_timeout;
2292                 }
2293                 break;
2294         default:
2295                 /* It's all good */
2296                 break;
2297         }
2298
2299         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2300                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2301
2302 set_timeout:
2303         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2304                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2305         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2306                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2307         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2308                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2309
2310         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2311                 cmd->data_direction);
2312
2313         TRACE_EXIT_RES(res);
2314         return res;
2315 }
2316 EXPORT_SYMBOL(scst_modisk_generic_parse);
2317
2318 int scst_tape_generic_parse(struct scst_cmd *cmd,
2319         int (*get_block_size)(struct scst_cmd *cmd))
2320 {
2321         int res = 0;
2322
2323         TRACE_ENTRY();
2324
2325         /*
2326          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2327          * therefore change them only if necessary
2328          */
2329
2330         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2331               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2332
2333         if (cmd->cdb[0] == READ_POSITION) {
2334                 int tclp = cmd->cdb[1] & 4;
2335                 int long_bit = cmd->cdb[1] & 2;
2336                 int bt = cmd->cdb[1] & 1;
2337                 /* compare TCLP (bit 2) and LONG (bit 1) as booleans */
2338                 if ((!!tclp == !!long_bit) && (!bt || !long_bit)) {
2339                         cmd->bufflen =
2340                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2341                         cmd->data_direction = SCST_DATA_READ;
2342                 } else {
2343                         cmd->bufflen = 0;
2344                         cmd->data_direction = SCST_DATA_NONE;
2345                 }
2346         }
2347
2348         if ((cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) && (cmd->cdb[1] & 1))
2349                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2350
2351         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2352                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2353         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2354                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2355         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2356                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2357
2358         TRACE_EXIT_RES(res);
2359         return res;
2360 }
2361 EXPORT_SYMBOL(scst_tape_generic_parse);
2362
2363 static int scst_null_parse(struct scst_cmd *cmd)
2364 {
2365         int res = 0;
2366
2367         TRACE_ENTRY();
2368
2369         /*
2370          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2371          * therefore change them only if necessary
2372          */
2373
2374         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2375               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2376 #if 0
2377         switch (cmd->cdb[0]) {
2378         default:
2379                 /* It's all good */
2380                 break;
2381         }
2382 #endif
2383         TRACE_DBG("res %d bufflen %d direct %d",
2384               res, cmd->bufflen, cmd->data_direction);
2385
2386         TRACE_EXIT();
2387         return res;
2388 }
2389
2390 int scst_changer_generic_parse(struct scst_cmd *cmd,
2391         int (*nothing)(struct scst_cmd *cmd))
2392 {
2393         int res = scst_null_parse(cmd);
2394
2395         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2396                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2397         else
2398                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2399
2400         return res;
2401 }
2402 EXPORT_SYMBOL(scst_changer_generic_parse);
2403
2404 int scst_processor_generic_parse(struct scst_cmd *cmd,
2405         int (*nothing)(struct scst_cmd *cmd))
2406 {
2407         int res = scst_null_parse(cmd);
2408
2409         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2410                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2411         else
2412                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2413
2414         return res;
2415 }
2416 EXPORT_SYMBOL(scst_processor_generic_parse);
2417
2418 int scst_raid_generic_parse(struct scst_cmd *cmd,
2419         int (*nothing)(struct scst_cmd *cmd))
2420 {
2421         int res = scst_null_parse(cmd);
2422
2423         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2424                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2425         else
2426                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2427
2428         return res;
2429 }
2430 EXPORT_SYMBOL(scst_raid_generic_parse);
2431
2432 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2433         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2434 {
2435         int opcode = cmd->cdb[0];
2436         int status = cmd->status;
2437         int res = SCST_CMD_STATE_DEFAULT;
2438
2439         TRACE_ENTRY();
2440
2441         /*
2442          * SCST sets good defaults for cmd->is_send_status and
2443          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2444          * therefore change them only if necessary
2445          */
2446
2447         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2448                 switch (opcode) {
2449                 case READ_CAPACITY:
2450                 {
2451                         /* Always keep track of disk capacity */
2452                         int buffer_size, sector_size, sh;
2453                         uint8_t *buffer;
2454
2455                         buffer_size = scst_get_buf_first(cmd, &buffer);
2456                         if (unlikely(buffer_size <= 0)) {
2457                                 if (buffer_size < 0) {
2458                                         PRINT_ERROR("%s: Unable to get the"
2459                                         " buffer (%d)", __func__, buffer_size);
2460                                 }
2461                                 goto out;
2462                         }
2463
2464                         sector_size =
2465                             ((buffer[4] << 24) | (buffer[5] << 16) |
2466                              (buffer[6] << 8) | (buffer[7] << 0));
2467                         scst_put_buf(cmd, buffer);
2468                         if (sector_size != 0)
2469                                 sh = scst_calc_block_shift(sector_size);
2470                         else
2471                                 sh = 0;
2472                         set_block_shift(cmd, sh);
2473                         TRACE_DBG("block_shift %d", sh);
2474                         break;
2475                 }
2476                 default:
2477                         /* It's all good */
2478                         break;
2479                 }
2480         }
2481
2482         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2483               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2484
2485 out:
2486         TRACE_EXIT_RES(res);
2487         return res;
2488 }
2489 EXPORT_SYMBOL(scst_block_generic_dev_done);
2490
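/*
 * Usage sketch (illustrative; disk_set_block_shift() is a hypothetical dev
 * handler callback): scst_block_generic_dev_done() is meant to be called
 * from a dev handler's dev_done() so the handler can track block size
 * changes reported in READ CAPACITY responses:
 *
 *        static int disk_done(struct scst_cmd *cmd)
 *        {
 *                return scst_block_generic_dev_done(cmd,
 *                                disk_set_block_shift);
 *        }
 */
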
2491 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2492         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2493 {
2494         int opcode = cmd->cdb[0];
2495         int res = SCST_CMD_STATE_DEFAULT;
2496         int buffer_size, bs;
2497         uint8_t *buffer = NULL;
2498
2499         TRACE_ENTRY();
2500
2501         /*
2502          * SCST sets good defaults for cmd->is_send_status and
2503          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2504          * therefore change them only if necessary
2505          */
2506
2507         switch (opcode) {
2508         case MODE_SENSE:
2509         case MODE_SELECT:
2510                 buffer_size = scst_get_buf_first(cmd, &buffer);
2511                 if (unlikely(buffer_size <= 0)) {
2512                         if (buffer_size < 0) {
2513                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2514                                         __func__, buffer_size);
2515                         }
2516                         goto out;
2517                 }
2518                 break;
2519         }
2520
2521         switch (opcode) {
2522         case MODE_SENSE:
2523                 TRACE_DBG("%s", "MODE_SENSE");
2524                 if ((cmd->cdb[2] & 0xC0) == 0) {
2525                         if (buffer[3] == 8) {
2526                                 bs = (buffer[9] << 16) |
2527                                     (buffer[10] << 8) | buffer[11];
2528                                 set_block_size(cmd, bs);
2529                         }
2530                 }
2531                 break;
2532         case MODE_SELECT:
2533                 TRACE_DBG("%s", "MODE_SELECT");
2534                 if (buffer[3] == 8) {
2535                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2536                             (buffer[11]);
2537                         set_block_size(cmd, bs);
2538                 }
2539                 break;
2540         default:
2541                 /* It's all good */
2542                 break;
2543         }
2544
2545         switch (opcode) {
2546         case MODE_SENSE:
2547         case MODE_SELECT:
2548                 scst_put_buf(cmd, buffer);
2549                 break;
2550         }
2551
2552 out:
2553         TRACE_EXIT_RES(res);
2554         return res;
2555 }
2556 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2557
2558 static void scst_check_internal_sense(struct scst_device *dev, int result,
2559         uint8_t *sense, int sense_len)
2560 {
2561         TRACE_ENTRY();
2562
2563         if (host_byte(result) == DID_RESET) {
2564                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2565                         "reset UA");
2566                 scst_set_sense(sense, sense_len,
2567                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2568                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2569         } else if ((status_byte(result) == CHECK_CONDITION) &&
2570                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2571                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2572
2573         TRACE_EXIT();
2574         return;
2575 }
2576
2577 int scst_obtain_device_parameters(struct scst_device *dev)
2578 {
2579         int res = 0, i;
2580         uint8_t cmd[16];
2581         uint8_t buffer[4+0x0A];
2582         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2583
2584         TRACE_ENTRY();
2585
2586         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2587
2588         for (i = 0; i < 5; i++) {
2589                 /* Get control mode page */
2590                 memset(cmd, 0, sizeof(cmd));
2591                 cmd[0] = MODE_SENSE;
2592                 cmd[1] = 8; /* DBD */
2593                 cmd[2] = 0x0A;
2594                 cmd[4] = sizeof(buffer);
2595
2596                 memset(buffer, 0, sizeof(buffer));
2597                 memset(sense_buffer, 0, sizeof(sense_buffer));
2598
2599                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2600                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2601                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2602
2603                 TRACE_DBG("MODE_SENSE done: %x", res);
2604
2605                 if (scsi_status_is_good(res)) {
2606                         int q;
2607
2608                         PRINT_BUFF_FLAG(TRACE_SCSI,
2609                                 "Returned control mode page data",
2610                                 buffer, sizeof(buffer));
2611
2612                         dev->tst = buffer[4+2] >> 5;
2613                         q = buffer[4+3] >> 4;
2614                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2615                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2616                                         "%d:%d:%d:%d", q,
2617                                         dev->scsi_dev->host->host_no,
2618                                         dev->scsi_dev->channel,
2619                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2620                         }
2621                         dev->queue_alg = q;
2622                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2623                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2624
2625                         /*
2626                          * Unfortunately, the SCSI midlayer doesn't provide a way
2627                          * to specify a command's task attribute, so we can only
2628                          * rely on the device's restricted reordering.
2629                          */
2630                         dev->has_own_order_mgmt = !dev->queue_alg;
2631
2632                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2633                                 "Device %d:%d:%d:%d: TST %x, "
2634                                 "QUEUE ALG %x, SWP %x, TAS %x, "
2635                                 "has_own_order_mgmt %d",
2636                                 dev->scsi_dev->host->host_no,
2637                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2638                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2639                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2640
2641                         goto out;
2642                 } else {
2643 #if 0
2644                         if ((status_byte(res) == CHECK_CONDITION) &&
2645 #else
2646                         /*
2647                          * The 3ware controller is buggy and returns CONDITION_GOOD
2648                          * instead of CHECK_CONDITION.
2649                          */
2650                         if (
2651 #endif
2652                             SCST_SENSE_VALID(sense_buffer)) {
2653                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2654                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2655                                                 "Device %d:%d:%d:%d doesn't"
2656                                                 " support control mode page,"
2657                                                 " using defaults: TST %x,"
2658                                                 " QUEUE ALG %x, SWP %x, TAS %x,"
2659                                                 " has_own_order_mgmt %d",
2660                                                 dev->scsi_dev->host->host_no,
2661                                                 dev->scsi_dev->channel,
2662                                                 dev->scsi_dev->id,
2663                                                 dev->scsi_dev->lun,
2664                                                 dev->tst,
2665                                                 dev->queue_alg,
2666                                                 dev->swp,
2667                                                 dev->tas,
2668                                                 dev->has_own_order_mgmt);
2669                                         res = 0;
2670                                         goto out;
2671                                 } else if (sense_buffer[2] == NOT_READY) {
2672                                         TRACE(TRACE_SCSI,
2673                                                 "Device %d:%d:%d:%d not ready",
2674                                                 dev->scsi_dev->host->host_no,
2675                                                 dev->scsi_dev->channel,
2676                                                 dev->scsi_dev->id,
2677                                                 dev->scsi_dev->lun);
2678                                         res = 0;
2679                                         goto out;
2680                                 }
2681                         } else {
2682                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2683                                         "Internal MODE SENSE to "
2684                                         "device %d:%d:%d:%d failed: %x",
2685                                         dev->scsi_dev->host->host_no,
2686                                         dev->scsi_dev->channel,
2687                                         dev->scsi_dev->id,
2688                                         dev->scsi_dev->lun, res);
2689                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2690                                         "MODE SENSE sense",
2691                                         sense_buffer, sizeof(sense_buffer));
2692                         }
2693                         scst_check_internal_sense(dev, res, sense_buffer,
2694                                         sizeof(sense_buffer));
2695                 }
2696         }
2697         res = -ENODEV;
2698
2699 out:
2700         TRACE_EXIT_RES(res);
2701         return res;
2702 }
2703 EXPORT_SYMBOL(scst_obtain_device_parameters);
2704
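/*
 * Note (illustrative): pass-through dev handlers would typically call
 * scst_obtain_device_parameters() from their attach() callback, once
 * dev->scsi_dev is valid, so that dev->tst, dev->queue_alg, dev->swp and
 * dev->tas reflect the control mode page of the real device before any
 * initiator commands are processed.  A sketch, assuming an attach()-style
 * callback:
 *
 *        static int mydev_attach(struct scst_device *dev)
 *        {
 *                return scst_obtain_device_parameters(dev);
 *        }
 */
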
2705 /* Called under dev_lock and BH off */
2706 void scst_process_reset(struct scst_device *dev,
2707         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2708         struct scst_mgmt_cmd *mcmd, bool setUA)
2709 {
2710         struct scst_tgt_dev *tgt_dev;
2711         struct scst_cmd *cmd, *tcmd;
2712
2713         TRACE_ENTRY();
2714
2715         /* Clear RESERVE'ation, if necessary */
2716         if (dev->dev_reserved) {
2717                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2718                                     dev_tgt_dev_list_entry) {
2719                         TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
2720                                 "tgt_dev lun %lld",
2721                                 (long long unsigned int)tgt_dev->lun);
2722                         clear_bit(SCST_TGT_DEV_RESERVED,
2723                                   &tgt_dev->tgt_dev_flags);
2724                 }
2725                 dev->dev_reserved = 0;
2726                 /*
2727                  * There is no need to send RELEASE, since the device is going
2728                  * to be reset. In fact, since we may be running inside a RESET
2729                  * TM function, sending it might even be dangerous.
2730                  */
2731         }
2732
2733         dev->dev_double_ua_possible = 1;
2734
2735         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2736                 dev_tgt_dev_list_entry) {
2737                 struct scst_session *sess = tgt_dev->sess;
2738
2739                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2740                 scst_free_all_UA(tgt_dev);
2741                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2742
2743                 spin_lock_irq(&sess->sess_list_lock);
2744
2745                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2746                 list_for_each_entry(cmd, &sess->search_cmd_list,
2747                                 search_cmd_list_entry) {
2748                         if (cmd == exclude_cmd)
2749                                 continue;
2750                         if ((cmd->tgt_dev == tgt_dev) ||
2751                             ((cmd->tgt_dev == NULL) &&
2752                              (cmd->lun == tgt_dev->lun))) {
2753                                 scst_abort_cmd(cmd, mcmd,
2754                                         (tgt_dev->sess != originator), 0);
2755                         }
2756                 }
2757                 spin_unlock_irq(&sess->sess_list_lock);
2758         }
2759
2760         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2761                                 blocked_cmd_list_entry) {
2762                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2763                         list_del(&cmd->blocked_cmd_list_entry);
2764                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2765                                 "to active cmd list", cmd);
2766                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2767                         list_add_tail(&cmd->cmd_list_entry,
2768                                 &cmd->cmd_lists->active_cmd_list);
2769                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2770                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2771                 }
2772         }
2773
2774         if (setUA) {
2775                 /* BH already off */
2776                 spin_lock(&scst_temp_UA_lock);
2777                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2778                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2779                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2780                         sizeof(scst_temp_UA));
2781                 spin_unlock(&scst_temp_UA_lock);
2782         }
2783
2784         TRACE_EXIT();
2785         return;
2786 }
2787
2788 int scst_set_pending_UA(struct scst_cmd *cmd)
2789 {
2790         int res = 0;
2791         struct scst_tgt_dev_UA *UA_entry;
2792
2793         TRACE_ENTRY();
2794
2795         TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);
2796
2797         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2798
2799         /* UA list could be cleared behind us, so retest */
2800         if (list_empty(&cmd->tgt_dev->UA_list)) {
2801                 TRACE_DBG("%s",
2802                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2803                 res = -1;
2804                 goto out_unlock;
2805         }
2806
2807         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2808                               UA_list_entry);
2809
2810         TRACE_DBG("next %p UA_entry %p",
2811               cmd->tgt_dev->UA_list.next, UA_entry);
2812
2813         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2814                 sizeof(UA_entry->UA_sense_buffer));
2815
2816         cmd->ua_ignore = 1;
2817
2818         list_del(&UA_entry->UA_list_entry);
2819
2820         mempool_free(UA_entry, scst_ua_mempool);
2821
2822         if (list_empty(&cmd->tgt_dev->UA_list)) {
2823                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2824                           &cmd->tgt_dev->tgt_dev_flags);
2825         }
2826
2827         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2828
2829 out:
2830         TRACE_EXIT_RES(res);
2831         return res;
2832
2833 out_unlock:
2834         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2835         goto out;
2836 }
2837
2838 /* Called under tgt_dev_lock and BH off */
2839 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2840         const uint8_t *sense, int sense_len, int head)
2841 {
2842         struct scst_tgt_dev_UA *UA_entry = NULL;
2843
2844         TRACE_ENTRY();
2845
2846         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2847         if (UA_entry == NULL) {
2848                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2849                      "allocation failed. The UNIT ATTENTION "
2850                      "on some sessions will be missed");
2851                 PRINT_BUFFER("Lost UA", sense, sense_len);
2852                 goto out;
2853         }
2854         memset(UA_entry, 0, sizeof(*UA_entry));
2855
2856         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2857                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2858         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2859
2860         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2861
2862         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2863
2864         if (head)
2865                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2866         else
2867                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2868
2869 out:
2870         TRACE_EXIT();
2871         return;
2872 }
2873
2874 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2875         const uint8_t *sense, int sense_len, int head)
2876 {
2877         int skip_UA = 0;
2878         struct scst_tgt_dev_UA *UA_entry_tmp;
2879
2880         TRACE_ENTRY();
2881
2882         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2883
2884         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2885                             UA_list_entry) {
2886                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2887                            min(sense_len, (int)sizeof(UA_entry_tmp->UA_sense_buffer))) == 0) {
2888                         TRACE_MGMT_DBG("%s", "UA already exists");
2889                         skip_UA = 1;
2890                         break;
2891                 }
2892         }
2893
2894         if (skip_UA == 0)
2895                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2896
2897         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2898
2899         TRACE_EXIT();
2900         return;
2901 }
2902
2903 /* Called under dev_lock and BH off */
2904 void scst_dev_check_set_local_UA(struct scst_device *dev,
2905         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2906 {
2907         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2908
2909         TRACE_ENTRY();
2910
2911         if (exclude != NULL)
2912                 exclude_tgt_dev = exclude->tgt_dev;
2913
2914         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2915                         dev_tgt_dev_list_entry) {
2916                 if (tgt_dev != exclude_tgt_dev)
2917                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2918         }
2919
2920         TRACE_EXIT();
2921         return;
2922 }
2923
2924 /* Called under dev_lock and BH off */
2925 void __scst_dev_check_set_UA(struct scst_device *dev,
2926         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2927 {
2928         TRACE_ENTRY();
2929
2930         TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);
2931
2932         /* Check for reset UA */
2933         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2934                 scst_process_reset(dev,
2935                                    (exclude != NULL) ? exclude->sess : NULL,
2936                                    exclude, NULL, false);
2937
2938         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2939
2940         TRACE_EXIT();
2941         return;
2942 }
2943
2944 /* Called under tgt_dev_lock or when tgt_dev is unused */
2945 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2946 {
2947         struct scst_tgt_dev_UA *UA_entry, *t;
2948
2949         TRACE_ENTRY();
2950
2951         list_for_each_entry_safe(UA_entry, t,
2952                                  &tgt_dev->UA_list, UA_list_entry) {
2953                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2954                                (long long unsigned int)tgt_dev->lun);
2955                 list_del(&UA_entry->UA_list_entry);
2956                 mempool_free(UA_entry, scst_ua_mempool);
2957         }
2958         INIT_LIST_HEAD(&tgt_dev->UA_list);
2959         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2960
2961         TRACE_EXIT();
2962         return;
2963 }
2964
2965 /* No locks */
2966 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2967 {
2968         struct scst_cmd *res = NULL, *cmd, *t;
2969         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2970
2971         spin_lock_irq(&tgt_dev->sn_lock);
2972
2973         if (unlikely(tgt_dev->hq_cmd_count != 0))
2974                 goto out_unlock;
2975
2976 restart:
2977         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2978                                 sn_cmd_list_entry) {
2979                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2980                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2981                 if (cmd->sn == expected_sn) {
2982                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2983                                 cmd, cmd->sn, cmd->sn_set);
2984                         tgt_dev->def_cmd_count--;
2985                         list_del(&cmd->sn_cmd_list_entry);
2986                         if (res == NULL)
2987                                 res = cmd;
2988                         else {
2989                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2990                                 TRACE_SN("Adding cmd %p to active cmd list",
2991                                         cmd);
2992                                 list_add_tail(&cmd->cmd_list_entry,
2993                                         &cmd->cmd_lists->active_cmd_list);
2994                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2995                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2996                         }
2997                 }
2998         }
2999         if (res != NULL)
3000                 goto out_unlock;
3001
3002         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3003                                 sn_cmd_list_entry) {
3004                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3005                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3006                 if (cmd->sn == expected_sn) {
3007                         atomic_t *slot = cmd->sn_slot;
3008                         /*
3009                          * !! At this point any pointer in cmd, except  !!
3010                          * !! sn_slot and sn_cmd_list_entry, could      !!
3011                          * !! already be destroyed                      !!
3012                          */
3013                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3014                                  cmd,
3015                                  (long long unsigned int)cmd->tag,
3016                                  cmd->sn);
3017                         tgt_dev->def_cmd_count--;
3018                         list_del(&cmd->sn_cmd_list_entry);
3019                         spin_unlock_irq(&tgt_dev->sn_lock);
3020                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3021                                              &cmd->cmd_flags))
3022                                 scst_destroy_put_cmd(cmd);
3023                         scst_inc_expected_sn(tgt_dev, slot);
3024                         expected_sn = tgt_dev->expected_sn;
3025                         spin_lock_irq(&tgt_dev->sn_lock);
3026                         goto restart;
3027                 }
3028         }
3029
3030 out_unlock:
3031         spin_unlock_irq(&tgt_dev->sn_lock);
3032         return res;
3033 }
3034
3035 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3036         struct scst_thr_data_hdr *data,
3037         void (*free_fn) (struct scst_thr_data_hdr *data))
3038 {
3039         data->owner_thr = current;
3040         atomic_set(&data->ref, 1);
3041         EXTRACHECKS_BUG_ON(free_fn == NULL);
3042         data->free_fn = free_fn;
3043         spin_lock(&tgt_dev->thr_data_lock);
3044         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3045         spin_unlock(&tgt_dev->thr_data_lock);
3046 }
3047 EXPORT_SYMBOL(scst_add_thr_data);
3048
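/*
 * Usage sketch (illustrative; struct my_thr_data and my_free_thr_data() are
 * hypothetical): per-thread data are embedded structures with a
 * struct scst_thr_data_hdr as their first member:
 *
 *        struct my_thr_data {
 *                struct scst_thr_data_hdr hdr;
 *                int my_field;
 *        };
 *
 *        static void my_free_thr_data(struct scst_thr_data_hdr *h)
 *        {
 *                kfree(container_of(h, struct my_thr_data, hdr));
 *        }
 *
 *        struct my_thr_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
 *        if (d != NULL)
 *                scst_add_thr_data(tgt_dev, &d->hdr, my_free_thr_data);
 *
 * Later, __scst_find_thr_data(tgt_dev, current) returns the header with a
 * reference held (drop it with scst_thr_data_put()), and container_of()
 * converts it back to struct my_thr_data.
 */
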
3049 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3050 {
3051         spin_lock(&tgt_dev->thr_data_lock);
3052         while (!list_empty(&tgt_dev->thr_data_list)) {
3053                 struct scst_thr_data_hdr *d = list_entry(
3054                                 tgt_dev->thr_data_list.next, typeof(*d),
3055                                 thr_data_list_entry);
3056                 list_del(&d->thr_data_list_entry);
3057                 spin_unlock(&tgt_dev->thr_data_lock);
3058                 scst_thr_data_put(d);
3059                 spin_lock(&tgt_dev->thr_data_lock);
3060         }
3061         spin_unlock(&tgt_dev->thr_data_lock);
3062         return;
3063 }
3064 EXPORT_SYMBOL(scst_del_all_thr_data);
3065
void scst_dev_del_all_thr_data(struct scst_device *dev)
{
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                                dev_tgt_dev_list_entry) {
                scst_del_all_thr_data(tgt_dev);
        }

        mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_dev_del_all_thr_data);

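/*
 * Find the per-thread data registered by task tsk, if any. On success a
 * reference is taken on the returned entry; the caller must drop it with
 * scst_thr_data_put() when done.
 */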
struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
        struct task_struct *tsk)
{
        struct scst_thr_data_hdr *res = NULL, *d;

        spin_lock(&tgt_dev->thr_data_lock);
        list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
                if (d->owner_thr == tsk) {
                        res = d;
                        scst_thr_data_get(res);
                        break;
                }
        }
        spin_unlock(&tgt_dev->thr_data_lock);
        return res;
}
EXPORT_SYMBOL(__scst_find_thr_data);

/* The dev_lock is supposed to be held with BHs disabled */
void __scst_block_dev(struct scst_device *dev)
{
        dev->block_count++;
        TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
}

/* No locks */
static void scst_block_dev(struct scst_device *dev, int outstanding)
{
        spin_lock_bh(&dev->dev_lock);
        __scst_block_dev(dev);
        spin_unlock_bh(&dev->dev_lock);

        /*
         * A memory barrier is necessary here, because we need to read
         * on_dev_count in wait_event() below after block_count has been
         * increased. Otherwise we can miss the wake-up in
         * scst_dec_on_dev_cmd(). An explicit barrier is used, because
         * spin_unlock_bh() doesn't provide the necessary memory barrier
         * semantics.
         */
        smp_mb();

        TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
                "%d)", outstanding, atomic_read(&dev->on_dev_count));
        wait_event(dev->on_dev_waitQ,
                atomic_read(&dev->on_dev_count) <= outstanding);
        TRACE_MGMT_DBG("%s", "wait_event() returned");
}

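/*
 * Illustrative (hypothetical) usage of this blocking API from a task
 * management handler:
 *
 *      scst_block_dev_cmd(cmd, 0);     (waits until the device is idle)
 *      ... perform the management action ...
 *      scst_unblock_dev_cmd(cmd);      (lets delayed commands run again)
 *
 * With outstanding == 0, scst_block_dev() returns only after all
 * commands have left the device.
 */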
/* No locks */
void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
{
        sBUG_ON(cmd->needs_unblocking);

        cmd->needs_unblocking = 1;
        TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
                       cmd, (long long unsigned int)cmd->tag);

        scst_block_dev(cmd->dev, outstanding);
}

/* No locks */
void scst_unblock_dev(struct scst_device *dev)
{
        spin_lock_bh(&dev->dev_lock);
        TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
                dev->block_count-1, dev);
        if (--dev->block_count == 0)
                scst_unblock_cmds(dev);
        spin_unlock_bh(&dev->dev_lock);
        sBUG_ON(dev->block_count < 0);
}

/* No locks */
void scst_unblock_dev_cmd(struct scst_cmd *cmd)
{
        scst_unblock_dev(cmd->dev);
        cmd->needs_unblocking = 0;
}

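/*
 * Account cmd as executing on its device. Returns 0 if the command may
 * proceed to the device now, or 1 if the device is blocked and the
 * command has been queued on blocked_cmd_list to be restarted later.
 */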
/* No locks */
int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
{
        int res = 0;
        struct scst_device *dev = cmd->dev;

        TRACE_ENTRY();

        sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);

        atomic_inc(&dev->on_dev_count);
        cmd->dec_on_dev_needed = 1;
        TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));

        if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
                /*
                 * The original command may already be blocking the device,
                 * so the REQUEST SENSE command must always pass.
                 */
                goto out;
        }

#ifdef CONFIG_SCST_STRICT_SERIALIZING
        spin_lock_bh(&dev->dev_lock);
        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
                goto out_unlock;
        if (dev->block_count > 0) {
                scst_dec_on_dev_cmd(cmd);
                TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
                        "serializing (tag %llu, dev %p)", cmd,
                        (long long unsigned int)cmd->tag, dev);
                list_add_tail(&cmd->blocked_cmd_list_entry,
                              &dev->blocked_cmd_list);
                res = 1;
        } else {
                __scst_block_dev(dev);
                cmd->inc_blocking = 1;
        }
        spin_unlock_bh(&dev->dev_lock);
        goto out;
#else
repeat:
        if (unlikely(dev->block_count > 0)) {
                spin_lock_bh(&dev->dev_lock);
                if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
                        goto out_unlock;
                if (dev->block_count > 0) {
                        scst_dec_on_dev_cmd(cmd);
                        TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
                                "(tag %llu, dev %p)", cmd,
                                (long long unsigned int)cmd->tag, dev);
                        list_add_tail(&cmd->blocked_cmd_list_entry,
                                      &dev->blocked_cmd_list);
                        res = 1;
                        spin_unlock_bh(&dev->dev_lock);
                        goto out;
                } else {
                        TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
                                "continuing");
                }
                spin_unlock_bh(&dev->dev_lock);
        }
        if (unlikely(dev->dev_double_ua_possible)) {
                spin_lock_bh(&dev->dev_lock);
                if (dev->block_count == 0) {
                        TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
                                "cmds due to possible double reset UA (dev %p)",
                                cmd, (long long unsigned int)cmd->tag, dev);
                        __scst_block_dev(dev);
                        cmd->inc_blocking = 1;
                } else {
                        spin_unlock_bh(&dev->dev_lock);
                        TRACE_MGMT_DBG("Somebody blocked the device, "
                                "repeating (count %d)", dev->block_count);
                        goto repeat;
                }
                spin_unlock_bh(&dev->dev_lock);
        }
#endif

out:
        TRACE_EXIT_RES(res);
        return res;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        goto out;
}

/* Called under dev_lock */
static void scst_unblock_cmds(struct scst_device *dev)
{
#ifdef CONFIG_SCST_STRICT_SERIALIZING
        struct scst_cmd *cmd, *t;
        unsigned long flags;

        TRACE_ENTRY();

        local_irq_save(flags);
        list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
                                 blocked_cmd_list_entry) {
                int brk = 0;
                /*
                 * Since only one cmd at a time is being executed, expected_sn
                 * can't change behind us while the corresponding cmd is on
                 * blocked_cmd_list, but we could be called before
                 * scst_inc_expected_sn().
                 *
                 * For HQ commands SN is not set.
                 */
                if (likely(!cmd->internal && cmd->sn_set)) {
                        typeof(cmd->tgt_dev->expected_sn) expected_sn;
                        if (cmd->tgt_dev == NULL)
                                sBUG();
                        expected_sn = cmd->tgt_dev->expected_sn;
                        if (cmd->sn == expected_sn)
                                brk = 1;
                        else if (cmd->sn != (expected_sn+1))
                                continue;
                }

                list_del(&cmd->blocked_cmd_list_entry);
                TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
                spin_lock(&cmd->cmd_lists->cmd_list_lock);
                list_add(&cmd->cmd_list_entry,
                         &cmd->cmd_lists->active_cmd_list);
                wake_up(&cmd->cmd_lists->cmd_list_waitQ);
                spin_unlock(&cmd->cmd_lists->cmd_list_lock);
                if (brk)
                        break;
        }
        local_irq_restore(flags);
#else /* CONFIG_SCST_STRICT_SERIALIZING */
        struct scst_cmd *cmd, *tcmd;
        unsigned long flags;

        TRACE_ENTRY();

        local_irq_save(flags);
        list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                 blocked_cmd_list_entry) {
                list_del(&cmd->blocked_cmd_list_entry);
                TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
                spin_lock(&cmd->cmd_lists->cmd_list_lock);
                if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
                        list_add(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                else
                        list_add_tail(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                wake_up(&cmd->cmd_lists->cmd_list_waitQ);
                spin_unlock(&cmd->cmd_lists->cmd_list_lock);
        }
        local_irq_restore(flags);
#endif /* CONFIG_SCST_STRICT_SERIALIZING */

        TRACE_EXIT();
        return;
}

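/*
 * If out_of_sn_cmd carries the currently expected SN, advance the
 * expected SN and activate any deferred commands. Otherwise mark the
 * command out-of-SN and park it on skipped_sn_list until the expected
 * SN catches up with it.
 */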
static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
        struct scst_cmd *out_of_sn_cmd)
{
        EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);

        if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
                scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
                scst_make_deferred_commands_active(tgt_dev);
        } else {
                out_of_sn_cmd->out_of_sn = 1;
                spin_lock_irq(&tgt_dev->sn_lock);
                tgt_dev->def_cmd_count++;
                list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
                              &tgt_dev->skipped_sn_list);
                TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
                        " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
                        tgt_dev->expected_sn);
                spin_unlock_irq(&tgt_dev->sn_lock);
        }

        return;
}

void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
        struct scst_cmd *out_of_sn_cmd)
{
        TRACE_ENTRY();

        if (!out_of_sn_cmd->sn_set) {
                TRACE_SN("cmd %p without sn", out_of_sn_cmd);
                goto out;
        }

        __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);

out:
        TRACE_EXIT();
        return;
}

void scst_on_hq_cmd_response(struct scst_cmd *cmd)
{
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

        TRACE_ENTRY();

        if (!cmd->hq_cmd_inced)
                goto out;

        spin_lock_irq(&tgt_dev->sn_lock);
        tgt_dev->hq_cmd_count--;
        spin_unlock_irq(&tgt_dev->sn_lock);

        EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);

        /*
         * It is safe to check hq_cmd_count without the lock; in the worst
         * case we will only get an unneeded run of the deferred commands.
         */
        if (tgt_dev->hq_cmd_count == 0)
                scst_make_deferred_commands_active(tgt_dev);

out:
        TRACE_EXIT();
        return;
}

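/*
 * Finish an aborted command on the xmit path. Whether TASK ABORTED
 * status is delivered to the initiator is governed by the device's TAS
 * (Task Aborted Status) setting, per the SAM control mode page
 * semantics: with TAS set the status is returned, otherwise the command
 * is dropped without notification.
 */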
void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
                "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
                atomic_read(&scst_cmd_count));

        scst_done_cmd_mgmt(cmd);

        if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
                if (cmd->completed) {
                        /* It's completed and it's OK to return its result */
                        goto out;
                }

                if (cmd->dev->tas) {
                        TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
                                "(tag %llu), returning TASK ABORTED", cmd,
                                (long long unsigned int)cmd->tag);
                        scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
                } else {
                        TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
                                "(tag %llu), aborting without delivery or "
                                "notification",
                                cmd, (long long unsigned int)cmd->tag);
                        /*
                         * There is no need to check/requeue a possible UA,
                         * because, if it exists, it will be delivered by the
                         * "completed" branch above.
                         */
                        clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
                }
        }

out:
        TRACE_EXIT();
        return;
}

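/*
 * Build the opcode lookup index: for each possible CDB opcode,
 * scst_scsi_op_list[op] is set to the index of the first entry for that
 * opcode in scst_scsi_op_table (assumed sorted by opcode), or left at
 * SCST_CDB_TBL_SIZE if the opcode is absent from the table.
 */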
void __init scst_scsi_op_list_init(void)
{
        int i;
        uint8_t op = 0xff;

        TRACE_ENTRY();

        for (i = 0; i < 256; i++)
                scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;

        for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
                if (scst_scsi_op_table[i].ops != op) {
                        op = scst_scsi_op_table[i].ops;
                        scst_scsi_op_list[op] = i;
                }
        }

        TRACE_EXIT();
        return;
}

#ifdef CONFIG_SCST_DEBUG
/* Original taken from the XFS code */
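/*
 * Park-Miller "minimal standard" generator: x = 16807 * x mod (2^31 - 1),
 * computed with Schrage's method (2147483647 = 16807 * 127773 + 2836)
 * to avoid overflow in 32-bit arithmetic.
 */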
unsigned long scst_random(void)
{
        static int Inited;
        static unsigned long RandomValue;
        static DEFINE_SPINLOCK(lock);
        /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
        register long rv;
        register long lo;
        register long hi;
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        if (!Inited) {
                RandomValue = jiffies;
                Inited = 1;
        }
        rv = RandomValue;
        hi = rv / 127773;
        lo = rv % 127773;
        rv = 16807 * lo - 2836 * hi;
        if (rv <= 0)
                rv += 2147483647;
        RandomValue = rv;
        spin_unlock_irqrestore(&lock, flags);
        return rv;
}
EXPORT_SYMBOL(scst_random);
#endif

#ifdef CONFIG_SCST_DEBUG_TM

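/*
 * TM debugging machinery: on LUN 0 of the default ACG every 50th command
 * is artificially delayed in order to provoke the initiator into issuing
 * aborts, resets etc. and so exercise the task management paths. The
 * code cycles through the states ABORT -> RESET -> ABORT (or ends up in
 * OFFLINE when CONFIG_SCST_TM_DBG_GO_OFFLINE is set); the number of
 * passes spent in each state is given by tm_dbg_on_state_num_passes[].
 */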
#define TM_DBG_STATE_ABORT              0
#define TM_DBG_STATE_RESET              1
#define TM_DBG_STATE_OFFLINE            2

#define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT

static void tm_dbg_timer_fn(unsigned long arg);

static DEFINE_SPINLOCK(scst_tm_dbg_lock);
/* All serialized by scst_tm_dbg_lock */
static struct {
        unsigned int tm_dbg_release:1;
        unsigned int tm_dbg_blocked:1;
} tm_dbg_flags;
static LIST_HEAD(tm_dbg_delayed_cmd_list);
static int tm_dbg_delayed_cmds_count;
static int tm_dbg_passed_cmds_count;
static int tm_dbg_state;
static int tm_dbg_on_state_passes;
static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;

static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };

static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev)
{
        if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
                unsigned long flags;
                /* Do TM debugging only for LUN 0 */
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                tm_dbg_p_cmd_list_waitQ =
                        &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
                tm_dbg_state = INIT_TM_DBG_STATE;
                tm_dbg_on_state_passes =
                        tm_dbg_on_state_num_passes[tm_dbg_state];
                __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
                PRINT_INFO("LUN 0 connected from initiator %s is under "
                        "TM debugging", tgt_dev->sess->tgt->tgtt->name);
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        }
}

static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
                unsigned long flags;
                del_timer_sync(&tm_dbg_timer);
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                tm_dbg_p_cmd_list_waitQ = NULL;
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        }
}

static void tm_dbg_timer_fn(unsigned long arg)
{
        TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
        tm_dbg_flags.tm_dbg_release = 1;
        /* Make sure that all woken-up threads see the new value */
        smp_wmb();
        wake_up_all(tm_dbg_p_cmd_list_waitQ);
}

/* Called under scst_tm_dbg_lock and IRQs off */
static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
{
        switch (tm_dbg_state) {
        case TM_DBG_STATE_ABORT:
                if (tm_dbg_delayed_cmds_count == 0) {
                        unsigned long d = 58*HZ + (scst_random() % (4*HZ));
                        TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
                                " for %ld.%ld seconds (%ld HZ), "
                                "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
                                d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
                        mod_timer(&tm_dbg_timer, jiffies + d);
#if 0
                        tm_dbg_flags.tm_dbg_blocked = 1;
#endif
                } else {
                        TRACE_MGMT_DBG("Delaying another timed cmd %p "
                                "(tag %llu), delayed_cmds_count=%d, "
                                "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
                                tm_dbg_delayed_cmds_count,
                                tm_dbg_on_state_passes);
                        if (tm_dbg_delayed_cmds_count == 2)
                                tm_dbg_flags.tm_dbg_blocked = 0;
                }
                break;

        case TM_DBG_STATE_RESET:
        case TM_DBG_STATE_OFFLINE:
                TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
                        "(tag %llu), delayed_cmds_count=%d, "
                        "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
                        tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
                tm_dbg_flags.tm_dbg_blocked = 1;
                break;

        default:
                sBUG();
        }
        /* IRQs already off */
        spin_lock(&cmd->cmd_lists->cmd_list_lock);
        list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
        spin_unlock(&cmd->cmd_lists->cmd_list_lock);
        cmd->tm_dbg_delayed = 1;
        tm_dbg_delayed_cmds_count++;
        return;
}


/* No locks */
void tm_dbg_check_released_cmds(void)
{
        if (tm_dbg_flags.tm_dbg_release) {
                struct scst_cmd *cmd, *tc;
                spin_lock_irq(&scst_tm_dbg_lock);
                list_for_each_entry_safe_reverse(cmd, tc,
                                &tm_dbg_delayed_cmd_list, cmd_list_entry) {
                        TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
                                "delayed_cmds_count=%d", cmd, cmd->tag,
                                tm_dbg_delayed_cmds_count);
                        spin_lock(&cmd->cmd_lists->cmd_list_lock);
                        list_move(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                        spin_unlock(&cmd->cmd_lists->cmd_list_lock);
                }
                tm_dbg_flags.tm_dbg_release = 0;
                spin_unlock_irq(&scst_tm_dbg_lock);
        }
}

/* Called under scst_tm_dbg_lock */
static void tm_dbg_change_state(void)
{
        tm_dbg_flags.tm_dbg_blocked = 0;
        if (--tm_dbg_on_state_passes == 0) {
                switch (tm_dbg_state) {
                case TM_DBG_STATE_ABORT:
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to RESET");
                        tm_dbg_state = TM_DBG_STATE_RESET;
                        tm_dbg_flags.tm_dbg_blocked = 0;
                        break;
                case TM_DBG_STATE_RESET:
                case TM_DBG_STATE_OFFLINE:
#ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to "
                                "OFFLINE");
                        tm_dbg_state = TM_DBG_STATE_OFFLINE;
#else
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to ABORT");
                        tm_dbg_state = TM_DBG_STATE_ABORT;
#endif
                        break;
                default:
                        sBUG();
                }
                tm_dbg_on_state_passes =
                        tm_dbg_on_state_num_passes[tm_dbg_state];
        }

        TRACE_MGMT_DBG("%s", "Deleting timer");
        del_timer(&tm_dbg_timer);
}

/* No locks */
int tm_dbg_check_cmd(struct scst_cmd *cmd)
{
        int res = 0;
        unsigned long flags;

        if (cmd->tm_dbg_immut)
                goto out;

        if (cmd->tm_dbg_delayed) {
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
                        "delayed_cmds_count=%d", cmd, cmd->tag,
                        tm_dbg_delayed_cmds_count);

                cmd->tm_dbg_immut = 1;
                tm_dbg_delayed_cmds_count--;
                if ((tm_dbg_delayed_cmds_count == 0) &&
                    (tm_dbg_state == TM_DBG_STATE_ABORT))
                        tm_dbg_change_state();
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
                                        &cmd->tgt_dev->tgt_dev_flags)) {
                /* Delay every 50th command */
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                if (tm_dbg_flags.tm_dbg_blocked ||
                    (++tm_dbg_passed_cmds_count % 50) == 0) {
                        tm_dbg_delay_cmd(cmd);
                        res = 1;
                } else
                        cmd->tm_dbg_immut = 1;
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        }

out:
        return res;
}

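/*
 * Called when an abort arrives for a command. If the command is on the
 * delayed list, it is moved back to its active list; for not yet
 * aborted commands a hardware error sense is injected at random to test
 * how completed aborted commands are handled.
 */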
/* No locks */
void tm_dbg_release_cmd(struct scst_cmd *cmd)
{
        struct scst_cmd *c;
        unsigned long flags;

        spin_lock_irqsave(&scst_tm_dbg_lock, flags);
        list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
                                cmd_list_entry) {
                if (c == cmd) {
                        TRACE_MGMT_DBG("Abort request for "
                                "delayed cmd %p (tag=%llu), moving it to "
                                "active cmd list (delayed_cmds_count=%d)",
                                c, c->tag, tm_dbg_delayed_cmds_count);

                        if (!test_bit(SCST_CMD_ABORTED_OTHER,
                                            &cmd->cmd_flags)) {
                                /* Test how completed commands are handled */
                                if ((scst_random() % 10) == 5) {
                                        scst_set_cmd_error(cmd,
                                                SCST_LOAD_SENSE(
                                                scst_sense_hardw_error));
                                        /* It's completed now */
                                }
                        }

                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);
                        break;
                }
        }
        spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
}

/* Might be called under scst_mutex */
void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
{
        unsigned long flags;

        if (dev != NULL) {
                struct scst_tgt_dev *tgt_dev;
                bool found = false;

                spin_lock_bh(&dev->dev_lock);
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                                            dev_tgt_dev_list_entry) {
                        if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
                                        &tgt_dev->tgt_dev_flags)) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&dev->dev_lock);

                if (!found)
                        goto out;
        }

        spin_lock_irqsave(&scst_tm_dbg_lock, flags);
        if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
                TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
                        tm_dbg_delayed_cmds_count);
                tm_dbg_change_state();
                tm_dbg_flags.tm_dbg_release = 1;
                /*
                 * Used to make sure that all woken-up threads see the new
                 * value.
                 */
                smp_wmb();
                if (tm_dbg_p_cmd_list_waitQ != NULL)
                        wake_up_all(tm_dbg_p_cmd_list_waitQ);
        } else {
                TRACE_MGMT_DBG("%s: while in OFFLINE state, doing nothing", fn);
        }
        spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);