/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);

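/*
 * Allocates a zeroed sense buffer for cmd from scst_sense_mempool.
 * In atomic context the allocation can fail and -ENOMEM is returned;
 * otherwise __GFP_NOFAIL makes the allocation retry until it succeeds.
 */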
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

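/*
 * Sets CHECK CONDITION status on cmd and fills its sense buffer with
 * fixed-format sense data built from key/asc/ascq. A typical call,
 * as in scst_complete_request_sense() below, is:
 *
 *      scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 */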
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

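/*
 * Fills buffer with fixed-format sense data ("current errors"
 * response code 0x70): byte 2 carries the sense key, byte 7 the
 * additional sense length, bytes 12-13 the ASC/ASCQ pair.
 */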
void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_sense);

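/*
 * Reports a queue-full condition: BUSY if this is the only
 * outstanding command in the session or the session is still
 * initializing, TASK SET FULL otherwise.
 */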
void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

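/*
 * Returns the state in which processing of an abnormally completed
 * command should continue: commands that failed before or during
 * parsing go straight to response transmission, all others pass
 * through the dev_done phase first.
 */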
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        default:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

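/*
 * Truncates the command's SG vector so that exactly resp_data_len
 * bytes get transmitted; the original SG geometry is saved, so
 * scst_check_restore_sg_buff() can restore it before the buffer is
 * freed.
 */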
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity
 * in this session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No other locks are supposed to be held; scst_mutex must be held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity
 * in this session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity
 * in this session.
 */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %lld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%lld, rd_only %d)", dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

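/*
 * Allocates an internal command that inherits session, device and
 * LUN from orig_cmd and is queued at the head of the queue, ahead of
 * the initiator's own commands; used, e.g., for the internal REQUEST
 * SENSE below.
 */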
struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

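/*
 * Builds an internal 6-byte REQUEST SENSE command for orig_cmd and
 * adds it at the head of the active command list, so the sense data
 * gets fetched before any other command reaches the device.
 */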
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = sbuf_size;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of "
                "active cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd != NULL) {
                req = scsi_cmd->sc_request;
                if (req != NULL) {
                        if (req->sr_bufflen)
                                kfree(req->sr_buffer);
                        scsi_release_request(req);
                }
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, 15, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford to lose the RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, 15, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one that holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

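/*
 * Allocates and minimally initializes a session object; the caller is
 * expected to complete the registration (adding the session to the
 * target's and ACG's lists, etc.) under scst_mutex.
 */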
struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess,
                sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
                PRINT_CRIT_ERROR("session %p is going to shut down with "
                        "unknown shut phase %lx", sess, sess->shut_phase);
                sBUG();
        }

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);

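/*
 * Allocates a command and fills in safe defaults: SIMPLE queue type,
 * default timeout, a single reference and -1/"unknown" markers for
 * the not yet parsed data sizes and direction.
 */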
struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        cmd->start_time = jiffies;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

        cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
        cmd->dbl_ua_orig_resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /*
         * At this point tgt_dev can be dead, but the pointer remains
         * non-NULL.
         */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %llu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
                if (unlikely(!cmd->sent_for_exec)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                             "%d, target %s, lun %lld, sn %ld, expected_sn "
                             "%ld)", cmd, cmd->cdb[0], cmd->tgtt->name,
                             (long long unsigned int)cmd->lun,
                             cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change.
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)",
                                c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_CRIT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
        int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        /* cmd->dev->scsi_dev must be non-NULL here */
        req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
        if (req == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scsi_request failed");
                res = -ENOMEM;
                goto out;
        }

        cmd->scsi_req = req;

        memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
        req->sr_cmd_len = cmd->cdb_len;
        req->sr_data_direction = cmd->data_direction;
        req->sr_use_sg = cmd->sg_cnt;
        req->sr_bufflen = cmd->bufflen;
        req->sr_buffer = cmd->sg;
        req->sr_request->rq_disk = cmd->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        cmd->scsi_req->upper_private_data = cmd;

out:
        TRACE_EXIT();
        return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
        scsi_release_request(cmd->scsi_req);
        cmd->scsi_req = NULL;
}
#endif

1634 int scst_alloc_space(struct scst_cmd *cmd)
1635 {
1636         gfp_t gfp_mask;
1637         int res = -ENOMEM;
1638         int atomic = scst_cmd_atomic(cmd);
1639         int flags;
1640         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1641
1642         TRACE_ENTRY();
1643
1644         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1645
1646         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1647         if (cmd->no_sgv)
1648                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1649
1650         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1651                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1652         if (cmd->sg == NULL)
1653                 goto out;
1654
1655         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1656                 static int ll;
1657                 if (ll < 10) {
1658                         PRINT_INFO("Unable to complete command due to "
1659                                 "SG IO count limitation (requested %d, "
1660                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1661                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1662                         ll++;
1663                 }
1664                 goto out_sg_free;
1665         }
1666
1667         res = 0;
1668
1669 out:
1670         TRACE_EXIT();
1671         return res;
1672
1673 out_sg_free:
1674         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1675         cmd->sgv = NULL;
1676         cmd->sg = NULL;
1677         cmd->sg_cnt = 0;
1678         goto out;
1679 }
1680
1681 void scst_release_space(struct scst_cmd *cmd)
1682 {
1683         TRACE_ENTRY();
1684
1685         if (cmd->sgv == NULL)
1686                 goto out;
1687
1688         if (cmd->data_buf_alloced) {
1689                 TRACE_MEM("%s", "data_buf_alloced set, returning");
1690                 goto out;
1691         }
1692
1693         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1694
1695         cmd->sgv = NULL;
1696         cmd->sg_cnt = 0;
1697         cmd->sg = NULL;
1698         cmd->bufflen = 0;
1699         cmd->data_len = 0;
1700
1701 out:
1702         TRACE_EXIT();
1703         return;
1704 }
1705
1706 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1707
1708 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1709 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1710
1711 int scst_get_cdb_len(const uint8_t *cdb)
1712 {
1713         return SCST_GET_CDB_LEN(cdb[0]);
1714 }
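/*
 * Worked example (illustrative): for READ(10), opcode 0x28, the group
 * is (0x28 >> 5) & 0x7 = 1, so SCST_CDB_LENGTH[1] gives a 10-byte CDB.
 * Groups 3, 6 and 7 (reserved and vendor-specific) yield -1 (unknown).
 */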
1715
1716 /* get_trans_len_x: extracts x bytes from the CDB as the transfer length, starting at offset off */
1717
1718 /* for special commands */
1719 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1720 {
1721         cmd->bufflen = 6;
1722         return 0;
1723 }
1724
1725 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1726 {
1727         cmd->bufflen = READ_CAP_LEN;
1728         return 0;
1729 }
1730
1731 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1732 {
1733         cmd->bufflen = 1;
1734         return 0;
1735 }
1736
1737 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1738 {
1739         uint8_t *p = (uint8_t *)cmd->cdb + off;
1740         int res = 0;
1741
1742         cmd->bufflen = 0;
1743         cmd->bufflen |= ((u32)p[0]) << 8;
1744         cmd->bufflen |= ((u32)p[1]);
1745
1746         switch (cmd->cdb[1] & 0x1f) {
1747         case 0:
1748         case 1:
1749         case 6:
1750                 if (cmd->bufflen != 0) {
1751                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1752                                 "allocation length for service action %x",
1753                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1754                         goto out_inval;
1755                 }
1756                 break;
1757         }
1758
1759         switch (cmd->cdb[1] & 0x1f) {
1760         case 0:
1761         case 1:
1762                 cmd->bufflen = 20;
1763                 break;
1764         case 6:
1765                 cmd->bufflen = 32;
1766                 break;
1767         case 8:
1768                 cmd->bufflen = max(28, cmd->bufflen);
1769                 break;
1770         default:
1771                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1772                         cmd->cdb[1] & 0x1f);
1773                 goto out_inval;
1774         }
1775
1776 out:
1777         return res;
1778
1779 out_inval:
1780         scst_set_cmd_error(cmd,
1781                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1782         res = 1;
1783         goto out;
1784 }
1785
1786 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1787 {
1788         cmd->bufflen = (u32)cmd->cdb[off];
1789         return 0;
1790 }
1791
1792 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1793 {
1794         cmd->bufflen = (u32)cmd->cdb[off];
1795         if (cmd->bufflen == 0)
1796                 cmd->bufflen = 256;
1797         return 0;
1798 }
1799
1800 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1801 {
1802         const uint8_t *p = cmd->cdb + off;
1803
1804         cmd->bufflen = 0;
1805         cmd->bufflen |= ((u32)p[0]) << 8;
1806         cmd->bufflen |= ((u32)p[1]);
1807
1808         return 0;
1809 }
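/*
 * Example (illustrative): READ(10) carries its transfer length in
 * cdb[7..8], so this helper runs with off = 7; cdb[7] = 0x00,
 * cdb[8] = 0x10 yields bufflen = 16, in blocks until the parse
 * routine scales it by the block size.
 */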
1810
1811 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1812 {
1813         const uint8_t *p = cmd->cdb + off;
1814
1815         cmd->bufflen = 0;
1816         cmd->bufflen |= ((u32)p[0]) << 16;
1817         cmd->bufflen |= ((u32)p[1]) << 8;
1818         cmd->bufflen |= ((u32)p[2]);
1819
1820         return 0;
1821 }
1822
1823 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1824 {
1825         const uint8_t *p = cmd->cdb + off;
1826
1827         cmd->bufflen = 0;
1828         cmd->bufflen |= ((u32)p[0]) << 24;
1829         cmd->bufflen |= ((u32)p[1]) << 16;
1830         cmd->bufflen |= ((u32)p[2]) << 8;
1831         cmd->bufflen |= ((u32)p[3]);
1832
1833         return 0;
1834 }
1835
1836 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1837 {
1838         cmd->bufflen = 0;
1839         return 0;
1840 }
1841
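/*
 * Looks the opcode up in scst_scsi_op_table and fills in cdb_len,
 * op_name, data_direction and op_flags, then calls the entry's
 * get_trans_len() to compute bufflen. Returns 0 on success, -1 if the
 * opcode is unknown or unsupported for the device type (op_flags is
 * then set to SCST_INFO_INVALID), or a positive value if
 * get_trans_len() set an error on the cmd.
 */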
1842 int scst_get_cdb_info(struct scst_cmd *cmd)
1843 {
1844         int dev_type = cmd->dev->handler->type;
1845         int i, res = 0;
1846         uint8_t op;
1847         const struct scst_sdbops *ptr = NULL;
1848
1849         TRACE_ENTRY();
1850
1851         op = cmd->cdb[0];       /* get the opcode */
1852
1853         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1854                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1855                 dev_type);
1856
1857         i = scst_scsi_op_list[op];
1858         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1859                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1860                         ptr = &scst_scsi_op_table[i];
1861                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1862                               ptr->ops, ptr->devkey[0], /* disk     */
1863                               ptr->devkey[1],   /* tape     */
1864                               ptr->devkey[2],   /* printer */
1865                               ptr->devkey[3],   /* cpu      */
1866                               ptr->devkey[4],   /* cdr      */
1867                               ptr->devkey[5],   /* cdrom    */
1868                               ptr->devkey[6],   /* scanner */
1869                               ptr->devkey[7],   /* worm     */
1870                               ptr->devkey[8],   /* changer */
1871                               ptr->devkey[9],   /* commdev */
1872                               ptr->op_name);
1873                         TRACE_DBG("direction=%d flags=%d off=%d",
1874                               ptr->direction,
1875                               ptr->flags,
1876                               ptr->off);
1877                         break;
1878                 }
1879                 i++;
1880         }
1881
1882         if (ptr == NULL) {
1883                 /* opcode not found, or not supported for this device type */
1884                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1885                       dev_type);
1886                 res = -1;
1887                 cmd->op_flags = SCST_INFO_INVALID;
1888                 goto out;
1889         }
1890
1891         cmd->cdb_len = SCST_GET_CDB_LEN(op);
1892         cmd->op_name = ptr->op_name;
1893         cmd->data_direction = ptr->direction;
1894         cmd->op_flags = ptr->flags;
1895         res = (*ptr->get_trans_len)(cmd, ptr->off);
1896
1897         if (cmd->bufflen == 0) {
1898                 /*
1899                  * According to SPC, bufflen 0 for data transfer commands
1900                  * isn't an error, so we need to fix up the transfer direction.
1901                  */
1902                 cmd->data_direction = SCST_DATA_NONE;
1903         }
1904
1905 out:
1906         TRACE_EXIT();
1907         return res;
1908 }
1909 EXPORT_SYMBOL(scst_get_cdb_info);
1910
1911 /*
1912  * Routine to extract a lun number from an 8-byte LUN structure
1913  * in network byte order (BE).
1914  * (see SAM-2, Section 4.12.3 page 40)
1915  * Supports 2 types of lun unpacking: peripheral and logical unit.
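 *
 * Examples (illustrative): {0x00, 0x05} unpacks to LUN 5 via the
 * peripheral method; {0x40, 0x05} unpacks to LUN 5 via the flat space
 * method, since ((0x40 & 0x3f) << 8) | 0x05 = 5.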
1916  */
1917 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
1918 {
1919         uint64_t res = NO_SUCH_LUN;
1920         int address_method;
1921
1922         TRACE_ENTRY();
1923
1924         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1925
1926         if (unlikely(len < 2)) {
1927                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1928                         "more", len);
1929                 goto out;
1930         }
1931
1932         if (len > 2) {
1933                 switch (len) {
1934                 case 8:
1935                         if ((*((uint64_t *)lun) &
1936                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1937                                 goto out_err;
1938                         break;
1939                 case 4:
1940                         if (*((uint16_t *)&lun[2]) != 0)
1941                                 goto out_err;
1942                         break;
1943                 case 6:
1944                         if (*((uint32_t *)&lun[2]) != 0)
1945                                 goto out_err;
1946                         break;
1947                 default:
1948                         goto out_err;
1949                 }
1950         }
1951
1952         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1953         switch (address_method) {
1954         case 0: /* peripheral device addressing method */
1955 #if 0 /* Looks like it's legal to use it as the flat space addressing method as well */
1956                 if (*lun) {
1957                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
1958                              "peripheral device addressing method 0x%02x, "
1959                              "expected 0", *lun);
1960                         break;
1961                 }
1962                 res = *(lun + 1);
1963                 break;
1964 #else
1965                 /* fall through */
1966 #endif
1967
1968         case 1: /* flat space addressing method */
1969                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1970                 break;
1971
1972         case 2: /* logical unit addressing method */
1973                 if (*lun & 0x3f) {
1974                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1975                                     "addressing method 0x%02x, expected 0",
1976                                     *lun & 0x3f);
1977                         break;
1978                 }
1979                 if (*(lun + 1) & 0xe0) {
1980                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
1981                                     "addressing method 0x%02x, expected 0",
1982                                     (*(lun + 1) & 0xe0) >> 5);
1983                         break;
1984                 }
1985                 res = *(lun + 1) & 0x1f;
1986                 break;
1987
1988         case 3: /* extended logical unit addressing method */
1989         default:
1990                 PRINT_ERROR("Unimplemented LUN addressing method %u",
1991                             address_method);
1992                 break;
1993         }
1994
1995 out:
1996         TRACE_EXIT_RES((int)res);
1997         return res;
1998
1999 out_err:
2000         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2001         goto out;
2002 }
2003
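/*
 * Examples (illustrative): 512 -> 9, 4096 -> 12. A size that is not a
 * power of two of at least 512, e.g. 520, stops at shift 3 and is
 * rejected with -1.
 */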
2004 int scst_calc_block_shift(int sector_size)
2005 {
2006         int block_shift = 0;
2007         int t;
2008
2009         if (sector_size == 0)
2010                 sector_size = 512;
2011
2012         t = sector_size;
2013         while (1) {
2014                 if ((t & 1) != 0)
2015                         break;
2016                 t >>= 1;
2017                 block_shift++;
2018         }
2019         if (block_shift < 9) {
2020                 PRINT_ERROR("Wrong sector size %d", sector_size);
2021                 block_shift = -1;
2022         }
2023
2024         TRACE_EXIT_RES(block_shift);
2025         return block_shift;
2026 }
2027 EXPORT_SYMBOL(scst_calc_block_shift);
2028
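/*
 * Illustrative usage sketch (names hypothetical): a disk dev handler
 * would typically call this from its parse() callback:
 *
 *      static int disk_parse(struct scst_cmd *cmd)
 *      {
 *              return scst_sbc_generic_parse(cmd, disk_get_block_shift);
 *      }
 *
 * where disk_get_block_shift() returns the block shift cached at
 * attach time, e.g. via scst_calc_block_shift() above.
 */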
2029 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2030         int (*get_block_shift)(struct scst_cmd *cmd))
2031 {
2032         int res = 0;
2033
2034         TRACE_ENTRY();
2035
2036         /*
2037          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2038          * therefore change them only if necessary
2039          */
2040
2041         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2042               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2043
2044         switch (cmd->cdb[0]) {
2045         case SERVICE_ACTION_IN:
2046                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2047                         cmd->bufflen = READ_CAP16_LEN;
2048                         cmd->data_direction = SCST_DATA_READ;
2049                 }
2050                 break;
2051         case VERIFY_6:
2052         case VERIFY:
2053         case VERIFY_12:
2054         case VERIFY_16:
2055                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2056                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2057                         cmd->bufflen = 0;
2058                         goto set_timeout;
2059                 } else
2060                         cmd->data_len = 0;
2061                 break;
2062         default:
2063                 /* It's all good */
2064                 break;
2065         }
2066
2067         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2068                 /*
2069                  * No need for locks here, since *_detach() cannot be
2070                  * called while there are outstanding commands.
2071                  */
2072                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2073         }
2074
2075 set_timeout:
2076         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2077                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2078         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2079                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2080         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2081                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2082
2083         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2084               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2085
2086         TRACE_EXIT_RES(res);
2087         return res;
2088 }
2089 EXPORT_SYMBOL(scst_sbc_generic_parse);
2090
2091 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2092         int (*get_block_shift)(struct scst_cmd *cmd))
2093 {
2094         int res = 0;
2095
2096         TRACE_ENTRY();
2097
2098         /*
2099          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2100          * therefore change them only if necessary
2101          */
2102
2103         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2104               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2105
2106         cmd->cdb[1] &= 0x1f;
2107
2108         switch (cmd->cdb[0]) {
2109         case VERIFY_6:
2110         case VERIFY:
2111         case VERIFY_12:
2112         case VERIFY_16:
2113                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2114                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2115                         cmd->bufflen = 0;
2116                         goto set_timeout;
2117                 }
2118                 break;
2119         default:
2120                 /* It's all good */
2121                 break;
2122         }
2123
2124         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2125                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2126
2127 set_timeout:
2128         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2129                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2130         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2131                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2132         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2133                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2134
2135         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2136                 cmd->data_direction);
2137
2138         TRACE_EXIT_RES(res);
2139         return res;
2140 }
2141 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2142
2143 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2144         int (*get_block_shift)(struct scst_cmd *cmd))
2145 {
2146         int res = 0;
2147
2148         TRACE_ENTRY();
2149
2150         /*
2151          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2152          * therefore change them only if necessary
2153          */
2154
2155         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2156               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2157
2158         cmd->cdb[1] &= 0x1f;
2159
2160         switch (cmd->cdb[0]) {
2161         case VERIFY_6:
2162         case VERIFY:
2163         case VERIFY_12:
2164         case VERIFY_16:
2165                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2166                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2167                         cmd->bufflen = 0;
2168                         goto set_timeout;
2169                 }
2170                 break;
2171         default:
2172                 /* It's all good */
2173                 break;
2174         }
2175
2176         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2177                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2178
2179 set_timeout:
2180         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2181                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2182         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2183                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2184         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2185                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2186
2187         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2188                 cmd->data_direction);
2189
2190         TRACE_EXIT_RES(res);
2191         return res;
2192 }
2193 EXPORT_SYMBOL(scst_modisk_generic_parse);
2194
2195 int scst_tape_generic_parse(struct scst_cmd *cmd,
2196         int (*get_block_size)(struct scst_cmd *cmd))
2197 {
2198         int res = 0;
2199
2200         TRACE_ENTRY();
2201
2202         /*
2203          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2204          * therefore change them only if necessary
2205          */
2206
2207         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2208               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2209
2210         if (cmd->cdb[0] == READ_POSITION) {
2211                 int tclp = cmd->cdb[1] & TCLP_BIT;
2212                 int long_bit = cmd->cdb[1] & LONG_BIT;
2213                 int bt = cmd->cdb[1] & BT_BIT;
2214
2215                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2216                         cmd->bufflen =
2217                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2218                         cmd->data_direction = SCST_DATA_READ;
2219                 } else {
2220                         cmd->bufflen = 0;
2221                         cmd->data_direction = SCST_DATA_NONE;
2222                 }
2223         }
2224
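        /*
         * Note: the chained '&' below is intentional. It relies on
         * SCST_TRANSFER_LEN_TYPE_FIXED and the FIXED bit in cdb[1]
         * (bit 0 of tape READ/WRITE CDBs) occupying the same bit
         * position, so the scaling happens only for fixed-block mode.
         */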
2225         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2226                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2227
2228         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2229                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2230         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2231                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2232         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2233                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2234
2235         TRACE_EXIT_RES(res);
2236         return res;
2237 }
2238 EXPORT_SYMBOL(scst_tape_generic_parse);
2239
2240 static int scst_null_parse(struct scst_cmd *cmd)
2241 {
2242         int res = 0;
2243
2244         TRACE_ENTRY();
2245
2246         /*
2247          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2248          * therefore change them only if necessary
2249          */
2250
2251         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2252               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2253 #if 0
2254         switch (cmd->cdb[0]) {
2255         default:
2256                 /* It's all good */
2257                 break;
2258         }
2259 #endif
2260         TRACE_DBG("res %d bufflen %d direct %d",
2261               res, cmd->bufflen, cmd->data_direction);
2262
2263         TRACE_EXIT_RES(res);
2264         return res;
2265 }
2266
2267 int scst_changer_generic_parse(struct scst_cmd *cmd,
2268         int (*nothing)(struct scst_cmd *cmd))
2269 {
2270         int res = scst_null_parse(cmd);
2271
2272         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2273                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2274         else
2275                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2276
2277         return res;
2278 }
2279 EXPORT_SYMBOL(scst_changer_generic_parse);
2280
2281 int scst_processor_generic_parse(struct scst_cmd *cmd,
2282         int (*nothing)(struct scst_cmd *cmd))
2283 {
2284         int res = scst_null_parse(cmd);
2285
2286         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2287                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2288         else
2289                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2290
2291         return res;
2292 }
2293 EXPORT_SYMBOL(scst_processor_generic_parse);
2294
2295 int scst_raid_generic_parse(struct scst_cmd *cmd,
2296         int (*nothing)(struct scst_cmd *cmd))
2297 {
2298         int res = scst_null_parse(cmd);
2299
2300         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2301                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2302         else
2303                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2304
2305         return res;
2306 }
2307 EXPORT_SYMBOL(scst_raid_generic_parse);
2308
2309 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2310         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2311 {
2312         int opcode = cmd->cdb[0];
2313         int status = cmd->status;
2314         int res = SCST_CMD_STATE_DEFAULT;
2315
2316         TRACE_ENTRY();
2317
2318         /*
2319          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2320          * based on cmd->status and cmd->data_direction, therefore change
2321          * them only if necessary
2322          */
2323
2324         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2325                 switch (opcode) {
2326                 case READ_CAPACITY:
2327                 {
2328                         /* Always keep track of disk capacity */
2329                         int buffer_size, sector_size, sh;
2330                         uint8_t *buffer;
2331
2332                         buffer_size = scst_get_buf_first(cmd, &buffer);
2333                         if (unlikely(buffer_size <= 0)) {
2334                                 if (buffer_size < 0) {
2335                                         PRINT_ERROR("%s: Unable to get the buffer "
2336                                                 "(%d)", __func__, buffer_size);
2337                                 }
2338                                 goto out;
2339                         }
2340
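                        /*
                         * READ CAPACITY(10) data: bytes 0-3 hold the last
                         * LBA, bytes 4-7 the block length in bytes, both
                         * big-endian.
                         */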
2341                         sector_size =
2342                             ((buffer[4] << 24) | (buffer[5] << 16) |
2343                              (buffer[6] << 8) | (buffer[7] << 0));
2344                         scst_put_buf(cmd, buffer);
2345                         if (sector_size != 0)
2346                                 sh = scst_calc_block_shift(sector_size);
2347                         else
2348                                 sh = 0;
2349                         set_block_shift(cmd, sh);
2350                         TRACE_DBG("block_shift %d", sh);
2351                         break;
2352                 }
2353                 default:
2354                         /* It's all good */
2355                         break;
2356                 }
2357         }
2358
2359         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2360               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2361
2362 out:
2363         TRACE_EXIT_RES(res);
2364         return res;
2365 }
2366 EXPORT_SYMBOL(scst_block_generic_dev_done);
2367
2368 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2369         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2370 {
2371         int opcode = cmd->cdb[0];
2372         int res = SCST_CMD_STATE_DEFAULT;
2373         int buffer_size, bs;
2374         uint8_t *buffer = NULL;
2375
2376         TRACE_ENTRY();
2377
2378         /*
2379          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2380          * based on cmd->status and cmd->data_direction, therefore change
2381          * them only if necessary
2382          */
2383
2384         switch (opcode) {
2385         case MODE_SENSE:
2386         case MODE_SELECT:
2387                 buffer_size = scst_get_buf_first(cmd, &buffer);
2388                 if (unlikely(buffer_size <= 0)) {
2389                         if (buffer_size < 0) {
2390                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2391                                         __func__, buffer_size);
2392                         }
2393                         goto out;
2394                 }
2395                 break;
2396         }
2397
2398         switch (opcode) {
2399         case MODE_SENSE:
2400                 TRACE_DBG("%s", "MODE_SENSE");
2401                 if ((cmd->cdb[2] & 0xC0) == 0) {
2402                         if (buffer[3] == 8) {
2403                                 bs = (buffer[9] << 16) |
2404                                     (buffer[10] << 8) | buffer[11];
2405                                 set_block_size(cmd, bs);
2406                         }
2407                 }
2408                 break;
2409         case MODE_SELECT:
2410                 TRACE_DBG("%s", "MODE_SELECT");
2411                 if (buffer[3] == 8) {
2412                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2413                             (buffer[11]);
2414                         set_block_size(cmd, bs);
2415                 }
2416                 break;
2417         default:
2418                 /* It's all good */
2419                 break;
2420         }
2421
2422         switch (opcode) {
2423         case MODE_SENSE:
2424         case MODE_SELECT:
2425                 scst_put_buf(cmd, buffer);
2426                 break;
2427         }
2428
2429 out:
2430         TRACE_EXIT_RES(res);
2431         return res;
2432 }
2433 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2434
2435 static void scst_check_internal_sense(struct scst_device *dev, int result,
2436         uint8_t *sense, int sense_len)
2437 {
2438         TRACE_ENTRY();
2439
2440         if (host_byte(result) == DID_RESET) {
2441                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2442                         "reset UA");
2443                 scst_set_sense(sense, sense_len,
2444                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2445                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2446         } else if ((status_byte(result) == CHECK_CONDITION) &&
2447                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2448                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2449
2450         TRACE_EXIT();
2451         return;
2452 }
2453
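/*
 * Issues an internal MODE SENSE for the control mode page (0x0A) and
 * caches the returned TST, QUEUE ALGORITHM MODIFIER, SWP and TAS
 * fields in the device. The buffer is a 4-byte mode parameter header
 * followed by the 0x0A page, hence the buffer[4+n] indexing below.
 */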
2454 int scst_obtain_device_parameters(struct scst_device *dev)
2455 {
2456         int res = 0, i;
2457         uint8_t cmd[16];
2458         uint8_t buffer[4+0x0A];
2459         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2460
2461         TRACE_ENTRY();
2462
2463         sBUG_ON(in_interrupt() || in_atomic());
2464         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2465
2466         for (i = 0; i < 5; i++) {
2467                 /* Get control mode page */
2468                 memset(cmd, 0, sizeof(cmd));
2469                 cmd[0] = MODE_SENSE;
2470                 cmd[1] = 8; /* DBD */
2471                 cmd[2] = 0x0A;
2472                 cmd[4] = sizeof(buffer);
2473
2474                 memset(buffer, 0, sizeof(buffer));
2475                 memset(sense_buffer, 0, sizeof(sense_buffer));
2476
2477                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2478                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2479                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2480
2481                 TRACE_DBG("MODE_SENSE done: %x", res);
2482
2483                 if (scsi_status_is_good(res)) {
2484                         int q;
2485
2486                         PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2487                                 buffer, sizeof(buffer));
2488
2489                         dev->tst = buffer[4+2] >> 5;
2490                         q = buffer[4+3] >> 4;
2491                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2492                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2493                                         "%d:%d:%d:%d", q,
2494                                         dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2495                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2496                         }
2497                         dev->queue_alg = q;
2498                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2499                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2500
2501                         /*
2502                          * Unfortunately, SCSI ML doesn't provide a way to
2503                          * specify a command's task attribute, so we can rely
2504                          * only on the device's restricted reordering.
2505                          */
2506                         dev->has_own_order_mgmt = !dev->queue_alg;
2507
2508                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2509                                 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2510                                 "%d", dev->scsi_dev->host->host_no,
2511                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2512                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2513                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2514
2515                         goto out;
2516                 } else {
2517 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2518                         if ((status_byte(res) == CHECK_CONDITION) &&
2519 #else
2520                         if (
2521 #endif
2522                             SCST_SENSE_VALID(sense_buffer)) {
2523                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2524                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2525                                                 "%d:%d:%d:%d doesn't support control "
2526                                                 "mode page, using defaults: TST "
2527                                                 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2528                                                 "has_own_order_mgmt %d",
2529                                                 dev->scsi_dev->host->host_no,
2530                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2531                                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2532                                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2533                                         res = 0;
2534                                         goto out;
2535                                 } else if (sense_buffer[2] == NOT_READY) {
2536                                         TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2537                                                 dev->scsi_dev->host->host_no,
2538                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2539                                                 dev->scsi_dev->lun);
2540                                         res = 0;
2541                                         goto out;
2542                                 }
2543                         } else {
2544                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2545                                         "device %d:%d:%d:%d failed: %x",
2546                                         dev->scsi_dev->host->host_no,
2547                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2548                                         dev->scsi_dev->lun, res);
2549                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2550                                         "sense", sense_buffer, sizeof(sense_buffer));
2551                         }
2552                         scst_check_internal_sense(dev, res, sense_buffer,
2553                                         sizeof(sense_buffer));
2554                 }
2555         }
2556         res = -ENODEV;
2557
2558 out:
2559         TRACE_EXIT_RES(res);
2560         return res;
2561 }
2562 EXPORT_SYMBOL(scst_obtain_device_parameters);
2563
2564 /* Called under dev_lock and BH off */
2565 void scst_process_reset(struct scst_device *dev,
2566         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2567         struct scst_mgmt_cmd *mcmd, bool setUA)
2568 {
2569         struct scst_tgt_dev *tgt_dev;
2570         struct scst_cmd *cmd, *tcmd;
2571
2572         TRACE_ENTRY();
2573
2574         /* Clear RESERVE'ation, if necessary */
2575         if (dev->dev_reserved) {
2576                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2577                                     dev_tgt_dev_list_entry) {
2578                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2579                                 "lun %lld",
2580                                 (long long unsigned int)tgt_dev->lun);
2581                         clear_bit(SCST_TGT_DEV_RESERVED,
2582                                   &tgt_dev->tgt_dev_flags);
2583                 }
2584                 dev->dev_reserved = 0;
2585                 /*
2586                  * There is no need to send RELEASE, since the device is going
2587                  * to be reset. Moreover, since we may be running in a RESET TM
2588                  * function, sending it might be dangerous.
2589                  */
2590         }
2591
2592         dev->dev_double_ua_possible = 1;
2593         dev->dev_serialized = 1;
2594
2595         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2596                 dev_tgt_dev_list_entry) {
2597                 struct scst_session *sess = tgt_dev->sess;
2598
2599                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2600                 scst_free_all_UA(tgt_dev);
2601                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2602
2603                 spin_lock_irq(&sess->sess_list_lock);
2604
2605                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2606                 list_for_each_entry(cmd, &sess->search_cmd_list,
2607                                 search_cmd_list_entry) {
2608                         if (cmd == exclude_cmd)
2609                                 continue;
2610                         if ((cmd->tgt_dev == tgt_dev) ||
2611                             ((cmd->tgt_dev == NULL) &&
2612                              (cmd->lun == tgt_dev->lun))) {
2613                                 scst_abort_cmd(cmd, mcmd,
2614                                         (tgt_dev->sess != originator), 0);
2615                         }
2616                 }
2617                 spin_unlock_irq(&sess->sess_list_lock);
2618         }
2619
2620         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2621                                 blocked_cmd_list_entry) {
2622                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2623                         list_del(&cmd->blocked_cmd_list_entry);
2624                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2625                                 "to active cmd list", cmd);
2626                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2627                         list_add_tail(&cmd->cmd_list_entry,
2628                                 &cmd->cmd_lists->active_cmd_list);
2629                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2630                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2631                 }
2632         }
2633
2634         if (setUA) {
2635                 /* BH already off */
2636                 spin_lock(&scst_temp_UA_lock);
2637                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2638                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2639                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2640                         sizeof(scst_temp_UA));
2641                 spin_unlock(&scst_temp_UA_lock);
2642         }
2643
2644         TRACE_EXIT();
2645         return;
2646 }
2647
2648 int scst_set_pending_UA(struct scst_cmd *cmd)
2649 {
2650         int res = 0;
2651         struct scst_tgt_dev_UA *UA_entry;
2652
2653         TRACE_ENTRY();
2654
2655         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2656
2657         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2658
2659         /* UA list could be cleared behind us, so retest */
2660         if (list_empty(&cmd->tgt_dev->UA_list)) {
2661                 TRACE_DBG("%s",
2662                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2663                 res = -1;
2664                 goto out_unlock;
2665         }
2666
2667         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2668                               UA_list_entry);
2669
2670         TRACE_DBG("next %p UA_entry %p",
2671               cmd->tgt_dev->UA_list.next, UA_entry);
2672
2673         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2674                 sizeof(UA_entry->UA_sense_buffer));
2675
2676         cmd->ua_ignore = 1;
2677
2678         list_del(&UA_entry->UA_list_entry);
2679
2680         mempool_free(UA_entry, scst_ua_mempool);
2681
2682         if (list_empty(&cmd->tgt_dev->UA_list)) {
2683                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2684                           &cmd->tgt_dev->tgt_dev_flags);
2685         }
2686
2687         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2688
2689 out:
2690         TRACE_EXIT_RES(res);
2691         return res;
2692
2693 out_unlock:
2694         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2695         goto out;
2696 }
2697
2698 /* Called under tgt_dev_lock and BH off */
2699 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2700         const uint8_t *sense, int sense_len, int head)
2701 {
2702         struct scst_tgt_dev_UA *UA_entry = NULL;
2703
2704         TRACE_ENTRY();
2705
2706         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2707         if (UA_entry == NULL) {
2708                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2709                      "allocation failed. The UNIT ATTENTION "
2710                      "on some sessions will be missed");
2711                 PRINT_BUFFER("Lost UA", sense, sense_len);
2712                 goto out;
2713         }
2714         memset(UA_entry, 0, sizeof(*UA_entry));
2715
2716         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2717                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2718         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2719
2720         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2721
2722         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2723
2724         if (head)
2725                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2726         else
2727                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2728
2729 out:
2730         TRACE_EXIT();
2731         return;
2732 }
2733
2734 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2735         const uint8_t *sense, int sense_len, int head)
2736 {
2737         int skip_UA = 0;
2738         struct scst_tgt_dev_UA *UA_entry_tmp;
2739
2740         TRACE_ENTRY();
2741
2742         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2743
2744         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2745                             UA_list_entry) {
2746                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2747                         TRACE_MGMT_DBG("%s", "UA already exists");
2748                         skip_UA = 1;
2749                         break;
2750                 }
2751         }
2752
2753         if (skip_UA == 0)
2754                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2755
2756         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2757
2758         TRACE_EXIT();
2759         return;
2760 }
2761
2762 /* Called under dev_lock and BH off */
2763 void scst_dev_check_set_local_UA(struct scst_device *dev,
2764         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2765 {
2766         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2767
2768         TRACE_ENTRY();
2769
2770         if (exclude != NULL)
2771                 exclude_tgt_dev = exclude->tgt_dev;
2772
2773         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2774                         dev_tgt_dev_list_entry) {
2775                 if (tgt_dev != exclude_tgt_dev)
2776                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2777         }
2778
2779         TRACE_EXIT();
2780         return;
2781 }
2782
2783 /* Called under dev_lock and BH off */
2784 void __scst_dev_check_set_UA(struct scst_device *dev,
2785         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2786 {
2787         TRACE_ENTRY();
2788
2789         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2790
2791         /* Check for reset UA */
2792         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2793                 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2794                         exclude, NULL, false);
2795
2796         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2797
2798         TRACE_EXIT();
2799         return;
2800 }
2801
2802 /* Called under tgt_dev_lock or when tgt_dev is unused */
2803 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2804 {
2805         struct scst_tgt_dev_UA *UA_entry, *t;
2806
2807         TRACE_ENTRY();
2808
2809         list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2810                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2811                                (long long unsigned int)tgt_dev->lun);
2812                 list_del(&UA_entry->UA_list_entry);
2813                 mempool_free(UA_entry, scst_ua_mempool);
2814         }
2815         INIT_LIST_HEAD(&tgt_dev->UA_list);
2816         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2817
2818         TRACE_EXIT();
2819         return;
2820 }
2821
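/*
 * Scans tgt_dev's deferred and skipped-SN command lists for commands
 * whose SN has become the expected one. Returns the first ready
 * deferred command (any further ready ones are moved to the active
 * list), or NULL if nothing is ready.
 */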
2822 /* No locks */
2823 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2824 {
2825         struct scst_cmd *res = NULL, *cmd, *t;
2826         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2827
2828         spin_lock_irq(&tgt_dev->sn_lock);
2829
2830         if (unlikely(tgt_dev->hq_cmd_count != 0))
2831                 goto out_unlock;
2832
2833 restart:
2834         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2835                                 sn_cmd_list_entry) {
2836                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2837                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2838                 if (cmd->sn == expected_sn) {
2839                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2840                                 cmd, cmd->sn, cmd->sn_set);
2841                         tgt_dev->def_cmd_count--;
2842                         list_del(&cmd->sn_cmd_list_entry);
2843                         if (res == NULL)
2844                                 res = cmd;
2845                         else {
2846                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2847                                 TRACE_SN("Adding cmd %p to active cmd list",
2848                                         cmd);
2849                                 list_add_tail(&cmd->cmd_list_entry,
2850                                         &cmd->cmd_lists->active_cmd_list);
2851                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2852                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2853                         }
2854                 }
2855         }
2856         if (res != NULL)
2857                 goto out_unlock;
2858
2859         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2860                                 sn_cmd_list_entry) {
2861                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2862                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2863                 if (cmd->sn == expected_sn) {
2864                         atomic_t *slot = cmd->sn_slot;
2865                         /*
2866                          * !! At this point any pointer in cmd, except !!
2867                          * !! sn_slot and sn_cmd_list_entry, could be   !!
2868                          * !! already destroyed                         !!
2869                          */
2870                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2871                                  cmd,
2872                                  (long long unsigned int)cmd->tag,
2873                                  cmd->sn);
2874                         tgt_dev->def_cmd_count--;
2875                         list_del(&cmd->sn_cmd_list_entry);
2876                         spin_unlock_irq(&tgt_dev->sn_lock);
2877                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2878                                              &cmd->cmd_flags))
2879                                 scst_destroy_put_cmd(cmd);
2880                         scst_inc_expected_sn(tgt_dev, slot);
2881                         expected_sn = tgt_dev->expected_sn;
2882                         spin_lock_irq(&tgt_dev->sn_lock);
2883                         goto restart;
2884                 }
2885         }
2886
2887 out_unlock:
2888         spin_unlock_irq(&tgt_dev->sn_lock);
2889         return res;
2890 }
2891
2892 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2893         struct scst_thr_data_hdr *data,
2894         void (*free_fn) (struct scst_thr_data_hdr *data))
2895 {
2896         data->pid = current->pid;
2897         atomic_set(&data->ref, 1);
2898         EXTRACHECKS_BUG_ON(free_fn == NULL);
2899         data->free_fn = free_fn;
2900         spin_lock(&tgt_dev->thr_data_lock);
2901         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2902         spin_unlock(&tgt_dev->thr_data_lock);
2903 }
2904 EXPORT_SYMBOL(scst_add_thr_data);
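/*
 * Illustrative usage sketch (names hypothetical): a dev handler keeps
 * per-thread state by embedding struct scst_thr_data_hdr in its own
 * structure:
 *
 *      struct my_thr_data {
 *              struct scst_thr_data_hdr hdr;
 *              int my_field;
 *      };
 *
 *      struct scst_thr_data_hdr *d = scst_find_thr_data(tgt_dev);
 *      if (d == NULL) {
 *              struct my_thr_data *td = kzalloc(sizeof(*td), GFP_KERNEL);
 *              if (td != NULL)
 *                      scst_add_thr_data(tgt_dev, &td->hdr, my_free_fn);
 *      } else {
 *              ...use container_of(d, struct my_thr_data, hdr)...
 *              scst_thr_data_put(d);
 *      }
 */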
2905
2906 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2907 {
2908         spin_lock(&tgt_dev->thr_data_lock);
2909         while (!list_empty(&tgt_dev->thr_data_list)) {
2910                 struct scst_thr_data_hdr *d = list_entry(
2911                                 tgt_dev->thr_data_list.next, typeof(*d),
2912                                 thr_data_list_entry);
2913                 list_del(&d->thr_data_list_entry);
2914                 spin_unlock(&tgt_dev->thr_data_lock);
2915                 scst_thr_data_put(d);
2916                 spin_lock(&tgt_dev->thr_data_lock);
2917         }
2918         spin_unlock(&tgt_dev->thr_data_lock);
2919         return;
2920 }
2921 EXPORT_SYMBOL(scst_del_all_thr_data);
2922
2923 void scst_dev_del_all_thr_data(struct scst_device *dev)
2924 {
2925         struct scst_tgt_dev *tgt_dev;
2926
2927         TRACE_ENTRY();
2928
2929         mutex_lock(&scst_mutex);
2930
2931         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2932                                 dev_tgt_dev_list_entry) {
2933                 scst_del_all_thr_data(tgt_dev);
2934         }
2935
2936         mutex_unlock(&scst_mutex);
2937
2938         TRACE_EXIT();
2939         return;
2940 }
2941 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
2942
2943 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2944 {
2945         struct scst_thr_data_hdr *res = NULL, *d;
2946
2947         spin_lock(&tgt_dev->thr_data_lock);
2948         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2949                 if (d->pid == current->pid) {
2950                         res = d;
2951                         scst_thr_data_get(res);
2952                         break;
2953                 }
2954         }
2955         spin_unlock(&tgt_dev->thr_data_lock);
2956         return res;
2957 }
2958 EXPORT_SYMBOL(scst_find_thr_data);
2959
2960 /* dev_lock supposed to be held and BH disabled */
2961 void __scst_block_dev(struct scst_device *dev)
2962 {
2963         dev->block_count++;
2964         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2965 }
2966
2967 /* No locks */
2968 void scst_block_dev(struct scst_device *dev, int outstanding)
2969 {
2970         spin_lock_bh(&dev->dev_lock);
2971         __scst_block_dev(dev);
2972         spin_unlock_bh(&dev->dev_lock);
2973
2974         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2975         smp_mb();
2976
2977         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2978                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2979         wait_event(dev->on_dev_waitQ,
2980                 atomic_read(&dev->on_dev_count) <= outstanding);
2981         TRACE_MGMT_DBG("%s", "wait_event() returned");
2982 }
2983
2984 /* No locks */
2985 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
2986 {
2987         sBUG_ON(cmd->needs_unblocking);
2988
2989         cmd->needs_unblocking = 1;
2990         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
2991                        cmd, (long long unsigned int)cmd->tag);
2992
2993         scst_block_dev(cmd->dev, outstanding);
2994 }
2995
2996 /* No locks */
2997 void scst_unblock_dev(struct scst_device *dev)
2998 {
2999         spin_lock_bh(&dev->dev_lock);
3000         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3001                 dev->block_count-1, dev);
3002         if (--dev->block_count == 0)
3003                 scst_unblock_cmds(dev);
3004         spin_unlock_bh(&dev->dev_lock);
3005         sBUG_ON(dev->block_count < 0);
3006 }
3007
3008 /* No locks */
3009 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3010 {
3011         scst_unblock_dev(cmd->dev);
3012         cmd->needs_unblocking = 0;
3013 }
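/*
 * Illustrative pairing sketch (hypothetical caller): code that needs a
 * quiesced device brackets the critical action as
 *
 *      scst_block_dev_cmd(cmd, 0);
 *      ...critical action with no commands on the device...
 *      scst_unblock_dev_cmd(cmd);
 *
 * where "outstanding" is how many on-device commands may still be in
 * flight when the wait in scst_block_dev() completes.
 */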
3014
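/*
 * Accounts the cmd as being on the device. If the device is blocked,
 * or serialized handling requires it, the cmd is instead parked on
 * blocked_cmd_list. Returns 0 if the cmd may proceed to the device,
 * 1 if its execution was delayed.
 */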
3015 /* No locks */
3016 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3017 {
3018         int res = 0;
3019         struct scst_device *dev = cmd->dev;
3020
3021         TRACE_ENTRY();
3022
3023         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3024
3025         atomic_inc(&dev->on_dev_count);
3026         cmd->dec_on_dev_needed = 1;
3027         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3028
3029         if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
3030                 /*
3031                  * The original command can already block the device, so
3032                  * REQUEST SENSE command should always pass.
3033                  */
3034                 goto out;
3035         }
3036
3037 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3038         spin_lock_bh(&dev->dev_lock);
3039         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3040                 goto out_unlock;
3041         if (dev->block_count > 0) {
3042                 scst_dec_on_dev_cmd(cmd);
3043                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3044                         "serializing (tag %llu, dev %p)", cmd, (long long unsigned int)cmd->tag, dev);
3045                 list_add_tail(&cmd->blocked_cmd_list_entry,
3046                               &dev->blocked_cmd_list);
3047                 res = 1;
3048         } else {
3049                 __scst_block_dev(dev);
3050                 cmd->inc_blocking = 1;
3051         }
3052         spin_unlock_bh(&dev->dev_lock);
3053         goto out;
3054 #else
3055 repeat:
3056         if (unlikely(dev->block_count > 0)) {
3057                 spin_lock_bh(&dev->dev_lock);
3058                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3059                         goto out_unlock;
3060                 barrier(); /* to reread block_count */
3061                 if (dev->block_count > 0) {
3062                         scst_dec_on_dev_cmd(cmd);
3063                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
3064                                 "serializing (tag %llu, dev %p)", cmd,
3065                                 (long long unsigned int)cmd->tag, dev);
3066                         list_add_tail(&cmd->blocked_cmd_list_entry,
3067                                       &dev->blocked_cmd_list);
3068                         res = 1;
3069                         spin_unlock_bh(&dev->dev_lock);
3070                         goto out;
3071                 } else {
3072                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3073                                 "continuing");
3074                 }
3075                 spin_unlock_bh(&dev->dev_lock);
3076         }
3077         if (unlikely(dev->dev_serialized)) {
3078                 spin_lock_bh(&dev->dev_lock);
3079                 barrier(); /* to reread block_count */
3080                 if (dev->block_count == 0) {
3081                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3082                                 "cmds due to serializing (dev %p)", cmd,
3083                                 (long long unsigned int)cmd->tag, dev);
3084                         __scst_block_dev(dev);
3085                         cmd->inc_blocking = 1;
3086                 } else {
3087                         spin_unlock_bh(&dev->dev_lock);
3088                         TRACE_MGMT_DBG("Somebody blocked the device, "
3089                                 "repeating (count %d)", dev->block_count);
3090                         goto repeat;
3091                 }
3092                 spin_unlock_bh(&dev->dev_lock);
3093         }
3094 #endif
3095
3096 out:
3097         TRACE_EXIT_RES(res);
3098         return res;
3099
3100 out_unlock:
3101         spin_unlock_bh(&dev->dev_lock);
3102         goto out;
3103 }
3104
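/*
 * Illustrative sketch (not compiled): how a caller in the command
 * processing path is expected to pair scst_inc_on_dev_cmd() with the
 * corresponding unblock/dec calls. The function name and return value
 * handling here are assumptions for illustration only; the real call
 * sites live elsewhere in the SCST core.
 */
#if 0
static int example_exec_check(struct scst_cmd *cmd)
{
        /*
         * Returns 1 if cmd was parked on dev->blocked_cmd_list; it will
         * be re-activated later by scst_unblock_cmds(), so the caller
         * just stops processing it for now.
         */
        if (scst_inc_on_dev_cmd(cmd) != 0)
                return 1; /* deferred */

        /* ... pass cmd to the dev handler here ... */

        /*
         * On the done path the pairing is undone: scst_unblock_dev_cmd()
         * runs when cmd->needs_unblocking is set, and the on_dev_count
         * reference taken above is dropped by the matching dec call.
         */
        return 0;
}
#endif
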
3105 /* Called under dev_lock */
3106 void scst_unblock_cmds(struct scst_device *dev)
3107 {
3108 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3109         struct scst_cmd *cmd, *t;
3110         unsigned long flags;
3111
3112         TRACE_ENTRY();
3113
3114         local_irq_save(flags);
3115         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3116                                  blocked_cmd_list_entry) {
3117                 int brk = 0;
3118                 /*
3119                  * Since only one cmd at a time is being executed, expected_sn
3120                  * can't change behind us while the corresponding cmd is in
3121                  * blocked_cmd_list, but we could be called before
3122                  * scst_inc_expected_sn().
3123                  *
3124                  * For HQ commands SN is not set.
3125                  */
3126                 if (likely(!cmd->internal && cmd->sn_set)) {
3127                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3128                         sBUG_ON(cmd->tgt_dev == NULL);
3130                         expected_sn = cmd->tgt_dev->expected_sn;
3131                         if (cmd->sn == expected_sn)
3132                                 brk = 1;
3133                         else if (cmd->sn != (expected_sn+1))
3134                                 continue;
3135                 }
3136
3137                 list_del(&cmd->blocked_cmd_list_entry);
3138                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3139                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3140                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
3141                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3142                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3143                 if (brk)
3144                         break;
3145         }
3146         local_irq_restore(flags);
3147 #else /* CONFIG_SCST_STRICT_SERIALIZING */
3148         struct scst_cmd *cmd, *tcmd;
3149         unsigned long flags;
3150
3151         TRACE_ENTRY();
3152
3153         local_irq_save(flags);
3154         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3155                                  blocked_cmd_list_entry) {
3156                 list_del(&cmd->blocked_cmd_list_entry);
3157                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3158                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3159                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3160                         list_add(&cmd->cmd_list_entry,
3161                                 &cmd->cmd_lists->active_cmd_list);
3162                 else
3163                         list_add_tail(&cmd->cmd_list_entry,
3164                                 &cmd->cmd_lists->active_cmd_list);
3165                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3166                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3167         }
3168         local_irq_restore(flags);
3169 #endif /* CONFIG_SCST_STRICT_SERIALIZING */
3170
3171         TRACE_EXIT();
3172         return;
3173 }
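
/*
 * Worked example for the expected_sn gate in the strict serializing
 * branch above (values illustrative): with expected_sn == 5 and
 * blocked_cmd_list holding cmds with sn 7, 6 and 5 in list order, the
 * scan skips sn 7 (neither expected_sn nor expected_sn+1), re-activates
 * sn 6 (it may run right after sn 5), then re-activates sn 5 and breaks,
 * since once the expected cmd itself has been released no later cmd can
 * be ready yet.
 */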
3174
3175 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3176         struct scst_cmd *out_of_sn_cmd)
3177 {
3178         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3179
3180         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3181                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3182                 scst_make_deferred_commands_active(tgt_dev);
3183         } else {
3184                 out_of_sn_cmd->out_of_sn = 1;
3185                 spin_lock_irq(&tgt_dev->sn_lock);
3186                 tgt_dev->def_cmd_count++;
3187                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3188                               &tgt_dev->skipped_sn_list);
3189                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3190                         "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3191                         tgt_dev->expected_sn);
3192                 spin_unlock_irq(&tgt_dev->sn_lock);
3193         }
3194
3195         return;
3196 }
3197
3198 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3199         struct scst_cmd *out_of_sn_cmd)
3200 {
3201         TRACE_ENTRY();
3202
3203         if (!out_of_sn_cmd->sn_set) {
3204                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3205                 goto out;
3206         }
3207
3208         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3209
3210 out:
3211         TRACE_EXIT();
3212         return;
3213 }
3214
3215 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3216 {
3217         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3218
3219         TRACE_ENTRY();
3220
3221         if (!cmd->hq_cmd_inced)
3222                 goto out;
3223
3224         spin_lock_irq(&tgt_dev->sn_lock);
3225         tgt_dev->hq_cmd_count--;
3226         spin_unlock_irq(&tgt_dev->sn_lock);
3227
3228         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3229
3230         /*
3231          * There is no problem with checking hq_cmd_count without the
3232          * lock held. In the worst case we will only make an unneeded
3233          * run over the deferred commands.
3234          */
3235         if (tgt_dev->hq_cmd_count == 0)
3236                 scst_make_deferred_commands_active(tgt_dev);
3237
3238 out:
3239         TRACE_EXIT();
3240         return;
3241 }
3242
3243 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3244 {
3245         TRACE_ENTRY();
3246
3247         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3248                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3249                 atomic_read(&scst_cmd_count));
3250
3251         scst_done_cmd_mgmt(cmd);
3252
3253         smp_rmb();
3254         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3255                 if (cmd->completed) {
3256                         /* It's completed and it's OK to return its result */
3257                         goto out;
3258                 }
3259
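                /*
                 * dev->tas mirrors the TAS bit of the Control mode page
                 * (see SAM): when set, commands aborted on behalf of
                 * another initiator must be completed with TASK ABORTED
                 * status; when clear, they are dropped silently.
                 */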
3260                 if (cmd->dev->tas) {
3261                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3262                                 "(tag %llu), returning TASK ABORTED", cmd,
3263                                 (long long unsigned int)cmd->tag);
3264                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3265                 } else {
3266                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3267                                 "(tag %llu), aborting without delivery or "
3268                                 "notification",
3269                                 cmd, (long long unsigned int)cmd->tag);
3270                         /*
3271                          * There is no need to check/requeue possible UA,
3272                          * because, if it exists, it will be delivered
3273                          * by the "completed" branch above.
3274                          */
3275                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3276                 }
3277         }
3278
3279 out:
3280         TRACE_EXIT();
3281         return;
3282 }
3283
3284 void __init scst_scsi_op_list_init(void)
3285 {
3286         int i;
3287         uint8_t op = 0xff;
3288
3289         TRACE_ENTRY();
3290
3291         for (i = 0; i < 256; i++)
3292                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3293
3294         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3295                 if (scst_scsi_op_table[i].ops != op) {
3296                         op = scst_scsi_op_table[i].ops;
3297                         scst_scsi_op_list[op] = i;
3298                 }
3299         }
3300
3301         TRACE_EXIT();
3302         return;
3303 }
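
/*
 * Illustrative sketch (not compiled): how the index built above is
 * meant to be consumed. scst_scsi_op_list[op] holds the first slot of
 * scst_scsi_op_table describing opcode op, or SCST_CDB_TBL_SIZE if the
 * opcode is unknown; a lookup scans forward while the opcode still
 * matches, since several entries may share an opcode (one per device
 * type). The function name and the elided device-type filter are
 * assumptions; see scst_get_cdb_info() for the real consumer.
 */
#if 0
static int example_find_op(uint8_t op)
{
        int i = scst_scsi_op_list[op];

        while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
                /* ... check the device type here, return i on match ... */
                i++;
        }
        return -1; /* opcode not present in the table */
}
#endif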
3304
3305 #ifdef CONFIG_SCST_DEBUG
3306 /* Original taken from the XFS code */
3307 unsigned long scst_random(void)
3308 {
3309         static int Inited;
3310         static unsigned long RandomValue;
3311         static DEFINE_SPINLOCK(lock);
3312         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
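        /*
         * This is the Park-Miller "minimal standard" generator,
         * rv' = 16807 * rv mod (2^31 - 1), computed with Schrage's
         * decomposition so no intermediate value overflows 32 bits:
         * since 2147483647 == 16807 * 127773 + 2836, we have
         * 16807 * rv == 16807 * (rv % 127773) - 2836 * (rv / 127773)
         * (mod 2^31 - 1), adding the modulus back whenever the result
         * goes non-positive.
         */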
3313         register long rv;
3314         register long lo;
3315         register long hi;
3316         unsigned long flags;
3317
3318         spin_lock_irqsave(&lock, flags);
3319         if (!Inited) {
3320                 RandomValue = jiffies;
3321                 Inited = 1;
3322         }
3323         rv = RandomValue;
3324         hi = rv / 127773;
3325         lo = rv % 127773;
3326         rv = 16807 * lo - 2836 * hi;
3327         if (rv <= 0)
3328                 rv += 2147483647;
3329         RandomValue = rv;
3330         spin_unlock_irqrestore(&lock, flags);
3331         return rv;
3332 }
3333 EXPORT_SYMBOL(scst_random);
3334 #endif
3335
3336 #ifdef CONFIG_SCST_DEBUG_TM
3337
3338 #define TM_DBG_STATE_ABORT              0
3339 #define TM_DBG_STATE_RESET              1
3340 #define TM_DBG_STATE_OFFLINE            2
3341
3342 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
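
/*
 * State machine overview: each tm_dbg_change_state() call consumes one
 * pass; after 5 passes ABORT moves to RESET, after 1 more RESET moves
 * either back to ABORT or, with CONFIG_SCST_TM_DBG_GO_OFFLINE, on to
 * OFFLINE, which then lasts effectively forever (0x7ffffff passes).
 * See tm_dbg_on_state_num_passes[] and tm_dbg_change_state() below.
 */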
3343
3344 static void tm_dbg_timer_fn(unsigned long arg);
3345
3346 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3347 /* All serialized by scst_tm_dbg_lock */
3348 struct {
3349         unsigned int tm_dbg_release:1;
3350         unsigned int tm_dbg_blocked:1;
3351 } tm_dbg_flags;
3352 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3353 static int tm_dbg_delayed_cmds_count;
3354 static int tm_dbg_passed_cmds_count;
3355 static int tm_dbg_state;
3356 static int tm_dbg_on_state_passes;
3357 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3358 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3359
3360 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3361
3362 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3363         struct scst_acg_dev *acg_dev)
3364 {
3365         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3366                 unsigned long flags;
3367                 /* Do TM debugging only for LUN 0 */
3368                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3369                 tm_dbg_p_cmd_list_waitQ =
3370                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3371                 tm_dbg_state = INIT_TM_DBG_STATE;
3372                 tm_dbg_on_state_passes =
3373                         tm_dbg_on_state_num_passes[tm_dbg_state];
3374                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3375                 PRINT_INFO("LUN 0 connected via target driver %s is under "
3376                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3377                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3378         }
3379 }
3380
3381 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3382 {
3383         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3384                 unsigned long flags;
3385                 del_timer_sync(&tm_dbg_timer);
3386                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3387                 tm_dbg_p_cmd_list_waitQ = NULL;
3388                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3389         }
3390 }
3391
3392 static void tm_dbg_timer_fn(unsigned long arg)
3393 {
3394         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3395         tm_dbg_flags.tm_dbg_release = 1;
3396         smp_wmb();
3397         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3398 }
3399
3400 /* Called under scst_tm_dbg_lock and IRQs off */
3401 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3402 {
3403         switch (tm_dbg_state) {
3404         case TM_DBG_STATE_ABORT:
3405                 if (tm_dbg_delayed_cmds_count == 0) {
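                        /* first delayed cmd arms the release timer for ~58-62 s */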
3406                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3407                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3408                                 "for %ld.%ld seconds (%ld HZ), "
3409                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3410                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3411                         mod_timer(&tm_dbg_timer, jiffies + d);
3412 #if 0
3413                         tm_dbg_flags.tm_dbg_blocked = 1;
3414 #endif
3415                 } else {
3416                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3417                                 "(tag %llu), delayed_cmds_count=%d, "
3418                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3419                                 tm_dbg_delayed_cmds_count,
3420                                 tm_dbg_on_state_passes);
3421                         if (tm_dbg_delayed_cmds_count == 2)
3422                                 tm_dbg_flags.tm_dbg_blocked = 0;
3423                 }
3424                 break;
3425
3426         case TM_DBG_STATE_RESET:
3427         case TM_DBG_STATE_OFFLINE:
3428                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3429                         "(tag %llu), delayed_cmds_count=%d, "
3430                         "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3431                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3432                 tm_dbg_flags.tm_dbg_blocked = 1;
3433                 break;
3434
3435         default:
3436                 sBUG();
3437         }
3438         /* IRQs already off */
3439         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3440         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3441         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3442         cmd->tm_dbg_delayed = 1;
3443         tm_dbg_delayed_cmds_count++;
3444         return;
3445 }
3446
3447 /* No locks */
3448 void tm_dbg_check_released_cmds(void)
3449 {
3450         if (tm_dbg_flags.tm_dbg_release) {
3451                 struct scst_cmd *cmd, *tc;
3452                 spin_lock_irq(&scst_tm_dbg_lock);
3453                 list_for_each_entry_safe_reverse(cmd, tc,
3454                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3455                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3456                                 "delayed_cmds_count=%d", cmd, cmd->tag,
3457                                 tm_dbg_delayed_cmds_count);
3458                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3459                         list_move(&cmd->cmd_list_entry,
3460                                 &cmd->cmd_lists->active_cmd_list);
3461                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3462                 }
3463                 tm_dbg_flags.tm_dbg_release = 0;
3464                 spin_unlock_irq(&scst_tm_dbg_lock);
3465         }
3466 }
3467
3468 /* Called under scst_tm_dbg_lock */
3469 static void tm_dbg_change_state(void)
3470 {
3471         tm_dbg_flags.tm_dbg_blocked = 0;
3472         if (--tm_dbg_on_state_passes == 0) {
3473                 switch (tm_dbg_state) {
3474                 case TM_DBG_STATE_ABORT:
3475                         TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to RESET");
3477                         tm_dbg_state = TM_DBG_STATE_RESET;
3479                         tm_dbg_flags.tm_dbg_blocked = 0;
3480                         break;
3481                 case TM_DBG_STATE_RESET:
3482                 case TM_DBG_STATE_OFFLINE:
3483 #ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
3484                         TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to OFFLINE");
3486                         tm_dbg_state = TM_DBG_STATE_OFFLINE;
3488 #else
3489                         TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to ABORT");
3491                         tm_dbg_state = TM_DBG_STATE_ABORT;
3493 #endif
3494                         break;
3495                 default:
3496                         sBUG();
3497                 }
3498                 tm_dbg_on_state_passes =
3499                     tm_dbg_on_state_num_passes[tm_dbg_state];
3500         }
3501
3502         TRACE_MGMT_DBG("%s", "Deleting timer");
3503         del_timer(&tm_dbg_timer);
3504 }
3505
3506 /* No locks */
3507 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3508 {
3509         int res = 0;
3510         unsigned long flags;
3511
3512         if (cmd->tm_dbg_immut)
3513                 goto out;
3514
3515         if (cmd->tm_dbg_delayed) {
3516                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3517                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3518                         "delayed_cmds_count=%d", cmd, cmd->tag,
3519                         tm_dbg_delayed_cmds_count);
3520
3521                 cmd->tm_dbg_immut = 1;
3522                 tm_dbg_delayed_cmds_count--;
3523                 if ((tm_dbg_delayed_cmds_count == 0) &&
3524                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3525                         tm_dbg_change_state();
3526                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3527         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3528                                         &cmd->tgt_dev->tgt_dev_flags)) {
3529                 /* Delay every 50th command */
3530                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3531                 if (tm_dbg_flags.tm_dbg_blocked ||
3532                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3533                         tm_dbg_delay_cmd(cmd);
3534                         res = 1;
3535                 } else
3536                         cmd->tm_dbg_immut = 1;
3537                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3538         }
3539
3540 out:
3541         return res;
3542 }
3543
3544 /* No locks */
3545 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3546 {
3547         struct scst_cmd *c;
3548         unsigned long flags;
3549
3550         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3551         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3552                                 cmd_list_entry) {
3553                 if (c == cmd) {
3554                         TRACE_MGMT_DBG("Abort request for "
3555                                 "delayed cmd %p (tag=%llu), moving it to "
3556                                 "active cmd list (delayed_cmds_count=%d)",
3557                                 c, c->tag, tm_dbg_delayed_cmds_count);
3558
3559                         if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3560                                 /* Test how completed commands are handled */
3561                                 if ((scst_random() % 10) == 5) {
3562                                         scst_set_cmd_error(cmd,
3563                                            SCST_LOAD_SENSE(scst_sense_hardw_error));
3564                                         /* It's completed now */
3565                                 }
3566                         }
3567
3568                         spin_lock(&c->cmd_lists->cmd_list_lock);
3569                         list_move(&c->cmd_list_entry,
3570                                 &c->cmd_lists->active_cmd_list);
3571                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3572                         spin_unlock(&c->cmd_lists->cmd_list_lock);
3573                         break;
3574                 }
3575         }
3576         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3577 }
3578
3579 /* Might be called under scst_mutex */
3580 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3581 {
3582         unsigned long flags;
3583
3584         if (dev != NULL) {
3585                 struct scst_tgt_dev *tgt_dev;
3586                 bool found = false;
3587
3588                 spin_lock_bh(&dev->dev_lock);
3589                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3590                                             dev_tgt_dev_list_entry) {
3591                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3592                                         &tgt_dev->tgt_dev_flags)) {
3593                                 found = true;
3594                                 break;
3595                         }
3596                 }
3597                 spin_unlock_bh(&dev->dev_lock);
3598
3599                 if (!found)
3600                         goto out;
3601         }
3602
3603         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3604         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3605                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3606                         tm_dbg_delayed_cmds_count);
3607                 tm_dbg_change_state();
3608                 tm_dbg_flags.tm_dbg_release = 1;
3609                 smp_wmb();
3610                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3611                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3612         } else {
3613                 TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
3614         }
3615         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3616
3617 out:
3618         return;
3619 }
3620
3621 int tm_dbg_is_release(void)
3622 {
3623         return tm_dbg_flags.tm_dbg_release;
3624 }
3625 #endif /* CONFIG_SCST_DEBUG_TM */
3626
3627 #ifdef CONFIG_SCST_DEBUG_SN
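/*
 * Randomly perturbs cmd->queue_type to exercise SN/ordering handling:
 * roughly one command in a thousand starts a burst of 1-9 commands
 * forced to HEAD OF QUEUE (1 in 3) or ORDERED (2 in 3), with additional
 * rare one-off overrides below.
 */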
3628 void scst_check_debug_sn(struct scst_cmd *cmd)
3629 {
3630         static DEFINE_SPINLOCK(lock);
3631         static int type;
3632         static int cnt;
3633         unsigned long flags;
3634         int old = cmd->queue_type;
3635
3636         spin_lock_irqsave(&lock, flags);
3637
3638         if (cnt == 0) {
3639                 if ((scst_random() % 1000) == 500) {
3640                         if ((scst_random() % 3) == 1)
3641                                 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3642                         else
3643                                 type = SCST_CMD_QUEUE_ORDERED;
3644                         do {
3645                                 cnt = scst_random() % 10;
3646                         } while (cnt == 0);
3647                 } else
3648                         goto out_unlock;
3649         }
3650
3651         cmd->queue_type = type;
3652         cnt--;
3653
3654         if (((scst_random() % 1000) == 750))
3655                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3656         else if (((scst_random() % 1000) == 751))
3657                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3658         else if (((scst_random() % 1000) == 752))
3659                 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3660
3661         TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3662                 cmd->queue_type, cnt);
3663
3664 out_unlock:
3665         spin_unlock_irqrestore(&lock, flags);
3666         return;
3667 }
3668 #endif /* CONFIG_SCST_DEBUG_SN */