- Fixes incorrect allocation length 0 by scst_user module

scst/src/scst_lib.c
/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#ifdef SCST_HIGHMEM
#include <linux/highmem.h>
#endif

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
	uint8_t *sense, int sense_len);

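/*
 * Allocates a zeroed SCST_SENSE_BUFFERSIZE sense buffer for @cmd from
 * scst_sense_mempool, using GFP_ATOMIC in atomic context and
 * GFP_KERNEL|__GFP_NOFAIL otherwise. Returns 0 on success, -ENOMEM on
 * failure.
 */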
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
	int res = 0;
	unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

	TRACE_ENTRY();

	sBUG_ON(cmd->sense != NULL);

	cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
	if (cmd->sense == NULL) {
		PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
			"The sense data will be lost!!", cmd->cdb[0]);
		res = -ENOMEM;
		goto out;
	}

	memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
	const uint8_t *sense, unsigned int len)
{
	int res;

	TRACE_ENTRY();

	res = scst_alloc_sense(cmd, atomic);
	if (res != 0) {
		PRINT_BUFFER("Lost sense", sense, len);
		goto out;
	}

	memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
	TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
	TRACE_ENTRY();

	cmd->status = status;
	cmd->host_status = DID_OK;

	cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
	cmd->dbl_ua_orig_data_direction = cmd->data_direction;

	cmd->data_direction = SCST_DATA_NONE;
	cmd->resp_data_len = 0;
	cmd->is_send_status = 1;

	cmd->completed = 1;

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
	int rc;

	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

	rc = scst_alloc_sense(cmd, 1);
	if (rc != 0) {
		PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
			key, asc, ascq);
		goto out;
	}

	scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
	TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

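/*
 * Builds fixed-format sense data (response code 0x70, current errors) in
 * @buffer: sense key, ASC and ASCQ at their standard offsets and an
 * additional sense length of 0x0a.
 */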
void scst_set_sense(uint8_t *buffer, int len, int key,
	int asc, int ascq)
{
	memset(buffer, 0, len);
	buffer[0] = 0x70;	/* Error Code			*/
	buffer[2] = key;	/* Sense Key			*/
	buffer[7] = 0x0a;	/* Additional Sense Length	*/
	buffer[12] = asc;	/* ASC				*/
	buffer[13] = ascq;	/* ASCQ				*/
	TRACE_BUFFER("Sense set", buffer, len);
	return;
}
EXPORT_SYMBOL(scst_set_sense);

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
	unsigned int len)
{
	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
	scst_alloc_set_sense(cmd, 1, sense, len);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error_sense);

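/*
 * Responds with BUSY when this is the only outstanding command in the
 * session or the session is still initializing; otherwise responds with
 * TASK SET FULL.
 */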
void scst_set_busy(struct scst_cmd *cmd)
{
	int c = atomic_read(&cmd->sess->sess_cmd_count);

	TRACE_ENTRY();

	if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
		scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
		TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	} else {
		scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
		TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
			"initiator %s (cmds count %d, queue_type %x, "
			"sess->init_phase %d)", cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	}

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_busy);

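/*
 * Returns the state a command should be moved to on abnormal completion:
 * commands that haven't finished parsing go straight to response
 * transmission, all others pass through pre-dev-done processing first.
 */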
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	switch (cmd->state) {
	case SCST_CMD_STATE_INIT_WAIT:
	case SCST_CMD_STATE_INIT:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
		res = SCST_CMD_STATE_PRE_XMIT_RESP;
		break;

	default:
		res = SCST_CMD_STATE_PRE_DEV_DONE;
		break;
	}

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

#ifdef EXTRACHECKS
	switch (cmd->state) {
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_XMIT_WAIT:
		PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif

	cmd->state = scst_get_cmd_abnormal_done_state(cmd);

	EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
			   (cmd->tgt_dev == NULL));

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

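/*
 * Truncates the response buffer to @resp_data_len bytes by walking the SG
 * list and shortening the last needed entry. The original SG geometry is
 * saved so that scst_check_restore_sg_buff() can restore it later.
 */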
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
	int i, l;

	TRACE_ENTRY();

	scst_check_restore_sg_buff(cmd);
	cmd->resp_data_len = resp_data_len;

	if (resp_data_len == cmd->bufflen)
		goto out;

	l = 0;
	for (i = 0; i < cmd->sg_cnt; i++) {
		l += cmd->sg[i].length;
		if (l >= resp_data_len) {
			int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef DEBUG
			TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
				"resp_data_len %d, i %d, cmd->sg[i].length %d, "
				"left %d",
				cmd, (long long unsigned int)cmd->tag,
				resp_data_len, i,
				cmd->sg[i].length, left);
#endif
			cmd->orig_sg_cnt = cmd->sg_cnt;
			cmd->orig_sg_entry = i;
			cmd->orig_entry_len = cmd->sg[i].length;
			cmd->sg_cnt = (left > 0) ? i+1 : i;
			cmd->sg[i].length = left;
			cmd->sg_buff_modified = 1;
			break;
		}
	}

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
{
	struct scst_device *dev;
	int res = 0;
	static int dev_num; /* protected by scst_mutex */

	TRACE_ENTRY();

	dev = kzalloc(sizeof(*dev), gfp_mask);
	if (dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_device failed");
		res = -ENOMEM;
		goto out;
	}

	dev->handler = &scst_null_devtype;
	dev->p_cmd_lists = &scst_main_cmd_lists;
	atomic_set(&dev->dev_cmd_count, 0);
	atomic_set(&dev->write_cmd_count, 0);
	scst_init_mem_lim(&dev->dev_mem_lim);
	spin_lock_init(&dev->dev_lock);
	atomic_set(&dev->on_dev_count, 0);
	INIT_LIST_HEAD(&dev->blocked_cmd_list);
	INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
	INIT_LIST_HEAD(&dev->dev_acg_dev_list);
	INIT_LIST_HEAD(&dev->threads_list);
	init_waitqueue_head(&dev->on_dev_waitQ);
	dev->dev_double_ua_possible = 1;
	dev->dev_serialized = 1;
	dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
	dev->dev_num = dev_num++;

	*out_dev = dev;

out:
	TRACE_EXIT_RES(res);
	return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
	TRACE_ENTRY();

#ifdef EXTRACHECKS
	if (!list_empty(&dev->dev_tgt_dev_list) ||
	    !list_empty(&dev->dev_acg_dev_list)) {
		PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
			"is not empty!", __func__);
		sBUG();
	}
#endif

	kfree(dev);

	TRACE_EXIT();
	return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
	atomic_set(&mem_lim->alloced_pages, 0);
	mem_lim->max_allowed_pages =
		((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

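/*
 * Allocates an scst_acg_dev linking @dev to @acg at @lun; returns NULL on
 * allocation failure. Presumably called, like its freeing counterpart
 * below, with activity suspended and scst_mutex held.
 */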
struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
	struct scst_device *dev, lun_t lun)
{
	struct scst_acg_dev *res;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
	res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
	if (res == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_acg_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(res, 0, sizeof(*res));
#endif

	res->dev = dev;
	res->acg = acg;
	res->lun = lun;

out:
	TRACE_EXIT_HRES(res);
	return res;
}

/* The activity supposed to be suspended and scst_mutex held */
void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
	TRACE_ENTRY();

	TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_del(&acg_dev->acg_dev_list_entry);
	list_del(&acg_dev->dev_acg_dev_list_entry);

	kmem_cache_free(scst_acgd_cachep, acg_dev);

	TRACE_EXIT();
	return;
}

/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
	struct scst_acg *acg;

	TRACE_ENTRY();

	acg = kzalloc(sizeof(*acg), GFP_KERNEL);
	if (acg == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
		goto out;
	}

	INIT_LIST_HEAD(&acg->acg_dev_list);
	INIT_LIST_HEAD(&acg->acg_sess_list);
	INIT_LIST_HEAD(&acg->acn_list);
	acg->acg_name = acg_name;

	TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
	list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
	TRACE_EXIT_HRES(acg);
	return acg;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
	struct scst_acn *n, *nn;
	struct scst_acg_dev *acg_dev, *acg_dev_tmp;
	int res = 0;

	TRACE_ENTRY();

	if (!list_empty(&acg->acg_sess_list)) {
		PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
		res = -EBUSY;
		goto out;
	}

	TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
	list_del(&acg->scst_acg_list_entry);

	/* Freeing acg_devs */
	list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
			acg_dev_list_entry) {
		struct scst_tgt_dev *tgt_dev, *tt;
		list_for_each_entry_safe(tgt_dev, tt,
				 &acg_dev->dev->dev_tgt_dev_list,
				 dev_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev == acg_dev)
				scst_free_tgt_dev(tgt_dev);
		}
		scst_free_acg_dev(acg_dev);
	}

	/* Freeing names */
	list_for_each_entry_safe(n, nn, &acg->acn_list,
			acn_list_entry) {
		list_del(&n->acn_list_entry);
		kfree(n->name);
		kfree(n);
	}
	INIT_LIST_HEAD(&acg->acn_list);

	kfree(acg);
out:
	TRACE_EXIT_RES(res);
	return res;
}


/*
 * scst_mutex supposed to be held; there must be no parallel activity in
 * this session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev)
{
	int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
	struct scst_tgt_dev *tgt_dev;
	struct scst_device *dev = acg_dev->dev;
	struct list_head *sess_tgt_dev_list_head;
	struct scst_tgt_template *vtt = sess->tgt->tgtt;
	int rc, i;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
	tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
	if (tgt_dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_tgt_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

	tgt_dev->dev = dev;
	tgt_dev->lun = acg_dev->lun;
	tgt_dev->acg_dev = acg_dev;
	tgt_dev->sess = sess;
	atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

	scst_sgv_pool_use_norm(tgt_dev);

	if (dev->scsi_dev != NULL) {
		ini_sg = dev->scsi_dev->host->sg_tablesize;
		ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
		ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
				ENABLE_CLUSTERING);
	} else {
		ini_sg = (1 << 15) /* infinite */;
		ini_unchecked_isa_dma = 0;
		ini_use_clustering = 0;
	}
	tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

	if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
	    !sess->tgt->tgtt->no_clustering)
		scst_sgv_pool_use_norm_clust(tgt_dev);

	if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
		scst_sgv_pool_use_dma(tgt_dev);
	} else {
#ifdef SCST_HIGHMEM
		scst_sgv_pool_use_highmem(tgt_dev);
#endif
	}

	if (dev->scsi_dev != NULL) {
		TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
		      "SCST lun=%Ld", dev->scsi_dev->host->host_no,
		      dev->scsi_dev->channel, dev->scsi_dev->id,
		      dev->scsi_dev->lun,
		      (long long unsigned int)tgt_dev->lun);
	} else {
		TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld",
			       dev->virt_name,
			       (long long unsigned int)tgt_dev->lun);
	}

	spin_lock_init(&tgt_dev->tgt_dev_lock);
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	spin_lock_init(&tgt_dev->thr_data_lock);
	INIT_LIST_HEAD(&tgt_dev->thr_data_list);
	spin_lock_init(&tgt_dev->sn_lock);
	INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
	INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
	tgt_dev->expected_sn = 1;
	tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
	tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
	for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
		atomic_set(&tgt_dev->sn_slots[i], 0);

	if (dev->handler->parse_atomic &&
	    (sess->tgt->tgtt->preprocessing_done == NULL)) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
		    (sess->tgt->tgtt->rdy_to_xfer == NULL))
			__set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		if (dev->handler->exec_atomic || (dev->handler->exec == NULL))
			__set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->exec_atomic || (dev->handler->exec == NULL)) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
		    (sess->tgt->tgtt->rdy_to_xfer == NULL))
			__set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}
	if ((dev->handler->dev_done_atomic ||
	     (dev->handler->dev_done == NULL)) &&
	    sess->tgt->tgtt->xmit_response_atomic) {
		__set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}

	spin_lock_bh(&scst_temp_UA_lock);
	scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
		SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
	spin_unlock_bh(&scst_temp_UA_lock);

	tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

	if (vtt->threads_num > 0) {
		rc = 0;
		if (dev->handler->threads_num > 0)
			rc = scst_add_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			rc = scst_add_cmd_threads(vtt->threads_num);
		if (rc != 0)
			goto out_free;
	}

	if (dev->handler && dev->handler->attach_tgt) {
		TRACE_DBG("Calling dev handler's attach_tgt(%p)",
		      tgt_dev);
		rc = dev->handler->attach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
		if (rc != 0) {
			PRINT_ERROR("Device handler's %s attach_tgt() "
			    "failed: %d", dev->handler->name, rc);
			goto out_thr_free;
		}
	}

	spin_lock_bh(&dev->dev_lock);
	list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
	if (dev->dev_reserved)
		__set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
	spin_unlock_bh(&dev->dev_lock);

	sess_tgt_dev_list_head =
		&sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
	list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);

out:
	TRACE_EXIT();
	return tgt_dev;

out_thr_free:
	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_cmd_threads(vtt->threads_num);
	}

out_free:
	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
	tgt_dev = NULL;
	goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, but scst_mutex must be held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
	TRACE_ENTRY();

	scst_clear_reservation(tgt_dev);

	/* With activity suspended the lock isn't needed, but let's be safe */
	spin_lock_bh(&tgt_dev->tgt_dev_lock);
	scst_free_all_UA(tgt_dev);
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

	spin_lock_bh(&scst_temp_UA_lock);
	scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
		SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
	scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
	spin_unlock_bh(&scst_temp_UA_lock);

	TRACE_EXIT();
	return;
}

/*
 * scst_mutex supposed to be held; there must be no parallel activity in
 * this session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

	TRACE_ENTRY();

	tm_dbg_deinit_tgt_dev(tgt_dev);

	spin_lock_bh(&dev->dev_lock);
	list_del(&tgt_dev->dev_tgt_dev_list_entry);
	spin_unlock_bh(&dev->dev_lock);

	list_del(&tgt_dev->sess_tgt_dev_list_entry);

	scst_clear_reservation(tgt_dev);
	scst_free_all_UA(tgt_dev);

	if (dev->handler && dev->handler->detach_tgt) {
		TRACE_DBG("Calling dev handler's detach_tgt(%p)",
		      tgt_dev);
		dev->handler->detach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
	}

	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_cmd_threads(vtt->threads_num);
	}

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);

	TRACE_EXIT();
	return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
			acg_dev_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
	}

out:
	TRACE_EXIT();
	return res;

out_free:
	scst_sess_free_tgt_devs(sess);
	goto out;
}

/*
 * scst_mutex supposed to be held; there must be no parallel activity in
 * this session.
 */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
	int i;
	struct scst_tgt_dev *tgt_dev, *t;

	TRACE_ENTRY();

	/* The session is going down, no users, so no locks */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			scst_free_tgt_dev(tgt_dev);
		}
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}

	TRACE_EXIT();
	return;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
	int read_only)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;
	struct scst_session *sess;
	LIST_HEAD(tmp_tgt_dev_list);

	TRACE_ENTRY();

	INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef EXTRACHECKS
	list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
		if (acg_dev->dev == dev) {
			PRINT_ERROR("Device is already in group %s",
				acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}
#endif

	acg_dev = scst_alloc_acg_dev(acg, dev, lun);
	if (acg_dev == NULL) {
		res = -ENOMEM;
		goto out;
	}
	acg_dev->rd_only_flag = read_only;

	TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
	list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
			      &tmp_tgt_dev_list);
	}

out:
	if (res == 0) {
		if (dev->virt_name != NULL) {
			PRINT_INFO("Added device %s to group %s (LUN %Ld, "
				"rd_only %d)", dev->virt_name, acg->acg_name,
				(long long unsigned int)lun,
				read_only);
		} else {
			PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
				"%Ld, rd_only %d)", dev->scsi_dev->host->host_no,
				dev->scsi_dev->channel, dev->scsi_dev->id,
				dev->scsi_dev->lun, acg->acg_name,
				(long long unsigned int)lun,
				read_only);
		}
	}

	TRACE_EXIT_RES(res);
	return res;

out_free:
	list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
			 extra_tgt_dev_list_entry) {
		scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);
	goto out;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
	int res = 0;
	struct scst_acg_dev *acg_dev = NULL, *a;
	struct scst_tgt_dev *tgt_dev, *tt;

	TRACE_ENTRY();

	list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
		if (a->dev == dev) {
			acg_dev = a;
			break;
		}
	}

	if (acg_dev == NULL) {
		PRINT_ERROR("Device is not found in group %s", acg->acg_name);
		res = -EINVAL;
		goto out;
	}

	list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
			 dev_tgt_dev_list_entry) {
		if (tgt_dev->acg_dev == acg_dev)
			scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);

out:
	if (res == 0) {
		if (dev->virt_name != NULL) {
			PRINT_INFO("Removed device %s from group %s",
				dev->virt_name, acg->acg_name);
		} else {
			PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
				dev->scsi_dev->host->host_no,
				dev->scsi_dev->channel, dev->scsi_dev->id,
				dev->scsi_dev->lun, acg->acg_name);
		}
	}

	TRACE_EXIT_RES(res);
	return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
	int res = 0;
	struct scst_acn *n;
	int len;
	char *nm;

	TRACE_ENTRY();

	list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
		if (strcmp(n->name, name) == 0) {
			PRINT_ERROR("Name %s already exists in group %s",
				name, acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (n == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn");
		res = -ENOMEM;
		goto out;
	}

	len = strlen(name);
	nm = kmalloc(len + 1, GFP_KERNEL);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
		res = -ENOMEM;
		goto out_free;
	}

	strcpy(nm, name);
	n->name = nm;

	list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
	if (res == 0)
		PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

	TRACE_EXIT_RES(res);
	return res;

out_free:
	kfree(n);
	goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
	int res = -EINVAL;
	struct scst_acn *n;

	TRACE_ENTRY();

	list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
		if (strcmp(n->name, name) == 0) {
			list_del(&n->acn_list_entry);
			kfree(n->name);
			kfree(n);
			res = 0;
			break;
		}
	}

	if (res == 0) {
		PRINT_INFO("Removed name %s from group %s", name,
			acg->acg_name);
	} else {
		PRINT_ERROR("Unable to find name %s in group %s", name,
			acg->acg_name);
	}

	TRACE_EXIT_RES(res);
	return res;
}

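/*
 * Allocates an internal command modeled on @orig_cmd (same session, device
 * and LUN), marked HEAD OF QUEUE and left in the PRE_PARSE state; the
 * caller fills in the CDB and queues it.
 */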
struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)
{
	struct scst_cmd *res;
	int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	TRACE_ENTRY();

	res = scst_alloc_cmd(gfp_mask);
	if (res == NULL)
		goto out;

	res->cmd_lists = orig_cmd->cmd_lists;
	res->sess = orig_cmd->sess;
	res->state = SCST_CMD_STATE_PRE_PARSE;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->internal = 1;
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;

	res->bufflen = bufsize;

out:
	TRACE_EXIT_HRES((unsigned long)res);
	return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	__scst_cmd_put(cmd);

	TRACE_EXIT();
	return;
}

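/*
 * Queues an internal REQUEST SENSE command on behalf of @orig_cmd at the
 * head of the active command list. Returns SCST_CMD_STATE_RES_CONT_NEXT on
 * success or -1 if the internal command couldn't be allocated.
 */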
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_NEXT;
#define sbuf_size 252
	static const uint8_t request_sense[6] =
	    { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
	struct scst_cmd *rs_cmd;

	TRACE_ENTRY();

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
	if (rs_cmd == NULL)
		goto out_error;

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;

	TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
		"cmd list", rs_cmd);
	spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
	spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_error:
	res = -1;
	goto out;
#undef sbuf_size
}

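/*
 * Completion path of the internal REQUEST SENSE command: copies the
 * returned sense into the original command, or sets HARDWARE ERROR if no
 * sense was obtained, then frees the internal command and returns the
 * original one.
 */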
struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
	struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
	uint8_t *buf;
	int len;

	TRACE_ENTRY();

	if (req_cmd->dev->handler->dev_done != NULL) {
		int rc;
		TRACE_DBG("Calling dev handler %s dev_done(%p)",
		      req_cmd->dev->handler->name, req_cmd);
		rc = req_cmd->dev->handler->dev_done(req_cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
		      req_cmd->dev->handler->name, rc);
	}

	sBUG_ON(orig_cmd == NULL);

	len = scst_get_buf_first(req_cmd, &buf);

	if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
	    SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
			buf, len);
		scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
			len);
	} else {
		PRINT_ERROR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(orig_cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
	}

	if (len > 0)
		scst_put_buf(req_cmd, buf);

	scst_free_internal_cmd(req_cmd);

	TRACE_EXIT_HRES((unsigned long)orig_cmd);
	return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req;

	TRACE_ENTRY();

	if (scsi_cmd && (req = scsi_cmd->sc_request)) {
		if (req->sr_bufflen)
			kfree(req->sr_buffer);
		scsi_release_request(req);
	}

	TRACE_EXIT();
	return;
}

static void scst_send_release(struct scst_device *dev)
{
	struct scsi_request *req;
	struct scsi_device *scsi_dev;
	uint8_t cdb[6];

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL)
		goto out;

	scsi_dev = dev->scsi_dev;

	req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
	if (req == NULL) {
		PRINT_ERROR("Allocation of scsi_request failed: unable "
			    "to RELEASE device %d:%d:%d:%d",
			    scsi_dev->host->host_no, scsi_dev->channel,
			    scsi_dev->id, scsi_dev->lun);
		goto out;
	}

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = RELEASE;
	cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
	    ((scsi_dev->lun << 5) & 0xe0) : 0;
	memcpy(req->sr_cmnd, cdb, sizeof(cdb));
	req->sr_cmd_len = sizeof(cdb);
	req->sr_data_direction = SCST_DATA_NONE;
	req->sr_use_sg = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_request->rq_disk = dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
		"mid-level", req);
	scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
		    scst_req_done, 15, 3);

out:
	TRACE_EXIT();
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
	struct scsi_device *scsi_dev;
	unsigned char cdb[6];
	unsigned char *sense;
	int rc, i;

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL)
		goto out;

	/* We can't afford missing RELEASE due to memory shortage */
	sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

	scsi_dev = dev->scsi_dev;

	for (i = 0; i < 5; i++) {
		memset(cdb, 0, sizeof(cdb));
		cdb[0] = RELEASE;
		cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
		    ((scsi_dev->lun << 5) & 0xe0) : 0;

		memset(sense, 0, SCST_SENSE_BUFFERSIZE);

		TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
			"SCSI mid-level");
		rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
				sense, 15, 0, 0);
		TRACE_DBG("RELEASE done: %x", rc);

		if (scsi_status_is_good(rc)) {
			break;
		} else {
			PRINT_ERROR("RELEASE failed: %d", rc);
			PRINT_BUFFER("RELEASE sense", sense,
				SCST_SENSE_BUFFERSIZE);
			scst_check_internal_sense(dev, rc,
					sense, SCST_SENSE_BUFFERSIZE);
		}
	}

	kfree(sense);

out:
	TRACE_EXIT();
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	int release = 0;

	TRACE_ENTRY();

	spin_lock_bh(&dev->dev_lock);
	if (dev->dev_reserved &&
	    !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
		/* This is the one who holds the reservation */
		struct scst_tgt_dev *tgt_dev_tmp;
		list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
				    dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				    &tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
		release = 1;
	}
	spin_unlock_bh(&dev->dev_lock);

	if (release)
		scst_send_release(dev);

	TRACE_EXIT();
	return;
}

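/*
 * Allocates and minimally initializes a session for @tgt, including the
 * per-session tgt_dev hash lists and a copy of @initiator_name. Returns
 * NULL on allocation failure.
 */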
struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
	const char *initiator_name)
{
	struct scst_session *sess;
	int i;
	int len;
	char *nm;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
	sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
	if (sess == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_session failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(sess, 0, sizeof(*sess));
#endif

	sess->init_phase = SCST_SESS_IPH_INITING;
	sess->shut_phase = SCST_SESS_SPH_READY;
	atomic_set(&sess->refcnt, 0);
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			 &sess->sess_tgt_dev_list_hash[i];
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}
	spin_lock_init(&sess->sess_list_lock);
	INIT_LIST_HEAD(&sess->search_cmd_list);
	sess->tgt = tgt;
	INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
	INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef MEASURE_LATENCY
	spin_lock_init(&sess->meas_lock);
#endif

	len = strlen(initiator_name);
	nm = kmalloc(len + 1, gfp_mask);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
		goto out_free;
	}

	strcpy(nm, initiator_name);
	sess->initiator_name = nm;

out:
	TRACE_EXIT();
	return sess;

out_free:
	kmem_cache_free(scst_sess_cachep, sess);
	sess = NULL;
	goto out;
}

void scst_free_session(struct scst_session *sess)
{
	TRACE_ENTRY();

	mutex_lock(&scst_mutex);

	TRACE_DBG("Removing sess %p from the list", sess);
	list_del(&sess->sess_list_entry);
	TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
	list_del(&sess->acg_sess_list_entry);

	scst_sess_free_tgt_devs(sess);

	wake_up_all(&sess->tgt->unreg_waitQ);

	mutex_unlock(&scst_mutex);

	kfree(sess->initiator_name);
	kmem_cache_free(scst_sess_cachep, sess);

	TRACE_EXIT();
	return;
}

void scst_free_session_callback(struct scst_session *sess)
{
	struct completion *c;

	TRACE_ENTRY();

	TRACE_DBG("Freeing session %p", sess);

	c = sess->shutdown_compl;

	if (sess->unreg_done_fn) {
		TRACE_DBG("Calling unreg_done_fn(%p)", sess);
		sess->unreg_done_fn(sess);
		TRACE_DBG("%s", "unreg_done_fn() returned");
	}
	scst_free_session(sess);

	if (c)
		complete_all(c);

	TRACE_EXIT();
	return;
}

void scst_sched_session_free(struct scst_session *sess)
{
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&scst_mgmt_lock, flags);
	TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
	list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
	spin_unlock_irqrestore(&scst_mgmt_lock, flags);

	wake_up(&scst_mgmt_waitQ);

	TRACE_EXIT();
	return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
	__scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
	__scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);

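/*
 * Allocates a command from scst_cmd_cachep with one reference held and
 * fields set to their defaults (SIMPLE queue type, default timeout,
 * "unknown yet" data and response lengths).
 */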
struct scst_cmd *scst_alloc_cmd(int gfp_mask)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
	cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
	if (cmd == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(cmd, 0, sizeof(*cmd));
#endif

	cmd->state = SCST_CMD_STATE_INIT_WAIT;
	atomic_set(&cmd->cmd_ref, 1);
	cmd->cmd_lists = &scst_main_cmd_lists;
	INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
	cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
	cmd->timeout = SCST_DEFAULT_TIMEOUT;
	cmd->retries = 0;
	cmd->data_len = -1;
	cmd->is_send_status = 1;
	cmd->resp_data_len = -1;
	cmd->dbl_ua_orig_resp_data_len = -1;
	cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;

out:
	TRACE_EXIT();
	return cmd;
}

void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
	scst_sess_put(cmd->sess);

	/* At this point tgt_dev can be dead, but the pointer remains non-NULL */
	if (likely(cmd->tgt_dev != NULL))
		__scst_put();

	scst_destroy_cmd(cmd);
	return;
}

/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
	int destroy = 1;

	TRACE_ENTRY();

	TRACE_DBG("Freeing cmd %p (tag %Lu)",
		  cmd, (long long unsigned int)cmd->tag);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
			cmd, atomic_read(&scst_cmd_count));
	}

	sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
		cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(EXTRACHECKS)
	if (cmd->scsi_req) {
		PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
			"scsi_req!");
		scst_release_request(cmd);
	}
#endif
#endif

	scst_check_restore_sg_buff(cmd);

	if (unlikely(cmd->internal)) {
		if (cmd->bufflen > 0)
			scst_release_space(cmd);
		scst_destroy_cmd(cmd);
		goto out;
	}

	if (cmd->tgtt->on_free_cmd != NULL) {
		TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
		cmd->tgtt->on_free_cmd(cmd);
		TRACE_DBG("%s", "Target's on_free_cmd() returned");
	}

	if (likely(cmd->dev != NULL)) {
		struct scst_dev_type *handler = cmd->dev->handler;
		if (handler->on_free_cmd != NULL) {
			TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
			      handler->name, cmd);
			handler->on_free_cmd(cmd);
			TRACE_DBG("Dev handler %s on_free_cmd() returned",
				handler->name);
		}
	}

	scst_release_space(cmd);

	if (unlikely(cmd->sense != NULL)) {
		TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
		mempool_free(cmd->sense, scst_sense_mempool);
		cmd->sense = NULL;
	}

	if (likely(cmd->tgt_dev != NULL)) {
#ifdef EXTRACHECKS
		if (unlikely(!cmd->sent_to_midlev)) {
			PRINT_ERROR("Finishing not executed cmd %p (opcode "
			     "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
			     cmd, cmd->cdb[0], cmd->tgtt->name,
			     (long long unsigned int)cmd->lun,
			     cmd->sn, cmd->tgt_dev->expected_sn);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
		}
#endif

		if (unlikely(cmd->out_of_sn)) {
			TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
				"destroy=%d", cmd,
				(long long unsigned int)cmd->tag,
				cmd->sn, destroy);
			destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
					&cmd->cmd_flags);
		}
	}

	if (likely(destroy))
		scst_destroy_put_cmd(cmd);

out:
	TRACE_EXIT();
	return;
}

/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
	int need_wake_up = 0;

	TRACE_ENTRY();

	/*
	 * We don't worry about overflow of finished_cmds, because we check
	 * only for its change.
	 */
	atomic_inc(&tgt->finished_cmds);
	smp_mb__after_atomic_inc();
	if (unlikely(tgt->retry_cmds > 0)) {
		struct scst_cmd *c, *tc;
		unsigned long flags;

		TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
		      tgt->retry_cmds);

		spin_lock_irqsave(&tgt->tgt_lock, flags);
		list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
				cmd_list_entry) {
			tgt->retry_cmds--;

			TRACE_RETRY("Moving retry cmd %p to head of active "
				"cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
			spin_lock(&c->cmd_lists->cmd_list_lock);
			list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
			wake_up(&c->cmd_lists->cmd_list_waitQ);
			spin_unlock(&c->cmd_lists->cmd_list_lock);

			need_wake_up++;
			if (need_wake_up >= 2) /* "slow start" */
				break;
		}
		spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	}

	TRACE_EXIT();
	return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
	struct scst_tgt *tgt = (struct scst_tgt *)arg;
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_timer_active = 0;
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	scst_check_retries(tgt);

	TRACE_EXIT();
	return;
}

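/*
 * Allocates a zeroed management command from scst_mgmt_mempool. A failure
 * here can leak the affected commands and their data, hence the critical
 * error message.
 */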
struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
{
	struct scst_mgmt_cmd *mcmd;

	TRACE_ENTRY();

	mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
	if (mcmd == NULL) {
		PRINT_CRIT_ERROR("%s", "Allocation of management command "
			"failed, some commands and their data could leak");
		goto out;
	}
	memset(mcmd, 0, sizeof(*mcmd));

out:
	TRACE_EXIT();
	return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
	atomic_dec(&mcmd->sess->sess_cmd_count);
	spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

	scst_sess_put(mcmd->sess);

	if (mcmd->mcmd_tgt_dev != NULL)
		__scst_put();

	mempool_free(mcmd, scst_mgmt_mempool);

	TRACE_EXIT();
	return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
	int res = 0;
	struct scsi_request *req;
	int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

	TRACE_ENTRY();

	/* cmd->dev->scsi_dev must be non-NULL here */
	req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
	if (req == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scsi_request failed");
		res = -ENOMEM;
		goto out;
	}

	cmd->scsi_req = req;

	memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
	req->sr_cmd_len = cmd->cdb_len;
	req->sr_data_direction = cmd->data_direction;
	req->sr_use_sg = cmd->sg_cnt;
	req->sr_bufflen = cmd->bufflen;
	req->sr_buffer = cmd->sg;
	req->sr_request->rq_disk = cmd->dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	cmd->scsi_req->upper_private_data = cmd;

out:
	TRACE_EXIT();
	return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
	scsi_release_request(cmd->scsi_req);
	cmd->scsi_req = NULL;
}
#endif

1642 int scst_alloc_space(struct scst_cmd *cmd)
1643 {
1644         int gfp_mask;
1645         int res = -ENOMEM;
1646         int atomic = scst_cmd_atomic(cmd);
1647         int flags;
1648         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1649         int bufflen = cmd->bufflen;
1650
1651         TRACE_ENTRY();
1652
1653         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1654
1655         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1656         if (cmd->no_sgv)
1657                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1658
1659         if (unlikely(cmd->bufflen == 0)) {
1660                 /* ToDo: remove when 1.0.1 development starts */
1661                 TRACE(TRACE_MGMT_MINOR, "Warning: zero buffer length "
1662                         "(data direction %d). Opcode 0x%x, handler %s, target "
1663                         "%s", cmd->data_direction, cmd->cdb[0],
1664                         cmd->dev->handler->name, cmd->tgtt->name);
1665                 /*
1666                  * Be on the safe side and allocate a stub buffer. Neither
1667                  * target drivers nor user space will touch it, since bufflen
1668                  * remains 0.
1669                  */
1670                 bufflen = PAGE_SIZE;
1671         }
1672
1673         cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
1674                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1675         if (cmd->sg == NULL)
1676                 goto out;
1677
1678         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1679                 static int ll;
1680                 if (ll < 10) {
1681                         PRINT_INFO("Unable to complete command due to "
1682                                 "SG IO count limitation (requested %d, "
1683                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1684                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1685                         ll++;
1686                 }
1687                 goto out_sg_free;
1688         }
1689
1690         res = 0;
1691
1692 out:
1693         TRACE_EXIT();
1694         return res;
1695
1696 out_sg_free:
1697         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1698         cmd->sgv = NULL;
1699         cmd->sg = NULL;
1700         cmd->sg_cnt = 0;
1701         goto out;
1702 }
1703
1704 void scst_release_space(struct scst_cmd *cmd)
1705 {
1706         TRACE_ENTRY();
1707
1708         if (cmd->sgv == NULL)
1709                 goto out;
1710
1711         if (cmd->data_buf_alloced) {
1712                 TRACE_MEM("%s", "data_buf_alloced set, returning");
1713                 goto out;
1714         }
1715
1716         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1717
1718         cmd->sgv = NULL;
1719         cmd->sg_cnt = 0;
1720         cmd->sg = NULL;
1721         cmd->bufflen = 0;
1722         cmd->data_len = 0;
1723
1724 out:
1725         TRACE_EXIT();
1726         return;
1727 }
1728
1729 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1730
1731 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1732 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
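/*
 * For example, opcode 0x28 (READ(10)) is in CDB group (0x28 >> 5) & 0x7 == 1,
 * so SCST_GET_CDB_LEN(0x28) == 10. Groups 3, 6 and 7 are reserved/vendor
 * specific, hence the -1 entries above.
 */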
1733
1734 int scst_get_cdb_len(const uint8_t *cdb)
1735 {
1736         return SCST_GET_CDB_LEN(cdb[0]);
1737 }
1738
1739 /* The get_trans_len_x() helpers extract an x-byte length from the CDB at offset off */
1740
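/*
 * For example, READ(10) (opcode 0x28) carries a two-byte big-endian transfer
 * length at CDB offset 7, so its table entry uses get_trans_len_2() with
 * off == 7: for the CDB 28 00 00 00 10 00 00 00 08 00 this yields
 * cmd->bufflen == 8 (blocks; converted to bytes later by the
 * *_generic_parse() routines when SCST_TRANSFER_LEN_TYPE_FIXED is set).
 */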
1741 /* for special commands */
1742 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1743 {
1744         cmd->bufflen = 6;
1745         return 0;
1746 }
1747
1748 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1749 {
1750         cmd->bufflen = READ_CAP_LEN;
1751         return 0;
1752 }
1753
1754 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1755 {
1756         cmd->bufflen = 1;
1757         return 0;
1758 }
1759
1760 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1761 {
1762         uint8_t *p = (uint8_t *)cmd->cdb + off;
1763         int res = 0;
1764
1765         cmd->bufflen = 0;
1766         cmd->bufflen |= ((u32)p[0]) << 8;
1767         cmd->bufflen |= ((u32)p[1]);
1768
1769         switch (cmd->cdb[1] & 0x1f) {
1770         case 0:
1771         case 1:
1772         case 6:
1773                 if (cmd->bufflen != 0) {
1774                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1775                                 "allocation length for service action %x",
1776                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1777                         goto out_inval;
1778                 }
1779                 break;
1780         }
1781
1782         switch (cmd->cdb[1] & 0x1f) {
1783         case 0:
1784         case 1:
1785                 cmd->bufflen = 20;
1786                 break;
1787         case 6:
1788                 cmd->bufflen = 32;
1789                 break;
1790         case 8:
1791                 cmd->bufflen = max(28, cmd->bufflen);
1792                 break;
1793         default:
1794                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1795                         cmd->cdb[1] & 0x1f);
1796                 goto out_inval;
1797         }
1798
1799 out:
1800         return res;
1801
1802 out_inval:
1803         scst_set_cmd_error(cmd,
1804                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1805         res = 1;
1806         goto out;
1807 }
1808
1809 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1810 {
1811         cmd->bufflen = (u32)cmd->cdb[off];
1812         return 0;
1813 }
1814
1815 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1816 {
1817         cmd->bufflen = (u32)cmd->cdb[off];
1818         if (cmd->bufflen == 0)
1819                 cmd->bufflen = 256;
1820         return 0;
1821 }
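/*
 * (In the 6-byte READ/WRITE CDBs a transfer length of 0 means 256 blocks,
 * which is what the substitution above implements.)
 */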
1822
1823 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1824 {
1825         const uint8_t *p = cmd->cdb + off;
1826
1827         cmd->bufflen = 0;
1828         cmd->bufflen |= ((u32)p[0]) << 8;
1829         cmd->bufflen |= ((u32)p[1]);
1830
1831         return 0;
1832 }
1833
1834 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1835 {
1836         const uint8_t *p = cmd->cdb + off;
1837
1838         cmd->bufflen = 0;
1839         cmd->bufflen |= ((u32)p[0]) << 16;
1840         cmd->bufflen |= ((u32)p[1]) << 8;
1841         cmd->bufflen |= ((u32)p[2]);
1842
1843         return 0;
1844 }
1845
1846 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1847 {
1848         const uint8_t *p = cmd->cdb + off;
1849
1850         cmd->bufflen = 0;
1851         cmd->bufflen |= ((u32)p[0]) << 24;
1852         cmd->bufflen |= ((u32)p[1]) << 16;
1853         cmd->bufflen |= ((u32)p[2]) << 8;
1854         cmd->bufflen |= ((u32)p[3]);
1855
1856         return 0;
1857 }
1858
1859 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1860 {
1861         cmd->bufflen = 0;
1862         return 0;
1863 }
1864
1865 int scst_get_cdb_info(struct scst_cmd *cmd)
1866 {
1867         int dev_type = cmd->dev->handler->type;
1868         int i, res = 0;
1869         uint8_t op;
1870         const struct scst_sdbops *ptr = NULL;
1871
1872         TRACE_ENTRY();
1873
1874         op = cmd->cdb[0];       /* get the opcode */
1875
1876         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1877                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1878                 dev_type);
1879
1880         i = scst_scsi_op_list[op];
1881         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1882                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1883                         ptr = &scst_scsi_op_table[i];
1884                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1885                               ptr->ops, ptr->devkey[0], /* disk     */
1886                               ptr->devkey[1],   /* tape     */
1887                               ptr->devkey[2],   /* printer */
1888                               ptr->devkey[3],   /* cpu      */
1889                               ptr->devkey[4],   /* cdr      */
1890                               ptr->devkey[5],   /* cdrom    */
1891                               ptr->devkey[6],   /* scanner */
1892                               ptr->devkey[7],   /* worm     */
1893                               ptr->devkey[8],   /* changer */
1894                               ptr->devkey[9],   /* commdev */
1895                               ptr->op_name);
1896                         TRACE_DBG("direction=%d flags=%d off=%d",
1897                               ptr->direction,
1898                               ptr->flags,
1899                               ptr->off);
1900                         break;
1901                 }
1902                 i++;
1903         }
1904
1905         if (ptr == NULL) {
1906                 /* opcode not found or not currently supported */
1907                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1908                       dev_type);
1909                 res = -1;
1910                 cmd->op_flags = SCST_INFO_INVALID;
1911                 goto out;
1912         }
1913
1914         cmd->cdb_len = SCST_GET_CDB_LEN(op);
1915         cmd->op_name = ptr->op_name;
1916         cmd->data_direction = ptr->direction;
1917         cmd->op_flags = ptr->flags;
1918         res = (*ptr->get_trans_len)(cmd, ptr->off);
1919 #if 0 /* ToDo: enable when 1.0.1 will be started and fix all scst_get_buf_first() returns 0 cases */
1920         if (unlikely(cmd->bufflen == 0)) {
1921                 /*
1922                  * According to SPC bufflen 0 for data transfer commands isn't
1923                  * an error, so we need to fix the transfer direction.
1924                  */
1925                 cmd->data_direction = SCST_DATA_NONE;
1926         }
1927 #endif
1928
1929 out:
1930         TRACE_EXIT();
1931         return res;
1932 }
1933 EXPORT_SYMBOL(scst_get_cdb_info);
1934
1935 /*
1936  * Routine to extract a lun number from an 8-byte LUN structure
1937  * in network byte order (BE).
1938  * (see SAM-2, Section 4.12.3 page 40)
1939  * Supports the peripheral, flat space and logical unit addressing methods.
1940  */
1941 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1942 {
1943         lun_t res = (lun_t)-1;
1944         int address_method;
1945
1946         TRACE_ENTRY();
1947
1948         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1949
1950         if (unlikely(len < 2)) {
1951                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1952                         "more", len);
1953                 goto out;
1954         }
1955
1956         if (len > 2) {
1957                 switch (len) {
1958                 case 8:
1959                         if ((*((uint64_t *)lun) &
1960                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1961                                 goto out_err;
1962                         break;
1963                 case 4:
1964                         if (*((uint16_t *)&lun[2]) != 0)
1965                                 goto out_err;
1966                         break;
1967                 case 6:
1968                         if (*((uint32_t *)&lun[2]) != 0)
1969                                 goto out_err;
1970                         break;
1971                 default:
1972                         goto out_err;
1973                 }
1974         }
1975
1976         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1977         switch (address_method) {
1978         case 0: /* peripheral device addressing method */
1979 #if 0 /* Looks like it's legal to use it as flat space addressing method as well */
1980                 if (*lun) {
1981                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
1982                              "peripheral device addressing method 0x%02x, "
1983                              "expected 0", *lun);
1984                         break;
1985                 }
1986                 res = *(lun + 1);
1987                 break;
1988 #else
1989                 /* fall through */
1990 #endif
1991
1992         case 1: /* flat space addressing method */
1993                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1994                 break;
1995
1996         case 2: /* logical unit addressing method */
1997                 if (*lun & 0x3f) {
1998                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1999                                     "addressing method 0x%02x, expected 0",
2000                                     *lun & 0x3f);
2001                         break;
2002                 }
2003                 if (*(lun + 1) & 0xe0) {
2004                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2005                                     "addressing method 0x%02x, expected 0",
2006                                     (*(lun + 1) & 0xe0) >> 5);
2007                         break;
2008                 }
2009                 res = *(lun + 1) & 0x1f;
2010                 break;
2011
2012         case 3: /* extended logical unit addressing method */
2013         default:
2014                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2015                             address_method);
2016                 break;
2017         }
2018
2019 out:
2020         TRACE_EXIT_RES((int)res);
2021         return res;
2022
2023 out_err:
2024         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2025         goto out;
2026 }
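/*
 * Illustrative only (hypothetical LUN values): for the single-level LUN 5,
 * flat space addressing encodes the first two bytes as 0x40 0x05 and
 * peripheral addressing as 0x00 0x05; scst_unpack_lun() returns 5 for both.
 */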
2027
2028 int scst_calc_block_shift(int sector_size)
2029 {
2030         int block_shift = 0;
2031         int t;
2032
2033         if (sector_size == 0)
2034                 sector_size = 512;
2035
2036         t = sector_size;
2037         while (1) {
2038                 if ((t & 1) != 0)
2039                         break;
2040                 t >>= 1;
2041                 block_shift++;
2042         }
2043         if (block_shift < 9) {
2044                 PRINT_ERROR("Wrong sector size %d", sector_size);
2045                 block_shift = -1;
2046         }
2047
2048         TRACE_EXIT_RES(block_shift);
2049         return block_shift;
2050 }
2051 EXPORT_SYMBOL(scst_calc_block_shift);
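/*
 * For example, scst_calc_block_shift(512) returns 9 and
 * scst_calc_block_shift(4096) returns 12, while scst_calc_block_shift(520)
 * returns -1, since 520 contains only three factors of two.
 */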
2052
2053 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2054         int (*get_block_shift)(struct scst_cmd *cmd))
2055 {
2056         int res = 0;
2057
2058         TRACE_ENTRY();
2059
2060         /*
2061          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2062          * therefore change them only if necessary
2063          */
2064
2065         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2066               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2067
2068         switch (cmd->cdb[0]) {
2069         case SERVICE_ACTION_IN:
2070                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2071                         cmd->bufflen = READ_CAP16_LEN;
2072                         cmd->data_direction = SCST_DATA_READ;
2073                 }
2074                 break;
2075         case VERIFY_6:
2076         case VERIFY:
2077         case VERIFY_12:
2078         case VERIFY_16:
2079                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2080                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2081                         cmd->bufflen = 0;
2082                         goto set_timeout;
2083                 } else
2084                         cmd->data_len = 0;
2085                 break;
2086         default:
2087                 /* It's all good */
2088                 break;
2089         }
2090
2091         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2092                 /*
2093                  * No need for locks here, since *_detach() cannot be
2094                  * called while there are outstanding commands.
2095                  */
2096                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2097         }
2098
2099 set_timeout:
2100         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2101                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2102         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2103                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2104         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2105                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2106
2107         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2108               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2109
2110         TRACE_EXIT_RES(res);
2111         return res;
2112 }
2113 EXPORT_SYMBOL(scst_sbc_generic_parse);
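/*
 * Illustrative only: a minimal sketch of how a block device handler might
 * plug its block shift into the generic parser. struct my_virt_dev,
 * my_get_block_shift() and my_parse() are hypothetical names, not part of
 * this file; the sketch assumes dev->dh_priv was pointed at a
 * struct my_virt_dev in the handler's attach() routine.
 */
#if 0
struct my_virt_dev {
	int block_shift;
};

static int my_get_block_shift(struct scst_cmd *cmd)
{
	/* dh_priv is the dev handler's private data, set in attach() */
	struct my_virt_dev *virt_dev =
		(struct my_virt_dev *)cmd->dev->dh_priv;

	return virt_dev->block_shift;
}

static int my_parse(struct scst_cmd *cmd)
{
	return scst_sbc_generic_parse(cmd, my_get_block_shift);
}
#endif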
2114
2115 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2116         int (*get_block_shift)(struct scst_cmd *cmd))
2117 {
2118         int res = 0;
2119
2120         TRACE_ENTRY();
2121
2122         /*
2123          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2124          * therefore change them only if necessary
2125          */
2126
2127         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2128               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2129
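        /* Clear the obsolete SCSI-2 LUN field (bits 7-5) of CDB byte 1 */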
2130         cmd->cdb[1] &= 0x1f;
2131
2132         switch (cmd->cdb[0]) {
2133         case VERIFY_6:
2134         case VERIFY:
2135         case VERIFY_12:
2136         case VERIFY_16:
2137                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2138                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2139                         cmd->bufflen = 0;
2140                         goto set_timeout;
2141                 }
2142                 break;
2143         default:
2144                 /* It's all good */
2145                 break;
2146         }
2147
2148         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2149                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2150
2151 set_timeout:
2152         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2153                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2154         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2155                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2156         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2157                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2158
2159         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2160                 cmd->data_direction);
2161
2162         TRACE_EXIT();
2163         return res;
2164 }
2165 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2166
2167 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2168         int (*get_block_shift)(struct scst_cmd *cmd))
2169 {
2170         int res = 0;
2171
2172         TRACE_ENTRY();
2173
2174         /*
2175          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2176          * therefore change them only if necessary
2177          */
2178
2179         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2180               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2181
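        /* Clear the obsolete SCSI-2 LUN field (bits 7-5) of CDB byte 1 */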
2182         cmd->cdb[1] &= 0x1f;
2183
2184         switch (cmd->cdb[0]) {
2185         case VERIFY_6:
2186         case VERIFY:
2187         case VERIFY_12:
2188         case VERIFY_16:
2189                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2190                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2191                         cmd->bufflen = 0;
2192                         goto set_timeout;
2193                 }
2194                 break;
2195         default:
2196                 /* It's all good */
2197                 break;
2198         }
2199
2200         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2201                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2202
2203 set_timeout:
2204         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2205                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2206         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2207                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2208         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2209                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2210
2211         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2212                 cmd->data_direction);
2213
2214         TRACE_EXIT_RES(res);
2215         return res;
2216 }
2217 EXPORT_SYMBOL(scst_modisk_generic_parse);
2218
2219 int scst_tape_generic_parse(struct scst_cmd *cmd,
2220         int (*get_block_size)(struct scst_cmd *cmd))
2221 {
2222         int res = 0;
2223
2224         TRACE_ENTRY();
2225
2226         /*
2227          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2228          * therefore change them only if necessary
2229          */
2230
2231         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2232               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2233
2234         if (cmd->cdb[0] == READ_POSITION) {
2235                 int tclp = cmd->cdb[1] & TCLP_BIT;
2236                 int long_bit = cmd->cdb[1] & LONG_BIT;
2237                 int bt = cmd->cdb[1] & BT_BIT;
2238
2239                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2240                         cmd->bufflen =
2241                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2242                         cmd->data_direction = SCST_DATA_READ;
2243                 } else {
2244                         cmd->bufflen = 0;
2245                         cmd->data_direction = SCST_DATA_NONE;
2246                 }
2247         }
2248
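        /*
         * In the tape 6-byte READ/WRITE CDBs bit 0 of byte 1 is the FIXED
         * bit; the double "and" below works because
         * SCST_TRANSFER_LEN_TYPE_FIXED occupies that same bit, so the
         * blocks-to-bytes conversion is done only in fixed block mode.
         */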
2249         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2250                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2251
2252         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2253                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2254         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2255                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2256         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2257                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2258
2259         TRACE_EXIT_RES(res);
2260         return res;
2261 }
2262 EXPORT_SYMBOL(scst_tape_generic_parse);
2263
2264 static int scst_null_parse(struct scst_cmd *cmd)
2265 {
2266         int res = 0;
2267
2268         TRACE_ENTRY();
2269
2270         /*
2271          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2272          * therefore change them only if necessary
2273          */
2274
2275         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2276               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2277 #if 0
2278         switch (cmd->cdb[0]) {
2279         default:
2280                 /* It's all good */
2281                 break;
2282         }
2283 #endif
2284         TRACE_DBG("res %d bufflen %d direct %d",
2285               res, cmd->bufflen, cmd->data_direction);
2286
2287         TRACE_EXIT();
2288         return res;
2289 }
2290
2291 int scst_changer_generic_parse(struct scst_cmd *cmd,
2292         int (*nothing)(struct scst_cmd *cmd))
2293 {
2294         int res = scst_null_parse(cmd);
2295
2296         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2297                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2298         else
2299                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2300
2301         return res;
2302 }
2303 EXPORT_SYMBOL(scst_changer_generic_parse);
2304
2305 int scst_processor_generic_parse(struct scst_cmd *cmd,
2306         int (*nothing)(struct scst_cmd *cmd))
2307 {
2308         int res = scst_null_parse(cmd);
2309
2310         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2311                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2312         else
2313                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2314
2315         return res;
2316 }
2317 EXPORT_SYMBOL(scst_processor_generic_parse);
2318
2319 int scst_raid_generic_parse(struct scst_cmd *cmd,
2320         int (*nothing)(struct scst_cmd *cmd))
2321 {
2322         int res = scst_null_parse(cmd);
2323
2324         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2325                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2326         else
2327                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2328
2329         return res;
2330 }
2331 EXPORT_SYMBOL(scst_raid_generic_parse);
2332
2333 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2334         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2335 {
2336         int opcode = cmd->cdb[0];
2337         int status = cmd->status;
2338         int res = SCST_CMD_STATE_DEFAULT;
2339
2340         TRACE_ENTRY();
2341
2342         /*
2343          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2344          * based on cmd->status and cmd->data_direction, therefore change
2345          * them only if necessary
2346          */
2347
2348         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2349                 switch (opcode) {
2350                 case READ_CAPACITY:
2351                 {
2352                         /* Always keep track of the device's block size */
2353                         int buffer_size, sector_size, sh;
2354                         uint8_t *buffer;
2355
2356                         buffer_size = scst_get_buf_first(cmd, &buffer);
2357                         if (unlikely(buffer_size <= 0)) {
2358                                 PRINT_ERROR("%s: Unable to get the buffer "
2359                                         "(%d)", __func__, buffer_size);
2360                                 goto out;
2361                         }
2362
2363                         sector_size =
2364                             ((buffer[4] << 24) | (buffer[5] << 16) |
2365                              (buffer[6] << 8) | (buffer[7] << 0));
2366                         scst_put_buf(cmd, buffer);
2367                         if (sector_size != 0)
2368                                 sh = scst_calc_block_shift(sector_size);
2369                         else
2370                                 sh = 0;
2371                         set_block_shift(cmd, sh);
2372                         TRACE_DBG("block_shift %d", sh);
2373                         break;
2374                 }
2375                 default:
2376                         /* It's all good */
2377                         break;
2378                 }
2379         }
2380
2381         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2382               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2383
2384 out:
2385         TRACE_EXIT_RES(res);
2386         return res;
2387 }
2388 EXPORT_SYMBOL(scst_block_generic_dev_done);
2389
2390 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2391         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2392 {
2393         int opcode = cmd->cdb[0];
2394         int res = SCST_CMD_STATE_DEFAULT;
2395         int buffer_size, bs;
2396         uint8_t *buffer = NULL;
2397
2398         TRACE_ENTRY();
2399
2400         /*
2401          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2402          * based on cmd->status and cmd->data_direction, therefore change
2403          * them only if necessary
2404          */
2405
2406         switch (opcode) {
2407         case MODE_SENSE:
2408         case MODE_SELECT:
2409                 buffer_size = scst_get_buf_first(cmd, &buffer);
2410                 if (unlikely(buffer_size <= 0)) {
2411                         PRINT_ERROR("%s: Unable to get the buffer (%d)",
2412                                 __func__, buffer_size);
2413                         goto out;
2414                 }
2415                 break;
2416         }
2417
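        /*
         * The transferred data starts with a 4-byte mode parameter header;
         * buffer[3] is the block descriptor length (8 when a descriptor is
         * present) and bytes 9-11 of the transfer, i.e. bytes 5-7 of the
         * descriptor, hold the big-endian block length.
         */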
2418         switch (opcode) {
2419         case MODE_SENSE:
2420                 TRACE_DBG("%s", "MODE_SENSE");
2421                 if ((cmd->cdb[2] & 0xC0) == 0) {
2422                         if (buffer[3] == 8) {
2423                                 bs = (buffer[9] << 16) |
2424                                     (buffer[10] << 8) | buffer[11];
2425                                 set_block_size(cmd, bs);
2426                         }
2427                 }
2428                 break;
2429         case MODE_SELECT:
2430                 TRACE_DBG("%s", "MODE_SELECT");
2431                 if (buffer[3] == 8) {
2432                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2433                             (buffer[11]);
2434                         set_block_size(cmd, bs);
2435                 }
2436                 break;
2437         default:
2438                 /* It's all good */
2439                 break;
2440         }
2441
2442         switch (opcode) {
2443         case MODE_SENSE:
2444         case MODE_SELECT:
2445                 scst_put_buf(cmd, buffer);
2446                 break;
2447         }
2448
2449 out:
2450         TRACE_EXIT_RES(res);
2451         return res;
2452 }
2453 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2454
2455 static void scst_check_internal_sense(struct scst_device *dev, int result,
2456         uint8_t *sense, int sense_len)
2457 {
2458         TRACE_ENTRY();
2459
2460         if (host_byte(result) == DID_RESET) {
2461                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2462                         "reset UA");
2463                 scst_set_sense(sense, sense_len,
2464                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2465                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2466         } else if ((status_byte(result) == CHECK_CONDITION) &&
2467                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2468                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2469
2470         TRACE_EXIT();
2471         return;
2472 }
2473
2474 int scst_obtain_device_parameters(struct scst_device *dev)
2475 {
2476         int res = 0, i;
2477         uint8_t cmd[16];
2478         uint8_t buffer[4+0x0A];
2479         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2480
2481         TRACE_ENTRY();
2482
2483         sBUG_ON(in_interrupt() || in_atomic());
2484         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2485
2486         for (i = 0; i < 5; i++) {
2487                 /* Get control mode page */
2488                 memset(cmd, 0, sizeof(cmd));
2489                 cmd[0] = MODE_SENSE;
2490                 cmd[1] = 8; /* DBD */
2491                 cmd[2] = 0x0A;
2492                 cmd[4] = sizeof(buffer);
2493
2494                 memset(buffer, 0, sizeof(buffer));
2495                 memset(sense_buffer, 0, sizeof(sense_buffer));
2496
2497                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2498                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2499                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2500
2501                 TRACE_DBG("MODE_SENSE done: %x", res);
2502
2503                 if (scsi_status_is_good(res)) {
2504                         int q;
2505
2506                         PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2507                                 buffer, sizeof(buffer));
2508
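                        /*
                         * buffer[0..3] is the mode parameter header (DBD was
                         * set, so no block descriptors follow); the control
                         * mode page starts at offset 4. Page byte 2 holds
                         * TST in bits 7-5, byte 3 the QUEUE ALGORITHM
                         * MODIFIER in bits 7-4, byte 4 SWP in bit 3 and
                         * byte 5 TAS in bit 6.
                         */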
2509                         dev->tst = buffer[4+2] >> 5;
2510                         q = buffer[4+3] >> 4;
2511                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2512                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2513                                         "%d:%d:%d:%d", q,
2514                                         dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2515                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2516                         }
2517                         dev->queue_alg = q;
2518                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2519                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2520
2521                         /*
2522                          * Unfortunately, the SCSI midlayer doesn't provide a
2523                          * way to specify a command's task attribute, so we can
2524                          * only rely on the device's restricted reordering.
2525                          */
2526                         dev->has_own_order_mgmt = !dev->queue_alg;
2527
2528                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2529                                 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2530                                 "%d", dev->scsi_dev->host->host_no,
2531                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2532                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2533                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2534
2535                         goto out;
2536                 } else {
2537 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2538                         if ((status_byte(res) == CHECK_CONDITION) &&
2539 #else
2540                         if (
2541 #endif
2542                             SCST_SENSE_VALID(sense_buffer)) {
2543                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2544                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2545                                                 "%d:%d:%d:%d doesn't support control "
2546                                                 "mode page, using defaults: TST "
2547                                                 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2548                                                 "has_own_order_mgmt %d",
2549                                                 dev->scsi_dev->host->host_no,
2550                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2551                                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2552                                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2553                                         res = 0;
2554                                         goto out;
2555                                 } else if (sense_buffer[2] == NOT_READY) {
2556                                         TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2557                                                 dev->scsi_dev->host->host_no,
2558                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2559                                                 dev->scsi_dev->lun);
2560                                         res = 0;
2561                                         goto out;
2562                                 }
2563                         } else {
2564                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2565                                         "device %d:%d:%d:%d failed: %x",
2566                                         dev->scsi_dev->host->host_no,
2567                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2568                                         dev->scsi_dev->lun, res);
2569                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2570                                         "sense", sense_buffer, sizeof(sense_buffer));
2571                         }
2572                         scst_check_internal_sense(dev, res, sense_buffer,
2573                                         sizeof(sense_buffer));
2574                 }
2575         }
2576         res = -ENODEV;
2577
2578 out:
2579         TRACE_EXIT_RES(res);
2580         return res;
2581 }
2582 EXPORT_SYMBOL(scst_obtain_device_parameters);
2583
2584 /* Called under dev_lock and BH off */
2585 void scst_process_reset(struct scst_device *dev,
2586         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2587         struct scst_mgmt_cmd *mcmd)
2588 {
2589         struct scst_tgt_dev *tgt_dev;
2590         struct scst_cmd *cmd, *tcmd;
2591
2592         TRACE_ENTRY();
2593
2594         /* Clear RESERVE'ation, if necessary */
2595         if (dev->dev_reserved) {
2596                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2597                                     dev_tgt_dev_list_entry) {
2598                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2599                                 "lun %llu",
2600                                 (long long unsigned int)tgt_dev->lun);
2601                         clear_bit(SCST_TGT_DEV_RESERVED,
2602                                   &tgt_dev->tgt_dev_flags);
2603                 }
2604                 dev->dev_reserved = 0;
2605                 /*
2606                  * There is no need to send RELEASE, since the device is going
2607                  * to be reset. Actually, since we can be running in a RESET
2608                  * TM function, sending it might even be dangerous.
2609                  */
2610         }
2611
2612         dev->dev_double_ua_possible = 1;
2613         dev->dev_serialized = 1;
2614
2615         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2616                 dev_tgt_dev_list_entry) {
2617                 struct scst_session *sess = tgt_dev->sess;
2618
2619                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2620                 scst_free_all_UA(tgt_dev);
2621                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2622
2623                 spin_lock_irq(&sess->sess_list_lock);
2624
2625                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2626                 list_for_each_entry(cmd, &sess->search_cmd_list,
2627                                 search_cmd_list_entry) {
2628                         if (cmd == exclude_cmd)
2629                                 continue;
2630                         if ((cmd->tgt_dev == tgt_dev) ||
2631                             ((cmd->tgt_dev == NULL) &&
2632                              (cmd->lun == tgt_dev->lun))) {
2633                                 scst_abort_cmd(cmd, mcmd,
2634                                         (tgt_dev->sess != originator), 0);
2635                         }
2636                 }
2637                 spin_unlock_irq(&sess->sess_list_lock);
2638         }
2639
2640         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2641                                 blocked_cmd_list_entry) {
2642                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2643                         list_del(&cmd->blocked_cmd_list_entry);
2644                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2645                                 "to active cmd list", cmd);
2646                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2647                         list_add_tail(&cmd->cmd_list_entry,
2648                                 &cmd->cmd_lists->active_cmd_list);
2649                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2650                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2651                 }
2652         }
2653
2654         /* BH already off */
2655         spin_lock(&scst_temp_UA_lock);
2656         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2657                 SCST_LOAD_SENSE(scst_sense_reset_UA));
2658         scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2659                 sizeof(scst_temp_UA));
2660         spin_unlock(&scst_temp_UA_lock);
2661
2662         TRACE_EXIT();
2663         return;
2664 }
2665
2666 int scst_set_pending_UA(struct scst_cmd *cmd)
2667 {
2668         int res = 0;
2669         struct scst_tgt_dev_UA *UA_entry;
2670
2671         TRACE_ENTRY();
2672
2673         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2674
2675         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2676
2677         /* UA list could be cleared behind us, so retest */
2678         if (list_empty(&cmd->tgt_dev->UA_list)) {
2679                 TRACE_DBG("%s",
2680                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2681                 res = -1;
2682                 goto out_unlock;
2683         }
2684
2685         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2686                               UA_list_entry);
2687
2688         TRACE_DBG("next %p UA_entry %p",
2689               cmd->tgt_dev->UA_list.next, UA_entry);
2690
2691         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2692                 sizeof(UA_entry->UA_sense_buffer));
2693
2694         cmd->ua_ignore = 1;
2695
2696         list_del(&UA_entry->UA_list_entry);
2697
2698         mempool_free(UA_entry, scst_ua_mempool);
2699
2700         if (list_empty(&cmd->tgt_dev->UA_list)) {
2701                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2702                           &cmd->tgt_dev->tgt_dev_flags);
2703         }
2704
2705         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2706
2707 out:
2708         TRACE_EXIT_RES(res);
2709         return res;
2710
2711 out_unlock:
2712         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2713         goto out;
2714 }
2715
2716 /* Called under tgt_dev_lock and BH off */
2717 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2718         const uint8_t *sense, int sense_len, int head)
2719 {
2720         struct scst_tgt_dev_UA *UA_entry = NULL;
2721
2722         TRACE_ENTRY();
2723
2724         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2725         if (UA_entry == NULL) {
2726                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2727                      "allocation failed. The UNIT ATTENTION "
2728                      "on some sessions will be missed");
2729                 PRINT_BUFFER("Lost UA", sense, sense_len);
2730                 goto out;
2731         }
2732         memset(UA_entry, 0, sizeof(*UA_entry));
2733
2734         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2735                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2736         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2737
2738         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2739
2740         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2741
2742         if (head)
2743                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2744         else
2745                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2746
2747 out:
2748         TRACE_EXIT();
2749         return;
2750 }
2751
2752 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2753         const uint8_t *sense, int sense_len, int head)
2754 {
2755         int skip_UA = 0;
2756         struct scst_tgt_dev_UA *UA_entry_tmp;
2757
2758         TRACE_ENTRY();
2759
2760         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2761
2762         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2763                             UA_list_entry) {
2764                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
                            min(sense_len, (int)sizeof(UA_entry_tmp->UA_sense_buffer))) == 0) {
2765                         TRACE_MGMT_DBG("%s", "UA already exists");
2766                         skip_UA = 1;
2767                         break;
2768                 }
2769         }
2770
2771         if (skip_UA == 0)
2772                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2773
2774         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2775
2776         TRACE_EXIT();
2777         return;
2778 }
2779
2780 /* Called under dev_lock and BH off */
2781 void scst_dev_check_set_local_UA(struct scst_device *dev,
2782         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2783 {
2784         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2785
2786         TRACE_ENTRY();
2787
2788         if (exclude != NULL)
2789                 exclude_tgt_dev = exclude->tgt_dev;
2790
2791         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2792                         dev_tgt_dev_list_entry) {
2793                 if (tgt_dev != exclude_tgt_dev)
2794                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2795         }
2796
2797         TRACE_EXIT();
2798         return;
2799 }
2800
2801 /* Called under dev_lock and BH off */
2802 void __scst_dev_check_set_UA(struct scst_device *dev,
2803         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2804 {
2805         TRACE_ENTRY();
2806
2807         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2808
2809         /* Check for reset UA */
2810         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2811                 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2812                         exclude, NULL);
2813
2814         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2815
2816         TRACE_EXIT();
2817         return;
2818 }
2819
2820 /* Called under tgt_dev_lock or when tgt_dev is unused */
2821 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2822 {
2823         struct scst_tgt_dev_UA *UA_entry, *t;
2824
2825         TRACE_ENTRY();
2826
2827         list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2828                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %llu",
2829                                (long long unsigned int)tgt_dev->lun);
2830                 list_del(&UA_entry->UA_list_entry);
2831                 kfree(UA_entry);
2832         }
2833         INIT_LIST_HEAD(&tgt_dev->UA_list);
2834         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2835
2836         TRACE_EXIT();
2837         return;
2838 }
2839
2840 /* No locks */
2841 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2842 {
2843         struct scst_cmd *res = NULL, *cmd, *t;
2844         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2845
2846         spin_lock_irq(&tgt_dev->sn_lock);
2847
2848         if (unlikely(tgt_dev->hq_cmd_count != 0))
2849                 goto out_unlock;
2850
2851 restart:
2852         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2853                                 sn_cmd_list_entry) {
2854                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2855                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2856                 if (cmd->sn == expected_sn) {
2857                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2858                                 cmd, cmd->sn, cmd->sn_set);
2859                         tgt_dev->def_cmd_count--;
2860                         list_del(&cmd->sn_cmd_list_entry);
2861                         if (res == NULL)
2862                                 res = cmd;
2863                         else {
2864                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2865                                 TRACE_SN("Adding cmd %p to active cmd list",
2866                                         cmd);
2867                                 list_add_tail(&cmd->cmd_list_entry,
2868                                         &cmd->cmd_lists->active_cmd_list);
2869                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2870                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2871                         }
2872                 }
2873         }
2874         if (res != NULL)
2875                 goto out_unlock;
2876
2877         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2878                                 sn_cmd_list_entry) {
2879                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2880                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2881                 if (cmd->sn == expected_sn) {
2882                         atomic_t *slot = cmd->sn_slot;
2883                         /*
2884                          * !! At this point any pointer in cmd, except !!
2885                          * !! sn_slot and sn_cmd_list_entry, could be   !!
2886                          * !! already destroyed                         !!
2887                          */
2888                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2889                                  cmd,
2890                                  (long long unsigned int)cmd->tag,
2891                                  cmd->sn);
2892                         tgt_dev->def_cmd_count--;
2893                         list_del(&cmd->sn_cmd_list_entry);
2894                         spin_unlock_irq(&tgt_dev->sn_lock);
2895                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2896                                              &cmd->cmd_flags))
2897                                 scst_destroy_put_cmd(cmd);
2898                         scst_inc_expected_sn(tgt_dev, slot);
2899                         expected_sn = tgt_dev->expected_sn;
2900                         spin_lock_irq(&tgt_dev->sn_lock);
2901                         goto restart;
2902                 }
2903         }
2904
2905 out_unlock:
2906         spin_unlock_irq(&tgt_dev->sn_lock);
2907         return res;
2908 }
2909
2910 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2911         struct scst_thr_data_hdr *data,
2912         void (*free_fn) (struct scst_thr_data_hdr *data))
2913 {
2914         data->pid = current->pid;
2915         atomic_set(&data->ref, 1);
2916         EXTRACHECKS_BUG_ON(free_fn == NULL);
2917         data->free_fn = free_fn;
2918         spin_lock(&tgt_dev->thr_data_lock);
2919         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2920         spin_unlock(&tgt_dev->thr_data_lock);
2921 }
2922 EXPORT_SYMBOL(scst_add_thr_data);
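/*
 * Illustrative only: a minimal sketch of per-thread data usage by a dev
 * handler. struct my_thr_data, my_thr_data_free() and my_get_thr_data() are
 * hypothetical names, and reference counting (scst_thr_data_put()) is
 * omitted for brevity.
 */
#if 0
struct my_thr_data {
	struct scst_thr_data_hdr hdr;
	int some_state;
};

static void my_thr_data_free(struct scst_thr_data_hdr *d)
{
	kfree(container_of(d, struct my_thr_data, hdr));
}

static struct my_thr_data *my_get_thr_data(struct scst_tgt_dev *tgt_dev)
{
	struct scst_thr_data_hdr *d = scst_find_thr_data(tgt_dev);

	if (d == NULL) {
		/* First call on this thread: allocate and register */
		struct my_thr_data *td = kzalloc(sizeof(*td), GFP_KERNEL);

		if (td == NULL)
			return NULL;
		scst_add_thr_data(tgt_dev, &td->hdr, my_thr_data_free);
		d = &td->hdr;
	}
	return container_of(d, struct my_thr_data, hdr);
}
#endif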
2923
2924 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2925 {
2926         spin_lock(&tgt_dev->thr_data_lock);
2927         while (!list_empty(&tgt_dev->thr_data_list)) {
2928                 struct scst_thr_data_hdr *d = list_entry(
2929                                 tgt_dev->thr_data_list.next, typeof(*d),
2930                                 thr_data_list_entry);
2931                 list_del(&d->thr_data_list_entry);
2932                 spin_unlock(&tgt_dev->thr_data_lock);
2933                 scst_thr_data_put(d);
2934                 spin_lock(&tgt_dev->thr_data_lock);
2935         }
2936         spin_unlock(&tgt_dev->thr_data_lock);
2937         return;
2938 }
2939 EXPORT_SYMBOL(scst_del_all_thr_data);
2940
2941 void scst_dev_del_all_thr_data(struct scst_device *dev)
2942 {
2943         struct scst_tgt_dev *tgt_dev;
2944
2945         TRACE_ENTRY();
2946
2947         mutex_lock(&scst_mutex);
2948
2949         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2950                                 dev_tgt_dev_list_entry) {
2951                 scst_del_all_thr_data(tgt_dev);
2952         }
2953
2954         mutex_unlock(&scst_mutex);
2955
2956         TRACE_EXIT();
2957         return;
2958 }
2959 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
2960
2961 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2962 {
2963         struct scst_thr_data_hdr *res = NULL, *d;
2964
2965         spin_lock(&tgt_dev->thr_data_lock);
2966         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2967                 if (d->pid == current->pid) {
2968                         res = d;
2969                         scst_thr_data_get(res);
2970                         break;
2971                 }
2972         }
2973         spin_unlock(&tgt_dev->thr_data_lock);
2974         return res;
2975 }
2976 EXPORT_SYMBOL(scst_find_thr_data);
2977
2978 /* dev_lock supposed to be held and BH disabled */
2979 void __scst_block_dev(struct scst_device *dev)
2980 {
2981         dev->block_count++;
2982         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2983 }
2984
2985 /* No locks */
2986 void scst_block_dev(struct scst_device *dev, int outstanding)
2987 {
2988         spin_lock_bh(&dev->dev_lock);
2989         __scst_block_dev(dev);
2990         spin_unlock_bh(&dev->dev_lock);
2991
2992         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2993         smp_mb();
2994
2995         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2996                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2997         wait_event(dev->on_dev_waitQ,
2998                 atomic_read(&dev->on_dev_count) <= outstanding);
2999         TRACE_MGMT_DBG("%s", "wait_event() returned");
3000 }
3001
3002 /* No locks */
3003 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3004 {
3005         sBUG_ON(cmd->needs_unblocking);
3006
3007         cmd->needs_unblocking = 1;
3008         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3009                        cmd, (long long unsigned int)cmd->tag);
3010
3011         scst_block_dev(cmd->dev, outstanding);
3012 }
3013
3014 /* No locks */
3015 void scst_unblock_dev(struct scst_device *dev)
3016 {
3017         spin_lock_bh(&dev->dev_lock);
3018         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3019                 dev->block_count-1, dev);
3020         if (--dev->block_count == 0)
3021                 scst_unblock_cmds(dev);
3022         spin_unlock_bh(&dev->dev_lock);
3023         sBUG_ON(dev->block_count < 0);
3024 }
3025
3026 /* No locks */
3027 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3028 {
3029         scst_unblock_dev(cmd->dev);
3030         cmd->needs_unblocking = 0;
3031 }
3032
3033 /* No locks */
3034 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3035 {
3036         int res = 0;
3037         struct scst_device *dev = cmd->dev;
3038
3039         TRACE_ENTRY();
3040
3041         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3042
3043         atomic_inc(&dev->on_dev_count);
3044         cmd->dec_on_dev_needed = 1;
3045         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3046
3047 #ifdef STRICT_SERIALIZING
3048         spin_lock_bh(&dev->dev_lock);
3049         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3050                 goto out_unlock;
3051         if (dev->block_count > 0) {
3052                 scst_dec_on_dev_cmd(cmd);
3053                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3054                         "serializing (tag %llu, dev %p)", cmd, (long long unsigned int)cmd->tag, dev);
3055                 list_add_tail(&cmd->blocked_cmd_list_entry,
3056                               &dev->blocked_cmd_list);
3057                 res = 1;
3058         } else {
3059                 __scst_block_dev(dev);
3060                 cmd->inc_blocking = 1;
3061         }
3062         spin_unlock_bh(&dev->dev_lock);
3063         goto out;
3064 #else
3065 repeat:
3066         if (unlikely(dev->block_count > 0)) {
3067                 spin_lock_bh(&dev->dev_lock);
3068                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3069                         goto out_unlock;
3070                 barrier(); /* to reread block_count */
3071                 if (dev->block_count > 0) {
3072                         scst_dec_on_dev_cmd(cmd);
3073                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
3074                                 "serializing (tag %llu, dev %p)", cmd,
3075                                 (long long unsigned int)cmd->tag, dev);
3076                         list_add_tail(&cmd->blocked_cmd_list_entry,
3077                                       &dev->blocked_cmd_list);
3078                         res = 1;
3079                         spin_unlock_bh(&dev->dev_lock);
3080                         goto out;
3081                 } else {
3082                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3083                                 "continuing");
3084                 }
3085                 spin_unlock_bh(&dev->dev_lock);
3086         }
3087         if (unlikely(dev->dev_serialized)) {
3088                 spin_lock_bh(&dev->dev_lock);
3089                 barrier(); /* to reread block_count */
3090                 if (dev->block_count == 0) {
3091                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3092                                 "cmds due to serializing (dev %p)", cmd,
3093                                 (long long unsigned int)cmd->tag, dev);
3094                         __scst_block_dev(dev);
3095                         cmd->inc_blocking = 1;
3096                 } else {
3097                         spin_unlock_bh(&dev->dev_lock);
3098                         TRACE_MGMT_DBG("Somebody blocked the device, "
3099                                 "repeating (count %d)", dev->block_count);
3100                         goto repeat;
3101                 }
3102                 spin_unlock_bh(&dev->dev_lock);
3103         }
3104 #endif
3105
3106 out:
3107         TRACE_EXIT_RES(res);
3108         return res;
3109
3110 out_unlock:
3111         spin_unlock_bh(&dev->dev_lock);
3112         goto out;
3113 }
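
/*
 * Return convention of scst_inc_on_dev_cmd(), as follows from the code
 * above: 0 means the command may proceed to the device now
 * (on_dev_count was incremented), 1 means it was parked on
 * dev->blocked_cmd_list and will be reactivated by scst_unblock_cmds().
 * A caller sketch, with send_to_device() as a hypothetical placeholder
 * for the dispatch step:
 *
 *	if (scst_inc_on_dev_cmd(cmd) != 0)
 *		return;			// delayed, re-queued on unblock
 *	rc = send_to_device(cmd);	// placeholder, not a function here
 */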
3114
3115 /* Called under dev_lock */
3116 void scst_unblock_cmds(struct scst_device *dev)
3117 {
3118 #ifdef STRICT_SERIALIZING
3119         struct scst_cmd *cmd, *t;
3120         unsigned long flags;
3121
3122         TRACE_ENTRY();
3123
3124         local_irq_save(flags);
3125         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3126                                  blocked_cmd_list_entry) {
3127                 int brk = 0;
3128                 /*
3129                  * Since only one cmd at a time is being executed, expected_sn
3130                  * can't change behind us if the corresponding cmd is in
3131                  * blocked_cmd_list, but we could be called before
3132                  * scst_inc_expected_sn().
3133                  */
3134                 if (likely(!cmd->internal && !cmd->retry)) {
3135                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3136                         /* tgt_dev must be set for non-internal commands */
3137                         sBUG_ON(cmd->tgt_dev == NULL);
3138                         expected_sn = cmd->tgt_dev->expected_sn;
3139                         if (cmd->sn == expected_sn)
3140                                 brk = 1;
3141                         else if (cmd->sn != (expected_sn+1))
3142                                 continue;
3143                 }
3144
3145                 list_del(&cmd->blocked_cmd_list_entry);
3146                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3147                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3148                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
3149                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3150                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3151                 if (brk)
3152                         break;
3153         }
3154         local_irq_restore(flags);
3155 #else /* STRICT_SERIALIZING */
3156         struct scst_cmd *cmd, *tcmd;
3157         unsigned long flags;
3158
3159         TRACE_ENTRY();
3160
3161         local_irq_save(flags);
3162         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3163                                  blocked_cmd_list_entry) {
3164                 list_del(&cmd->blocked_cmd_list_entry);
3165                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3166                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3167                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3168                         list_add(&cmd->cmd_list_entry,
3169                                 &cmd->cmd_lists->active_cmd_list);
3170                 else
3171                         list_add_tail(&cmd->cmd_list_entry,
3172                                 &cmd->cmd_lists->active_cmd_list);
3173                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3174                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3175         }
3176         local_irq_restore(flags);
3177 #endif /* STRICT_SERIALIZING */
3178
3179         TRACE_EXIT();
3180         return;
3181 }
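
/*
 * Note on the non-STRICT_SERIALIZING branch above: HEAD OF QUEUE
 * commands are re-queued with list_add(), so they run before commands
 * already sitting on active_cmd_list, while all other commands keep
 * their relative order via list_add_tail(), matching the semantics
 * encoded in cmd->queue_type.
 */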
3182
3183 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3184         struct scst_cmd *out_of_sn_cmd)
3185 {
3186         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3187
3188         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3189                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3190                 scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
3191         } else {
3192                 out_of_sn_cmd->out_of_sn = 1;
3193                 spin_lock_irq(&tgt_dev->sn_lock);
3194                 tgt_dev->def_cmd_count++;
3195                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3196                               &tgt_dev->skipped_sn_list);
3197                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3198                         "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3199                         tgt_dev->expected_sn);
3200                 spin_unlock_irq(&tgt_dev->sn_lock);
3201         }
3202
3203         return;
3204 }
3205
3206 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3207         struct scst_cmd *out_of_sn_cmd)
3208 {
3209         TRACE_ENTRY();
3210
3211         if (!out_of_sn_cmd->sn_set) {
3212                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3213                 goto out;
3214         }
3215
3216         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3217
3218 out:
3219         TRACE_EXIT();
3220         return;
3221 }
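
/*
 * Illustration of the sequencing above: with tgt_dev->expected_sn == 5,
 * an out-of-order finished command with sn == 5 advances expected_sn
 * and reactivates deferred commands, while one with sn == 7 is parked
 * on skipped_sn_list until expected_sn catches up with it.
 */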
3222
3223 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3224 {
3225         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3226
3227         TRACE_ENTRY();
3228
3229         if (!cmd->hq_cmd_inced)
3230                 goto out;
3231
3232         spin_lock_irq(&tgt_dev->sn_lock);
3233         tgt_dev->hq_cmd_count--;
3234         spin_unlock_irq(&tgt_dev->sn_lock);
3235
3236         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3237
3238         /*
3239          * There is no problem with checking hq_cmd_count here without
3240          * holding the lock. In the worst case we will only get an
3241          * unneeded run of the deferred commands.
3242          */
3243         if (tgt_dev->hq_cmd_count == 0)
3244                 scst_make_deferred_commands_active(tgt_dev, cmd);
3245
3246 out:
3247         TRACE_EXIT();
3248         return;
3249 }
3250
3251 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3252 {
3253         TRACE_ENTRY();
3254
3255         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3256                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3257                 atomic_read(&scst_cmd_count));
3258
3259         scst_done_cmd_mgmt(cmd);
3260
3261         smp_rmb();
3262         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3263                 if (cmd->completed) {
3264                         /* It's completed and it's OK to return its result */
3265                         goto out;
3266                 }
3267
3268                 if (cmd->dev->tas) {
3269                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3270                                 "(tag %llu), returning TASK ABORTED", cmd,
3271                                 (long long unsigned int)cmd->tag);
3272                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3273                 } else {
3274                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3275                                 "(tag %llu), aborting without delivery or "
3276                                 "notification",
3277                                 cmd, (long long unsigned int)cmd->tag);
3278                         /*
3279                          * There is no need to check/requeue possible UA,
3280                          * because, if it exists, it will be delivered
3281                          * by the "completed" branch above.
3282                          */
3283                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3284                 }
3285         }
3286
3287 out:
3288         TRACE_EXIT();
3289         return;
3290 }
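
/*
 * The dev->tas test above implements the SAM TAS (Task Aborted Status)
 * semantics: with TAS enabled, an initiator whose command was aborted
 * by another initiator's task management function still gets
 * SAM_STAT_TASK_ABORTED back; with TAS disabled the command is dropped
 * without any delivery or notification.
 */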
3291
3292 void __init scst_scsi_op_list_init(void)
3293 {
3294         int i;
3295         uint8_t op = 0xff;
3296
3297         TRACE_ENTRY();
3298
3299         for (i = 0; i < 256; i++)
3300                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3301
3302         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3303                 if (scst_scsi_op_table[i].ops != op) {
3304                         op = scst_scsi_op_table[i].ops;
3305                         scst_scsi_op_list[op] = i;
3306                 }
3307         }
3308
3309         TRACE_EXIT();
3310         return;
3311 }
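
/*
 * The loop above builds a 256-entry index over scst_scsi_op_table,
 * which is assumed to be grouped by opcode: scst_scsi_op_list[op] is
 * the first row for opcode op, or SCST_CDB_TBL_SIZE when the opcode is
 * absent. A lookup sketch under that assumption:
 *
 *	int i = scst_scsi_op_list[cdb[0]];
 *	while ((i < SCST_CDB_TBL_SIZE) &&
 *	       (scst_scsi_op_table[i].ops == cdb[0])) {
 *		// ... match the device type columns, etc. ...
 *		i++;
 *	}
 */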
3312
3313 #ifdef DEBUG
3314 /* Original taken from the XFS code */
3315 unsigned long scst_random(void)
3316 {
3317         static int Inited;
3318         static unsigned long RandomValue;
3319         static DEFINE_SPINLOCK(lock);
3320         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3321         register long rv;
3322         register long lo;
3323         register long hi;
3324         unsigned long flags;
3325
3326         spin_lock_irqsave(&lock, flags);
3327         if (!Inited) {
3328                 RandomValue = jiffies;
3329                 Inited = 1;
3330         }
3331         rv = RandomValue;
3332         hi = rv / 127773;
3333         lo = rv % 127773;
3334         rv = 16807 * lo - 2836 * hi;
3335         if (rv <= 0)
3336                 rv += 2147483647;
3337         RandomValue = rv;
3338         spin_unlock_irqrestore(&lock, flags);
3339         return rv;
3340 }
3341 EXPORT_SYMBOL(scst_random);
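
/*
 * The generator above is the classic Park-Miller "minimal standard"
 * LCG: next = 16807 * prev mod (2^31 - 1), computed with Schrage's
 * trick (the 127773/2836 split) so the intermediate products stay
 * within a signed 32-bit range.
 */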
3342 #endif
3343
3344 #ifdef DEBUG_TM
3345
3346 #define TM_DBG_STATE_ABORT              0
3347 #define TM_DBG_STATE_RESET              1
3348 #define TM_DBG_STATE_OFFLINE            2
3349
3350 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
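
/*
 * State machine summary (derived from tm_dbg_change_state() below):
 * ABORT -> RESET -> OFFLINE when TM_DBG_GO_OFFLINE is set, otherwise
 * RESET cycles back to ABORT; tm_dbg_on_state_num_passes[] gives how
 * many passes each state lasts before the transition.
 */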
3351
3352 static void tm_dbg_timer_fn(unsigned long arg);
3353
3354 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3355 /* All serialized by scst_tm_dbg_lock */
3356 struct {
3357         unsigned int tm_dbg_release:1;
3358         unsigned int tm_dbg_blocked:1;
3359 } tm_dbg_flags;
3360 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3361 static int tm_dbg_delayed_cmds_count;
3362 static int tm_dbg_passed_cmds_count;
3363 static int tm_dbg_state;
3364 static int tm_dbg_on_state_passes;
3365 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3366 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3367
3368 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3369
3370 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3371         struct scst_acg_dev *acg_dev)
3372 {
3373         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3374                 unsigned long flags;
3375                 /* Do TM debugging only for LUN 0 */
3376                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3377                 tm_dbg_p_cmd_list_waitQ =
3378                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3379                 tm_dbg_state = INIT_TM_DBG_STATE;
3380                 tm_dbg_on_state_passes =
3381                         tm_dbg_on_state_num_passes[tm_dbg_state];
3382                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3383                 PRINT_INFO("LUN 0 connected from initiator %s is under "
3384                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3385                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3386         }
3387 }
3388
3389 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3390 {
3391         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3392                 unsigned long flags;
3393                 del_timer_sync(&tm_dbg_timer);
3394                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3395                 tm_dbg_p_cmd_list_waitQ = NULL;
3396                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3397         }
3398 }
3399
3400 static void tm_dbg_timer_fn(unsigned long arg)
3401 {
3402         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3403         tm_dbg_flags.tm_dbg_release = 1;
3404         smp_wmb();
3405         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3406 }
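
/*
 * The smp_wmb() above is assumed to order the write of
 * tm_dbg_flags.tm_dbg_release before the wakeup, pairing with the
 * reader in tm_dbg_check_released_cmds() on the woken thread.
 */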
3407
3408 /* Called under scst_tm_dbg_lock and IRQs off */
3409 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3410 {
3411         switch (tm_dbg_state) {
3412         case TM_DBG_STATE_ABORT:
3413                 if (tm_dbg_delayed_cmds_count == 0) {
3414                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3415                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3416                                 "for %ld.%ld seconds (%ld HZ), tm_dbg_on_state_passes=%d",
3417                                 cmd, (long long unsigned int)cmd->tag,
3418                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3419                         mod_timer(&tm_dbg_timer, jiffies + d);
3420 #if 0
3421                         tm_dbg_flags.tm_dbg_blocked = 1;
3422 #endif
3423                 } else {
3424                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3425                                 "(tag %llu), delayed_cmds_count=%d, "
3426                                 "tm_dbg_on_state_passes=%d", cmd,
3427                                 (long long unsigned int)cmd->tag,
3428                                 tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3429                         if (tm_dbg_delayed_cmds_count == 2)
3430                                 tm_dbg_flags.tm_dbg_blocked = 0;
3431                 }
3432                 break;
3433
3434         case TM_DBG_STATE_RESET:
3435         case TM_DBG_STATE_OFFLINE:
3436                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3437                         "(tag %llu), delayed_cmds_count=%d, tm_dbg_on_state_passes=%d",
3438                         cmd, (long long unsigned int)cmd->tag,
3439                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3440                 tm_dbg_flags.tm_dbg_blocked = 1;
3441                 break;
3442
3443         default:
3444                 sBUG();
3445         }
3446         /* IRQs already off */
3447         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3448         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3449         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3450         cmd->tm_dbg_delayed = 1;
3451         tm_dbg_delayed_cmds_count++;
3452         return;
3453 }
3454
3455 /* No locks */
3456 void tm_dbg_check_released_cmds(void)
3457 {
3458         if (tm_dbg_flags.tm_dbg_release) {
3459                 struct scst_cmd *cmd, *tc;
3460                 spin_lock_irq(&scst_tm_dbg_lock);
3461                 list_for_each_entry_safe_reverse(cmd, tc,
3462                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3463                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3464                                 "delayed_cmds_count=%d", cmd,
3465                                 (long long unsigned int)cmd->tag, tm_dbg_delayed_cmds_count);
3466                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3467                         list_move(&cmd->cmd_list_entry,
3468                                 &cmd->cmd_lists->active_cmd_list);
3469                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3470                 }
3471                 tm_dbg_flags.tm_dbg_release = 0;
3472                 spin_unlock_irq(&scst_tm_dbg_lock);
3473         }
3474 }
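
/*
 * The reverse iteration above, combined with list_move() to the head of
 * active_cmd_list, releases the delayed commands oldest-first (they
 * were appended with list_add_tail() in tm_dbg_delay_cmd()), so their
 * original arrival order is preserved.
 */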
3475
3476 /* Called under scst_tm_dbg_lock */
3477 static void tm_dbg_change_state(void)
3478 {
3479         tm_dbg_flags.tm_dbg_blocked = 0;
3480         if (--tm_dbg_on_state_passes == 0) {
3481                 switch (tm_dbg_state) {
3482                 case TM_DBG_STATE_ABORT:
3483                         TRACE_MGMT_DBG("%s",
3484                                 "Changing tm_dbg_state to RESET");
3485                         tm_dbg_state = TM_DBG_STATE_RESET;
3486                         break;
3487                 case TM_DBG_STATE_RESET:
3488                 case TM_DBG_STATE_OFFLINE:
3489                         if (TM_DBG_GO_OFFLINE) {
3490                                 TRACE_MGMT_DBG("%s",
3491                                         "Changing tm_dbg_state to OFFLINE");
3492                                 tm_dbg_state = TM_DBG_STATE_OFFLINE;
3493                         } else {
3494                                 TRACE_MGMT_DBG("%s",
3495                                         "Changing tm_dbg_state to ABORT");
3496                                 tm_dbg_state = TM_DBG_STATE_ABORT;
3497                         }
3498                         break;
3499                 default:
3500                         sBUG();
3501                 }
3506                 tm_dbg_on_state_passes =
3507                     tm_dbg_on_state_num_passes[tm_dbg_state];
3508         }
3509
3510         TRACE_MGMT_DBG("%s", "Deleting timer");
3511         del_timer(&tm_dbg_timer);
3512 }
3513
3514 /* No locks */
3515 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3516 {
3517         int res = 0;
3518         unsigned long flags;
3519
3520         if (cmd->tm_dbg_immut)
3521                 goto out;
3522
3523         if (cmd->tm_dbg_delayed) {
3524                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3525                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3526                         "delayed_cmds_count=%d", cmd,
3527                         (long long unsigned int)cmd->tag, tm_dbg_delayed_cmds_count);
3528
3529                 cmd->tm_dbg_immut = 1;
3530                 tm_dbg_delayed_cmds_count--;
3531                 if ((tm_dbg_delayed_cmds_count == 0) &&
3532                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3533                         tm_dbg_change_state();
3534                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3535         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3536                                         &cmd->tgt_dev->tgt_dev_flags)) {
3537                 /* Delay every 50th command */
3538                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3539                 if (tm_dbg_flags.tm_dbg_blocked ||
3540                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3541                         tm_dbg_delay_cmd(cmd);
3542                         res = 1;
3543                 } else
3544                         cmd->tm_dbg_immut = 1;
3545                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3546         }
3547
3548 out:
3549         return res;
3550 }
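
/*
 * Sketch of the intended call site (illustrative): the command
 * processing path is assumed to call tm_dbg_check_cmd() just before
 * dispatch and to stop handling the command when it returns nonzero,
 * since the command is then owned by tm_dbg_delayed_cmd_list:
 *
 *	if (unlikely(tm_dbg_check_cmd(cmd) != 0))
 *		return;		// delayed for TM debugging
 */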
3551
3552 /* No locks */
3553 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3554 {
3555         struct scst_cmd *c;
3556         unsigned long flags;
3557
3558         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3559         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3560                                 cmd_list_entry) {
3561                 if (c == cmd) {
3562                         TRACE_MGMT_DBG("Abort request for delayed cmd %p "
3563                                 "(tag=%llu), moving it to active cmd list "
3564                                 "(delayed_cmds_count=%d)", c,
3565                                 (long long unsigned int)c->tag, tm_dbg_delayed_cmds_count);
3566
3567                         if (!test_bit(SCST_CMD_ABORTED_OTHER, &c->cmd_flags)) {
3568                                 /* Test how completed commands are handled */
3569                                 if ((scst_random() % 10) == 5) {
3570                                         scst_set_cmd_error(c,
3571                                            SCST_LOAD_SENSE(scst_sense_hardw_error));
3572                                         /* It's completed now */
3573                                 }
3574                         }
3575
3576                         spin_lock(&c->cmd_lists->cmd_list_lock);
3577                         list_move(&c->cmd_list_entry,
3578                                 &c->cmd_lists->active_cmd_list);
3579                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3580                         spin_unlock(&c->cmd_lists->cmd_list_lock);
3581                         break;
3582                 }
3583         }
3584         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3585 }
3586
3587 /* Might be called under scst_mutex */
3588 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3589 {
3590         unsigned long flags;
3591
3592         if (dev != NULL) {
3593                 struct scst_tgt_dev *tgt_dev;
3594                 bool found = false;
3595
3596                 spin_lock_bh(&dev->dev_lock);
3597                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3598                                             dev_tgt_dev_list_entry) {
3599                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3600                                         &tgt_dev->tgt_dev_flags)) {
3601                                 found = true;
3602                                 break;
3603                         }
3604                 }
3605                 spin_unlock_bh(&dev->dev_lock);
3606
3607                 if (!found)
3608                         goto out;
3609         }
3610
3611         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3612         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3613                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3614                         tm_dbg_delayed_cmds_count);
3615                 tm_dbg_change_state();
3616                 tm_dbg_flags.tm_dbg_release = 1;
3617                 smp_wmb();
3618                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3619                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3620         } else {
3621                 TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
3622         }
3623         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3624
3625 out:
3626         return;
3627 }
3628
3629 int tm_dbg_is_release(void)
3630 {
3631         return tm_dbg_flags.tm_dbg_release;
3632 }
3633 #endif /* DEBUG_TM */
3634
3635 #ifdef DEBUG_SN
3636 void scst_check_debug_sn(struct scst_cmd *cmd)
3637 {
3638         static DEFINE_SPINLOCK(lock);
3639         static int type;
3640         static int cnt;
3641         unsigned long flags;
3642         int old = cmd->queue_type;
3643
3644         spin_lock_irqsave(&lock, flags);
3645
3646         if (cnt == 0) {
3647                 if ((scst_random() % 1000) == 500) {
3648                         if ((scst_random() % 3) == 1)
3649                                 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3650                         else
3651                                 type = SCST_CMD_QUEUE_ORDERED;
3652                         do {
3653                                 cnt = scst_random() % 10;
3654                         } while (cnt == 0);
3655                 } else
3656                         goto out_unlock;
3657         }
3658
3659         cmd->queue_type = type;
3660         cnt--;
3661
3662         if ((scst_random() % 1000) == 750)
3663                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3664         else if ((scst_random() % 1000) == 751)
3665                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3666         else if ((scst_random() % 1000) == 752)
3667                 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3668
3669         TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3670                 cmd->queue_type, cnt);
3671
3672 out_unlock:
3673         spin_unlock_irqrestore(&lock, flags);
3674         return;
3675 }
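
/*
 * Net effect of the function above: roughly 1 command in 1000 starts a
 * short burst (1..9 commands) of forced HEAD OF QUEUE or ORDERED
 * attributes, with occasional one-off overrides inside a burst, to
 * exercise the SN/queue-type handling paths.
 */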
3676 #endif /* DEBUG_SN */