/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);

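/*
 * scst_alloc_sense() - allocate a zeroed sense buffer for @cmd from
 * scst_sense_mempool. In non-atomic context __GFP_NOFAIL makes the
 * allocation retry until it succeeds, so -ENOMEM is realistically only
 * returned when @atomic is set and the mempool is exhausted.
 */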
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

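/*
 * scst_set_sense() - build fixed-format sense data (SPC response code 0x70,
 * "current error") in @buffer. The resulting layout is:
 *
 *   byte  0: 0x70 (response code, fixed format)
 *   byte  2: sense key
 *   byte  7: 0x0a (additional sense length -> 18 valid bytes in total)
 *   byte 12: additional sense code (ASC)
 *   byte 13: additional sense code qualifier (ASCQ)
 *
 * Illustrative use with one of the key/ASC/ASCQ triples defined elsewhere
 * in SCST:
 *
 *   scst_set_sense(buf, sizeof(buf), SCST_LOAD_SENSE(scst_sense_hardw_error));
 */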
void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_sense);

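/*
 * scst_set_busy() - respond with BUSY or TASK SET FULL. BUSY is used when
 * this is the only outstanding command on the session or the session is
 * still initializing; otherwise TASK SET FULL (QUEUE FULL) tells the
 * initiator that its queue depth, not the device, is the limiting factor.
 */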
void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

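/*
 * scst_get_cmd_abnormal_done_state() - pick the state an abnormally
 * completed command should continue from: commands that failed before or
 * during parsing skip the device handler entirely and go straight to
 * PRE_XMIT_RESP; anything later must still pass through PRE_DEV_DONE.
 */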
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        default:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

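/*
 * scst_set_resp_data_len() - truncate the response to @resp_data_len bytes
 * by shortening the SG entry where the new length falls and saving the
 * original entry/count so scst_check_restore_sg_buff() can undo it later.
 *
 * Worked example (illustrative): with two 4096-byte SG entries
 * (bufflen 8192) and resp_data_len 6000, the loop stops at i = 1 with
 * left = 6000 - 4096 = 1904, so sg[1].length becomes 1904 and sg_cnt
 * stays 2.
 */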
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

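        /*
         * A processing stage may run in atomic context only when both the
         * dev handler callback and the target driver callback reached at
         * that stage are atomic-safe, hence the pairwise checks below.
         */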
        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, scst_mutex - held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry)
        {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %lld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%lld, rd_only %d)",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry)
        {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry)
        {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

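/*
 * scst_create_prepare_internal_cmd() - build an SCST-internal command that
 * inherits session, target and device context from @orig_cmd and is queued
 * HEAD_OF_QUEUE so it runs before the initiator's other commands. Used,
 * for instance, by scst_prepare_request_sense() below.
 */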
struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

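/*
 * scst_prepare_request_sense() - issue an internal REQUEST SENSE on behalf
 * of @orig_cmd and queue it at the head of the active list. The 6-byte CDB
 * is { 0x03 (REQUEST SENSE), 0, 0, 0, allocation length (252), 0 };
 * scst_complete_request_sense() later copies the returned sense into
 * @orig_cmd.
 */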
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = sbuf_size;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list ", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request)) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, 15, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
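/*
 * Same operation for newer kernels, via scsi_execute(). RELEASE is retried
 * up to 5 times and the sense buffer is allocated with __GFP_NOFAIL,
 * because failing to release a reservation would leave the device locked
 * to a dead nexus.
 */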
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, 15, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex supposed to be held */
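/*
 * scst_clear_reservation() - if this nexus holds the SCSI-2 reservation on
 * @tgt_dev's device (dev_reserved set while this tgt_dev's RESERVED bit is
 * clear, i.e. it is not one of the locked-out nexuses), drop the
 * reservation for everybody and send a RELEASE to the underlying device.
 */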
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
                PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
                        "shut phase %lx", sess, sess->shut_phase);
                sBUG();
        }

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);

struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        cmd->start_time = jiffies;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

        cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
        cmd->dbl_ua_orig_resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /*
         * At this point tgt_dev can be dead, but the pointer remains non-NULL
         */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks supposed to be held */
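/*
 * scst_free_cmd() - release everything a command owns, in order: target
 * driver's on_free_cmd(), dev handler's on_free_cmd(), data buffer, sense
 * buffer. For out-of-SN commands the final destroy may be deferred via the
 * SCST_CMD_CAN_BE_DESTROYED flag until the SN machinery is done with them.
 */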
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %llu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
                if (unlikely(!cmd->sent_for_exec)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                            "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
                            cmd, cmd->cdb[0], cmd->tgtt->name,
                            (long long unsigned int)cmd->lun,
                            cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks supposed to be held. */
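/*
 * scst_check_retries() - called when a command finishes. If the target has
 * commands waiting on a QUEUE FULL condition, move at most two of them
 * back to the active list per invocation ("slow start"), so the queue is
 * refilled gradually instead of immediately overflowing again.
 */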
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry)
                {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)",
                                c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                  &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_CRIT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
        int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        /* cmd->dev->scsi_dev must be non-NULL here */
        req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
        if (req == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scsi_request failed");
                res = -ENOMEM;
                goto out;
        }

        cmd->scsi_req = req;

        memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
        req->sr_cmd_len = cmd->cdb_len;
        req->sr_data_direction = cmd->data_direction;
        req->sr_use_sg = cmd->sg_cnt;
        req->sr_bufflen = cmd->bufflen;
        req->sr_buffer = cmd->sg;
        req->sr_request->rq_disk = cmd->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        cmd->scsi_req->upper_private_data = cmd;

out:
        TRACE_EXIT();
        return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
        scsi_release_request(cmd->scsi_req);
        cmd->scsi_req = NULL;
}
#endif

1649 int scst_alloc_space(struct scst_cmd *cmd)
1650 {
1651         gfp_t gfp_mask;
1652         int res = -ENOMEM;
1653         int atomic = scst_cmd_atomic(cmd);
1654         int flags;
1655         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1656
1657         TRACE_ENTRY();
1658
1659         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1660
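        /*
         * In atomic context we must not wait for the SGV cache to be
         * refilled, so fail on a cache miss instead; commands with no_sgv
         * set bypass the cached SGV entries altogether.
         */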
1661         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1662         if (cmd->no_sgv)
1663                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1664
1665         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1666                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1667         if (cmd->sg == NULL)
1668                 goto out;
1669
1670         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1671                 static int ll;
1672                 if (ll < 10) {
1673                         PRINT_INFO("Unable to complete command due to "
1674                                 "SG IO count limitation (requested %d, "
1675                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1676                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1677                         ll++;
1678                 }
1679                 goto out_sg_free;
1680         }
1681
1682         res = 0;
1683
1684 out:
1685         TRACE_EXIT();
1686         return res;
1687
1688 out_sg_free:
1689         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1690         cmd->sgv = NULL;
1691         cmd->sg = NULL;
1692         cmd->sg_cnt = 0;
1693         goto out;
1694 }
1695
1696 void scst_release_space(struct scst_cmd *cmd)
1697 {
1698         TRACE_ENTRY();
1699
1700         if (cmd->sgv == NULL)
1701                 goto out;
1702
1703         if (cmd->data_buf_alloced) {
1704                 TRACE_MEM("%s", "data_buf_alloced set, returning");
1705                 goto out;
1706         }
1707
1708         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1709
1710         cmd->sgv = NULL;
1711         cmd->sg_cnt = 0;
1712         cmd->sg = NULL;
1713         cmd->bufflen = 0;
1714         cmd->data_len = 0;
1715
1716 out:
1717         TRACE_EXIT();
1718         return;
1719 }
1720
1721 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1722
1723 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1724 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1725
1726 int scst_get_cdb_len(const uint8_t *cdb)
1727 {
1728         return SCST_GET_CDB_LEN(cdb[0]);
1729 }
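
/*
 * Example: READ(10) has opcode 0x28, so its CDB group is (0x28 >> 5) & 0x7
 * == 1 and SCST_CDB_LENGTH[1] yields a 10-byte CDB. Groups 3, 6 and 7
 * (reserved/vendor-specific) yield -1.
 */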
1730
1731 /* get_trans_len_x extracts an x-byte transfer length from the CDB, starting at offset off */
1732
1733 /* for special commands */
1734 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1735 {
1736         cmd->bufflen = 6;
1737         return 0;
1738 }
1739
1740 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1741 {
1742         cmd->bufflen = READ_CAP_LEN;
1743         return 0;
1744 }
1745
1746 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1747 {
1748         cmd->bufflen = 1;
1749         return 0;
1750 }
1751
1752 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1753 {
1754         const uint8_t *p = cmd->cdb + off;
1755         int res = 0;
1756
1757         cmd->bufflen = 0;
1758         cmd->bufflen |= ((u32)p[0]) << 8;
1759         cmd->bufflen |= ((u32)p[1]);
1760
1761         switch (cmd->cdb[1] & 0x1f) {
1762         case 0:
1763         case 1:
1764         case 6:
1765                 if (cmd->bufflen != 0) {
1766                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1767                                 "allocation length for service action %x",
1768                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1769                         goto out_inval;
1770                 }
1771                 break;
1772         }
1773
1774         switch (cmd->cdb[1] & 0x1f) {
1775         case 0:
1776         case 1:
1777                 cmd->bufflen = 20;
1778                 break;
1779         case 6:
1780                 cmd->bufflen = 32;
1781                 break;
1782         case 8:
1783                 cmd->bufflen = max(28, cmd->bufflen);
1784                 break;
1785         default:
1786                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1787                         cmd->cdb[1] & 0x1f);
1788                 goto out_inval;
1789         }
1790
1791 out:
1792         return res;
1793
1794 out_inval:
1795         scst_set_cmd_error(cmd,
1796                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1797         res = 1;
1798         goto out;
1799 }
1800
1801 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1802 {
1803         cmd->bufflen = (u32)cmd->cdb[off];
1804         return 0;
1805 }
1806
1807 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1808 {
1809         cmd->bufflen = (u32)cmd->cdb[off];
1810         if (cmd->bufflen == 0)
1811                 cmd->bufflen = 256;
1812         return 0;
1813 }
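
/*
 * Used for the 6-byte READ/WRITE CDBs, where, per SBC, a TRANSFER LENGTH of
 * 0 means 256 blocks rather than no data transfer.
 */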
1814
1815 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1816 {
1817         const uint8_t *p = cmd->cdb + off;
1818
1819         cmd->bufflen = 0;
1820         cmd->bufflen |= ((u32)p[0]) << 8;
1821         cmd->bufflen |= ((u32)p[1]);
1822
1823         return 0;
1824 }
1825
1826 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1827 {
1828         const uint8_t *p = cmd->cdb + off;
1829
1830         cmd->bufflen = 0;
1831         cmd->bufflen |= ((u32)p[0]) << 16;
1832         cmd->bufflen |= ((u32)p[1]) << 8;
1833         cmd->bufflen |= ((u32)p[2]);
1834
1835         return 0;
1836 }
1837
1838 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1839 {
1840         const uint8_t *p = cmd->cdb + off;
1841
1842         cmd->bufflen = 0;
1843         cmd->bufflen |= ((u32)p[0]) << 24;
1844         cmd->bufflen |= ((u32)p[1]) << 16;
1845         cmd->bufflen |= ((u32)p[2]) << 8;
1846         cmd->bufflen |= ((u32)p[3]);
1847
1848         return 0;
1849 }
1850
1851 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1852 {
1853         cmd->bufflen = 0;
1854         return 0;
1855 }
1856
1857 int scst_get_cdb_info(struct scst_cmd *cmd)
1858 {
1859         int dev_type = cmd->dev->handler->type;
1860         int i, res = 0;
1861         uint8_t op;
1862         const struct scst_sdbops *ptr = NULL;
1863
1864         TRACE_ENTRY();
1865
1866         op = cmd->cdb[0];       /* get the opcode */
1867
1868         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1869                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1870                 dev_type);
1871
1872         i = scst_scsi_op_list[op];
1873         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1874                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1875                         ptr = &scst_scsi_op_table[i];
1876                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1877                               ptr->ops, ptr->devkey[0], /* disk     */
1878                               ptr->devkey[1],   /* tape     */
1879                               ptr->devkey[2],   /* printer */
1880                               ptr->devkey[3],   /* cpu      */
1881                               ptr->devkey[4],   /* cdr      */
1882                               ptr->devkey[5],   /* cdrom    */
1883                               ptr->devkey[6],   /* scanner */
1884                               ptr->devkey[7],   /* worm     */
1885                               ptr->devkey[8],   /* changer */
1886                               ptr->devkey[9],   /* commdev */
1887                               ptr->op_name);
1888                         TRACE_DBG("direction=%d flags=%d off=%d",
1889                               ptr->direction,
1890                               ptr->flags,
1891                               ptr->off);
1892                         break;
1893                 }
1894                 i++;
1895         }
1896
1897         if (ptr == NULL) {
1898                 /* opcode not found or not currently supported */
1899                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1900                       dev_type);
1901                 res = -1;
1902                 cmd->op_flags = SCST_INFO_INVALID;
1903                 goto out;
1904         }
1905
1906         cmd->cdb_len = SCST_GET_CDB_LEN(op);
1907         cmd->op_name = ptr->op_name;
1908         cmd->data_direction = ptr->direction;
1909         cmd->op_flags = ptr->flags;
1910         res = (*ptr->get_trans_len)(cmd, ptr->off);
1911
1912         if (cmd->bufflen == 0) {
1913                 /*
1914                  * According to SPC, bufflen 0 for data transfer commands isn't
1915                  * an error, so we need to fix the transfer direction.
1916                  */
1917                 cmd->data_direction = SCST_DATA_NONE;
1918         }
1919
1920 out:
1921         TRACE_EXIT_RES(res);
1922         return res;
1923 }
1924 EXPORT_SYMBOL(scst_get_cdb_info);
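
/*
 * Example of what the lookup above produces (the exact values live in
 * scst_cdbprobe.h): for READ(10) (opcode 0x28) on a disk the entry is
 * expected to yield SCST_DATA_READ with a get_trans_len_2 handler and
 * off == 7, so bufflen comes from CDB bytes 7-8 (in blocks, converted
 * to bytes later by the generic parse routines).
 */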
1925
1926 /*
1927  * Routine to extract a lun number from an 8-byte LUN structure
1928  * in network byte order (BE).
1929  * (see SAM-2, Section 4.12.3 page 40)
1930  * Supports 2 types of lun unpacking: peripheral and logical unit.
1931  */
1932 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
1933 {
1934         uint64_t res = NO_SUCH_LUN;
1935         int address_method;
1936
1937         TRACE_ENTRY();
1938
1939         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1940
1941         if (unlikely(len < 2)) {
1942                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1943                         "more", len);
1944                 goto out;
1945         }
1946
1947         if (len > 2) {
1948                 switch (len) {
1949                 case 8:
1950                         if ((*((uint64_t *)lun) &
1951                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1952                                 goto out_err;
1953                         break;
1954                 case 4:
1955                         if (*((uint16_t *)&lun[2]) != 0)
1956                                 goto out_err;
1957                         break;
1958                 case 6:
1959                         if (*((uint32_t *)&lun[2]) != 0)
1960                                 goto out_err;
1961                         break;
1962                 default:
1963                         goto out_err;
1964                 }
1965         }
1966
1967         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1968         switch (address_method) {
1969         case 0: /* peripheral device addressing method */
1970 #if 0
1971                 if (*lun) {
1972                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
1973                              "peripheral device addressing method 0x%02x, "
1974                              "expected 0", *lun);
1975                         break;
1976                 }
1977                 res = *(lun + 1);
1978                 break;
1979 #else
1980                 /*
1981                  * Looks like it's legal to use it as flat space addressing
1982                  * method as well
1983                  */
1984
1985                 /* fall through */
1986 #endif
1987
1988         case 1: /* flat space addressing method */
1989                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1990                 break;
1991
1992         case 2: /* logical unit addressing method */
1993                 if (*lun & 0x3f) {
1994                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1995                                     "addressing method 0x%02x, expected 0",
1996                                     *lun & 0x3f);
1997                         break;
1998                 }
1999                 if (*(lun + 1) & 0xe0) {
2000                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2001                                     "addressing method 0x%02x, expected 0",
2002                                     (*(lun + 1) & 0xe0) >> 5);
2003                         break;
2004                 }
2005                 res = *(lun + 1) & 0x1f;
2006                 break;
2007
2008         case 3: /* extended logical unit addressing method */
2009         default:
2010                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2011                             address_method);
2012                 break;
2013         }
2014
2015 out:
2016         TRACE_EXIT_RES((int)res);
2017         return res;
2018
2019 out_err:
2020         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2021         goto out;
2022 }
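
/*
 * Worked example: the 8-byte LUN 40 05 00 00 00 00 00 00 (hex) uses the flat
 * space addressing method (top two bits of byte 0 == 01b), so the result is
 * ((0x40 & 0x3f) << 8) | 0x05 == 5. The peripheral form 00 05 00 ... 00
 * decodes to 5 as well, since method 0 falls through to the same computation.
 */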
2023
2024 int scst_calc_block_shift(int sector_size)
2025 {
2026         int block_shift = 0;
2027         int t;
2028
2029         if (sector_size == 0)
2030                 sector_size = 512;
2031
2032         t = sector_size;
2033         while (1) {
2034                 if ((t & 1) != 0)
2035                         break;
2036                 t >>= 1;
2037                 block_shift++;
2038         }
2039         if (block_shift < 9) {
2040                 PRINT_ERROR("Wrong sector size %d", sector_size);
2041                 block_shift = -1;
2042         }
2043
2044         TRACE_EXIT_RES(block_shift);
2045         return block_shift;
2046 }
2047 EXPORT_SYMBOL(scst_calc_block_shift);
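
/*
 * Example: scst_calc_block_shift(512) == 9 and scst_calc_block_shift(4096)
 * == 12, while a non-power-of-2 size such as 520 stops after 3 shifts and
 * is rejected with -1.
 */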
2048
2049 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2050         int (*get_block_shift)(struct scst_cmd *cmd))
2051 {
2052         int res = 0;
2053
2054         TRACE_ENTRY();
2055
2056         /*
2057          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2058          * therefore change them only if necessary
2059          */
2060
2061         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2062               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2063
2064         switch (cmd->cdb[0]) {
2065         case SERVICE_ACTION_IN:
2066                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2067                         cmd->bufflen = READ_CAP16_LEN;
2068                         cmd->data_direction = SCST_DATA_READ;
2069                 }
2070                 break;
2071         case VERIFY_6:
2072         case VERIFY:
2073         case VERIFY_12:
2074         case VERIFY_16:
2075                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2076                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2077                         cmd->bufflen = 0;
2078                         goto set_timeout;
2079                 } else
2080                         cmd->data_len = 0;
2081                 break;
2082         default:
2083                 /* It's all good */
2084                 break;
2085         }
2086
2087         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2088                 /*
2089                  * No need for locks here, since *_detach() cannot be
2090                  * called while there are outstanding commands.
2091                  */
2092                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2093         }
2094
2095 set_timeout:
2096         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2097                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2098         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2099                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2100         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2101                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2102
2103         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2104               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2105
2106         TRACE_EXIT_RES(res);
2107         return res;
2108 }
2109 EXPORT_SYMBOL(scst_sbc_generic_parse);
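
/*
 * Usage sketch (illustrative; disk_get_block_shift(), disk_parse() and the
 * dh_priv layout are hypothetical): a disk dev handler's parse() callback
 * would typically delegate here with a callback returning the cached block
 * shift of the device:
 *
 *	static int disk_get_block_shift(struct scst_cmd *cmd)
 *	{
 *		struct disk_params *params = cmd->dev->dh_priv;
 *		return params->block_shift;
 *	}
 *
 *	static int disk_parse(struct scst_cmd *cmd)
 *	{
 *		return scst_sbc_generic_parse(cmd, disk_get_block_shift);
 *	}
 */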
2110
2111 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2112         int (*get_block_shift)(struct scst_cmd *cmd))
2113 {
2114         int res = 0;
2115
2116         TRACE_ENTRY();
2117
2118         /*
2119          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2120          * therefore change them only if necessary
2121          */
2122
2123         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2124               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2125
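        /* Mask out the obsolete LUN field in CDB byte 1 */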
2126         cmd->cdb[1] &= 0x1f;
2127
2128         switch (cmd->cdb[0]) {
2129         case VERIFY_6:
2130         case VERIFY:
2131         case VERIFY_12:
2132         case VERIFY_16:
2133                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2134                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2135                         cmd->bufflen = 0;
2136                         goto set_timeout;
2137                 }
2138                 break;
2139         default:
2140                 /* It's all good */
2141                 break;
2142         }
2143
2144         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2145                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2146
2147 set_timeout:
2148         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2149                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2150         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2151                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2152         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2153                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2154
2155         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2156                 cmd->data_direction);
2157
2158         TRACE_EXIT_RES(res);
2159         return res;
2160 }
2161 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2162
2163 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2164         int (*get_block_shift)(struct scst_cmd *cmd))
2165 {
2166         int res = 0;
2167
2168         TRACE_ENTRY();
2169
2170         /*
2171          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2172          * therefore change them only if necessary
2173          */
2174
2175         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2176               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2177
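        /* Mask out the obsolete LUN field in CDB byte 1 */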
2178         cmd->cdb[1] &= 0x1f;
2179
2180         switch (cmd->cdb[0]) {
2181         case VERIFY_6:
2182         case VERIFY:
2183         case VERIFY_12:
2184         case VERIFY_16:
2185                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2186                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2187                         cmd->bufflen = 0;
2188                         goto set_timeout;
2189                 }
2190                 break;
2191         default:
2192                 /* It's all good */
2193                 break;
2194         }
2195
2196         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2197                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2198
2199 set_timeout:
2200         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2201                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2202         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2203                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2204         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2205                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2206
2207         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2208                 cmd->data_direction);
2209
2210         TRACE_EXIT_RES(res);
2211         return res;
2212 }
2213 EXPORT_SYMBOL(scst_modisk_generic_parse);
2214
2215 int scst_tape_generic_parse(struct scst_cmd *cmd,
2216         int (*get_block_size)(struct scst_cmd *cmd))
2217 {
2218         int res = 0;
2219
2220         TRACE_ENTRY();
2221
2222         /*
2223          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2224          * therefore change them only if necessary
2225          */
2226
2227         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2228               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2229
2230         if (cmd->cdb[0] == READ_POSITION) {
2231                 int tclp = cmd->cdb[1] & TCLP_BIT;
2232                 int long_bit = cmd->cdb[1] & LONG_BIT;
2233                 int bt = cmd->cdb[1] & BT_BIT;
2234
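                /*
                 * Only the combinations below return position data: TCLP
                 * equal to LONG, with BT not set together with LONG. Any
                 * other combination gets an empty response.
                 */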
2235                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2236                         cmd->bufflen =
2237                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2238                         cmd->data_direction = SCST_DATA_READ;
2239                 } else {
2240                         cmd->bufflen = 0;
2241                         cmd->data_direction = SCST_DATA_NONE;
2242                 }
2243         }
2244
2245         if ((cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) && (cmd->cdb[1] & 1))
2246                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2247
2248         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2249                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2250         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2251                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2252         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2253                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2254
2255         TRACE_EXIT_RES(res);
2256         return res;
2257 }
2258 EXPORT_SYMBOL(scst_tape_generic_parse);
2259
2260 static int scst_null_parse(struct scst_cmd *cmd)
2261 {
2262         int res = 0;
2263
2264         TRACE_ENTRY();
2265
2266         /*
2267          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2268          * therefore change them only if necessary
2269          */
2270
2271         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2272               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2273 #if 0
2274         switch (cmd->cdb[0]) {
2275         default:
2276                 /* It's all good */
2277                 break;
2278         }
2279 #endif
2280         TRACE_DBG("res %d bufflen %d direct %d",
2281               res, cmd->bufflen, cmd->data_direction);
2282
2283         TRACE_EXIT();
2284         return res;
2285 }
2286
2287 int scst_changer_generic_parse(struct scst_cmd *cmd,
2288         int (*nothing)(struct scst_cmd *cmd))
2289 {
2290         int res = scst_null_parse(cmd);
2291
2292         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2293                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2294         else
2295                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2296
2297         return res;
2298 }
2299 EXPORT_SYMBOL(scst_changer_generic_parse);
2300
2301 int scst_processor_generic_parse(struct scst_cmd *cmd,
2302         int (*nothing)(struct scst_cmd *cmd))
2303 {
2304         int res = scst_null_parse(cmd);
2305
2306         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2307                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2308         else
2309                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2310
2311         return res;
2312 }
2313 EXPORT_SYMBOL(scst_processor_generic_parse);
2314
2315 int scst_raid_generic_parse(struct scst_cmd *cmd,
2316         int (*nothing)(struct scst_cmd *cmd))
2317 {
2318         int res = scst_null_parse(cmd);
2319
2320         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2321                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2322         else
2323                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2324
2325         return res;
2326 }
2327 EXPORT_SYMBOL(scst_raid_generic_parse);
2328
2329 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2330         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2331 {
2332         int opcode = cmd->cdb[0];
2333         int status = cmd->status;
2334         int res = SCST_CMD_STATE_DEFAULT;
2335
2336         TRACE_ENTRY();
2337
2338         /*
2339          * SCST sets good defaults for cmd->is_send_status and
2340          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2341          * therefore change them only if necessary
2342          */
2343
2344         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2345                 switch (opcode) {
2346                 case READ_CAPACITY:
2347                 {
2348                         /* Always keep track of disk capacity */
2349                         int buffer_size, sector_size, sh;
2350                         uint8_t *buffer;
2351
2352                         buffer_size = scst_get_buf_first(cmd, &buffer);
2353                         if (unlikely(buffer_size <= 0)) {
2354                                 if (buffer_size < 0) {
2355                                         PRINT_ERROR("%s: Unable to get the"
2356                                         " buffer (%d)", __func__, buffer_size);
2357                                 }
2358                                 goto out;
2359                         }
2360
2361                         sector_size =
2362                             ((buffer[4] << 24) | (buffer[5] << 16) |
2363                              (buffer[6] << 8) | (buffer[7] << 0));
2364                         scst_put_buf(cmd, buffer);
2365                         if (sector_size != 0)
2366                                 sh = scst_calc_block_shift(sector_size);
2367                         else
2368                                 sh = 0;
2369                         set_block_shift(cmd, sh);
2370                         TRACE_DBG("block_shift %d", sh);
2371                         break;
2372                 }
2373                 default:
2374                         /* It's all good */
2375                         break;
2376                 }
2377         }
2378
2379         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2380               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2381
2382 out:
2383         TRACE_EXIT_RES(res);
2384         return res;
2385 }
2386 EXPORT_SYMBOL(scst_block_generic_dev_done);
2387
2388 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2389         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2390 {
2391         int opcode = cmd->cdb[0];
2392         int res = SCST_CMD_STATE_DEFAULT;
2393         int buffer_size, bs;
2394         uint8_t *buffer = NULL;
2395
2396         TRACE_ENTRY();
2397
2398         /*
2399          * SCST sets good defaults for cmd->is_send_status and
2400          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2401          * therefore change them only if necessary
2402          */
2403
2404         switch (opcode) {
2405         case MODE_SENSE:
2406         case MODE_SELECT:
2407                 buffer_size = scst_get_buf_first(cmd, &buffer);
2408                 if (unlikely(buffer_size <= 0)) {
2409                         if (buffer_size < 0) {
2410                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2411                                         __func__, buffer_size);
2412                         }
2413                         goto out;
2414                 }
2415                 break;
2416         }
2417
2418         switch (opcode) {
2419         case MODE_SENSE:
2420                 TRACE_DBG("%s", "MODE_SENSE");
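                /*
                 * Act only on "current values" (PC field in CDB byte 2 == 0).
                 * A block descriptor length of 8 in byte 3 of the returned
                 * data means a single descriptor is present; its last three
                 * bytes (bytes 9-11 of the buffer) carry the block length.
                 */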
2421                 if ((cmd->cdb[2] & 0xC0) == 0) {
2422                         if (buffer[3] == 8) {
2423                                 bs = (buffer[9] << 16) |
2424                                     (buffer[10] << 8) | buffer[11];
2425                                 set_block_size(cmd, bs);
2426                         }
2427                 }
2428                 break;
2429         case MODE_SELECT:
2430                 TRACE_DBG("%s", "MODE_SELECT");
2431                 if (buffer[3] == 8) {
2432                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2433                             (buffer[11]);
2434                         set_block_size(cmd, bs);
2435                 }
2436                 break;
2437         default:
2438                 /* It's all good */
2439                 break;
2440         }
2441
2442         switch (opcode) {
2443         case MODE_SENSE:
2444         case MODE_SELECT:
2445                 scst_put_buf(cmd, buffer);
2446                 break;
2447         }
2448
2449 out:
2450         TRACE_EXIT_RES(res);
2451         return res;
2452 }
2453 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2454
2455 static void scst_check_internal_sense(struct scst_device *dev, int result,
2456         uint8_t *sense, int sense_len)
2457 {
2458         TRACE_ENTRY();
2459
2460         if (host_byte(result) == DID_RESET) {
2461                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2462                         "reset UA");
2463                 scst_set_sense(sense, sense_len,
2464                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2465                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2466         } else if ((status_byte(result) == CHECK_CONDITION) &&
2467                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2468                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2469
2470         TRACE_EXIT();
2471         return;
2472 }
2473
2474 int scst_obtain_device_parameters(struct scst_device *dev)
2475 {
2476         int res = 0, i;
2477         uint8_t cmd[16];
2478         uint8_t buffer[4+0x0A];
2479         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2480
2481         TRACE_ENTRY();
2482
2483         sBUG_ON(in_interrupt() || in_atomic());
2484         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2485
2486         for (i = 0; i < 5; i++) {
2487                 /* Get control mode page */
2488                 memset(cmd, 0, sizeof(cmd));
2489                 cmd[0] = MODE_SENSE;
2490                 cmd[1] = 8; /* DBD */
2491                 cmd[2] = 0x0A;
2492                 cmd[4] = sizeof(buffer);
2493
2494                 memset(buffer, 0, sizeof(buffer));
2495                 memset(sense_buffer, 0, sizeof(sense_buffer));
2496
2497                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2498                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2499                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2500
2501                 TRACE_DBG("MODE_SENSE done: %x", res);
2502
2503                 if (scsi_status_is_good(res)) {
2504                         int q;
2505
2506                         PRINT_BUFF_FLAG(TRACE_SCSI,
2507                                 "Returned control mode page data",
2508                                 buffer, sizeof(buffer));
2509
2510                         dev->tst = buffer[4+2] >> 5;
2511                         q = buffer[4+3] >> 4;
2512                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2513                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2514                                         "%d:%d:%d:%d", q,
2515                                         dev->scsi_dev->host->host_no,
2516                                         dev->scsi_dev->channel,
2517                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2518                         }
2519                         dev->queue_alg = q;
2520                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2521                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2522
2523                         /*
2524                          * Unfortunately, the SCSI midlayer doesn't provide a
2525                          * way to specify a command's task attribute, so we can
2526                          * only rely on the device's restricted reordering.
2527                          */
2528                         dev->has_own_order_mgmt = !dev->queue_alg;
2529
2530                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2531                                 "Device %d:%d:%d:%d: TST %x, "
2532                                 "QUEUE ALG %x, SWP %x, TAS %x, "
2533                                 "has_own_order_mgmt %d",
2534                                 dev->scsi_dev->host->host_no,
2535                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2536                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2537                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2538
2539                         goto out;
2540                 } else {
2541 #if 0
2542                         if ((status_byte(res) == CHECK_CONDITION) &&
2543 #else
2544                         /*
2545                          * 3ware controller is buggy and returns CONDITION_GOOD
2546                          * instead of CHECK_CONDITION
2547                          */
2548                         if (
2549 #endif
2550                             SCST_SENSE_VALID(sense_buffer)) {
2551                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2552                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2553                                                 "Device %d:%d:%d:%d doesn't"
2554                                                 " support control mode page,"
2555                                                 " using defaults: TST %x,"
2556                                                 " QUEUE ALG %x, SWP %x, TAS %x,"
2557                                                 " has_own_order_mgmt %d",
2558                                                 dev->scsi_dev->host->host_no,
2559                                                 dev->scsi_dev->channel,
2560                                                 dev->scsi_dev->id,
2561                                                 dev->scsi_dev->lun,
2562                                                 dev->tst,
2563                                                 dev->queue_alg,
2564                                                 dev->swp,
2565                                                 dev->tas,
2566                                                 dev->has_own_order_mgmt);
2567                                         res = 0;
2568                                         goto out;
2569                                 } else if (sense_buffer[2] == NOT_READY) {
2570                                         TRACE(TRACE_SCSI,
2571                                                 "Device %d:%d:%d:%d not ready",
2572                                                 dev->scsi_dev->host->host_no,
2573                                                 dev->scsi_dev->channel,
2574                                                 dev->scsi_dev->id,
2575                                                 dev->scsi_dev->lun);
2576                                         res = 0;
2577                                         goto out;
2578                                 }
2579                         } else {
2580                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2581                                         "Internal MODE SENSE to "
2582                                         "device %d:%d:%d:%d failed: %x",
2583                                         dev->scsi_dev->host->host_no,
2584                                         dev->scsi_dev->channel,
2585                                         dev->scsi_dev->id,
2586                                         dev->scsi_dev->lun, res);
2587                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2588                                         "MODE SENSE sense",
2589                                         sense_buffer, sizeof(sense_buffer));
2590                         }
2591                         scst_check_internal_sense(dev, res, sense_buffer,
2592                                         sizeof(sense_buffer));
2593                 }
2594         }
2595         res = -ENODEV;
2596
2597 out:
2598         TRACE_EXIT_RES(res);
2599         return res;
2600 }
2601 EXPORT_SYMBOL(scst_obtain_device_parameters);
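
/*
 * Usage sketch (illustrative; my_attach() is a hypothetical name):
 * pass-through dev handlers are expected to call this from their attach()
 * callback once dev->scsi_dev is valid:
 *
 *	static int my_attach(struct scst_device *dev)
 *	{
 *		...
 *		return scst_obtain_device_parameters(dev);
 *	}
 */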
2602
2603 /* Called under dev_lock and BH off */
2604 void scst_process_reset(struct scst_device *dev,
2605         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2606         struct scst_mgmt_cmd *mcmd, bool setUA)
2607 {
2608         struct scst_tgt_dev *tgt_dev;
2609         struct scst_cmd *cmd, *tcmd;
2610
2611         TRACE_ENTRY();
2612
2613         /* Clear RESERVE'ation, if necessary */
2614         if (dev->dev_reserved) {
2615                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2616                                     dev_tgt_dev_list_entry) {
2617                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2618                                 "lun %lld",
2619                                 (long long unsigned int)tgt_dev->lun);
2620                         clear_bit(SCST_TGT_DEV_RESERVED,
2621                                   &tgt_dev->tgt_dev_flags);
2622                 }
2623                 dev->dev_reserved = 0;
2624                 /*
2625                  * There is no need to send RELEASE, since the device is going
2626                  * to be reset. Moreover, since we can be inside a RESET TM
2627                  * function, sending it might even be dangerous.
2628                  */
2629         }
2630
2631         dev->dev_double_ua_possible = 1;
2632         dev->dev_serialized = 1;
2633
2634         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2635                 dev_tgt_dev_list_entry) {
2636                 struct scst_session *sess = tgt_dev->sess;
2637
2638                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2639                 scst_free_all_UA(tgt_dev);
2640                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2641
2642                 spin_lock_irq(&sess->sess_list_lock);
2643
2644                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2645                 list_for_each_entry(cmd, &sess->search_cmd_list,
2646                                 search_cmd_list_entry) {
2647                         if (cmd == exclude_cmd)
2648                                 continue;
2649                         if ((cmd->tgt_dev == tgt_dev) ||
2650                             ((cmd->tgt_dev == NULL) &&
2651                              (cmd->lun == tgt_dev->lun))) {
2652                                 scst_abort_cmd(cmd, mcmd,
2653                                         (tgt_dev->sess != originator), 0);
2654                         }
2655                 }
2656                 spin_unlock_irq(&sess->sess_list_lock);
2657         }
2658
2659         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2660                                 blocked_cmd_list_entry) {
2661                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2662                         list_del(&cmd->blocked_cmd_list_entry);
2663                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2664                                 "to active cmd list", cmd);
2665                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2666                         list_add_tail(&cmd->cmd_list_entry,
2667                                 &cmd->cmd_lists->active_cmd_list);
2668                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2669                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2670                 }
2671         }
2672
2673         if (setUA) {
2674                 /* BH already off */
2675                 spin_lock(&scst_temp_UA_lock);
2676                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2677                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2678                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2679                         sizeof(scst_temp_UA));
2680                 spin_unlock(&scst_temp_UA_lock);
2681         }
2682
2683         TRACE_EXIT();
2684         return;
2685 }
2686
2687 int scst_set_pending_UA(struct scst_cmd *cmd)
2688 {
2689         int res = 0;
2690         struct scst_tgt_dev_UA *UA_entry;
2691
2692         TRACE_ENTRY();
2693
2694         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2695
2696         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2697
2698         /* UA list could be cleared behind us, so retest */
2699         if (list_empty(&cmd->tgt_dev->UA_list)) {
2700                 TRACE_DBG("%s",
2701                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2702                 res = -1;
2703                 goto out_unlock;
2704         }
2705
2706         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2707                               UA_list_entry);
2708
2709         TRACE_DBG("next %p UA_entry %p",
2710               cmd->tgt_dev->UA_list.next, UA_entry);
2711
2712         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2713                 sizeof(UA_entry->UA_sense_buffer));
2714
2715         cmd->ua_ignore = 1;
2716
2717         list_del(&UA_entry->UA_list_entry);
2718
2719         mempool_free(UA_entry, scst_ua_mempool);
2720
2721         if (list_empty(&cmd->tgt_dev->UA_list)) {
2722                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2723                           &cmd->tgt_dev->tgt_dev_flags);
2724         }
2725
2726         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2727
2728 out:
2729         TRACE_EXIT_RES(res);
2730         return res;
2731
2732 out_unlock:
2733         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2734         goto out;
2735 }
2736
2737 /* Called under tgt_dev_lock and BH off */
2738 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2739         const uint8_t *sense, int sense_len, int head)
2740 {
2741         struct scst_tgt_dev_UA *UA_entry = NULL;
2742
2743         TRACE_ENTRY();
2744
2745         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2746         if (UA_entry == NULL) {
2747                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2748                      "allocation failed. The UNIT ATTENTION "
2749                      "on some sessions will be missed");
2750                 PRINT_BUFFER("Lost UA", sense, sense_len);
2751                 goto out;
2752         }
2753         memset(UA_entry, 0, sizeof(*UA_entry));
2754
2755         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2756                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2757         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2758
2759         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2760
2761         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2762
2763         if (head)
2764                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2765         else
2766                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2767
2768 out:
2769         TRACE_EXIT();
2770         return;
2771 }
2772
2773 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2774         const uint8_t *sense, int sense_len, int head)
2775 {
2776         int skip_UA = 0;
2777         struct scst_tgt_dev_UA *UA_entry_tmp;
2778
2779         TRACE_ENTRY();
2780
2781         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2782
2783         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2784                             UA_list_entry) {
2785                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2786                            min(sense_len, (int)sizeof(UA_entry_tmp->UA_sense_buffer))) == 0) {
2787                         TRACE_MGMT_DBG("%s", "UA already exists");
2788                         skip_UA = 1;
2789                         break;
2790                 }
2791         }
2792
2793         if (skip_UA == 0)
2794                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2795
2796         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2797
2798         TRACE_EXIT();
2799         return;
2800 }
2801
2802 /* Called under dev_lock and BH off */
2803 void scst_dev_check_set_local_UA(struct scst_device *dev,
2804         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2805 {
2806         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2807
2808         TRACE_ENTRY();
2809
2810         if (exclude != NULL)
2811                 exclude_tgt_dev = exclude->tgt_dev;
2812
2813         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2814                         dev_tgt_dev_list_entry) {
2815                 if (tgt_dev != exclude_tgt_dev)
2816                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2817         }
2818
2819         TRACE_EXIT();
2820         return;
2821 }
2822
2823 /* Called under dev_lock and BH off */
2824 void __scst_dev_check_set_UA(struct scst_device *dev,
2825         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2826 {
2827         TRACE_ENTRY();
2828
2829         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2830
2831         /* Check for reset UA */
2832         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2833                 scst_process_reset(dev,
2834                                    (exclude != NULL) ? exclude->sess : NULL,
2835                                    exclude, NULL, false);
2836
2837         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2838
2839         TRACE_EXIT();
2840         return;
2841 }
2842
2843 /* Called under tgt_dev_lock or when tgt_dev is unused */
2844 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2845 {
2846         struct scst_tgt_dev_UA *UA_entry, *t;
2847
2848         TRACE_ENTRY();
2849
2850         list_for_each_entry_safe(UA_entry, t,
2851                                  &tgt_dev->UA_list, UA_list_entry) {
2852                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2853                                (long long unsigned int)tgt_dev->lun);
2854                 list_del(&UA_entry->UA_list_entry);
2855                 mempool_free(UA_entry, scst_ua_mempool);
2856         }
2857         INIT_LIST_HEAD(&tgt_dev->UA_list);
2858         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2859
2860         TRACE_EXIT();
2861         return;
2862 }
2863
2864 /* No locks */
2865 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2866 {
2867         struct scst_cmd *res = NULL, *cmd, *t;
2868         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2869
2870         spin_lock_irq(&tgt_dev->sn_lock);
2871
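        /*
         * While HEAD OF QUEUE commands are in flight nothing may be
         * released from the deferred lists, since HQ commands are
         * executed outside of the SN ordering.
         */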
2872         if (unlikely(tgt_dev->hq_cmd_count != 0))
2873                 goto out_unlock;
2874
2875 restart:
2876         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2877                                 sn_cmd_list_entry) {
2878                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2879                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2880                 if (cmd->sn == expected_sn) {
2881                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2882                                 cmd, cmd->sn, cmd->sn_set);
2883                         tgt_dev->def_cmd_count--;
2884                         list_del(&cmd->sn_cmd_list_entry);
2885                         if (res == NULL)
2886                                 res = cmd;
2887                         else {
2888                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2889                                 TRACE_SN("Adding cmd %p to active cmd list",
2890                                         cmd);
2891                                 list_add_tail(&cmd->cmd_list_entry,
2892                                         &cmd->cmd_lists->active_cmd_list);
2893                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2894                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2895                         }
2896                 }
2897         }
2898         if (res != NULL)
2899                 goto out_unlock;
2900
2901         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2902                                 sn_cmd_list_entry) {
2903                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2904                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2905                 if (cmd->sn == expected_sn) {
2906                         atomic_t *slot = cmd->sn_slot;
2907                         /*
2908                          * !! At this point any pointer in cmd, except !!
2909                          * !! sn_slot and sn_cmd_list_entry, could      !!
2910                          * !! already be destroyed                      !!
2911                          */
2912                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2913                                  cmd,
2914                                  (long long unsigned int)cmd->tag,
2915                                  cmd->sn);
2916                         tgt_dev->def_cmd_count--;
2917                         list_del(&cmd->sn_cmd_list_entry);
2918                         spin_unlock_irq(&tgt_dev->sn_lock);
2919                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2920                                              &cmd->cmd_flags))
2921                                 scst_destroy_put_cmd(cmd);
2922                         scst_inc_expected_sn(tgt_dev, slot);
2923                         expected_sn = tgt_dev->expected_sn;
2924                         spin_lock_irq(&tgt_dev->sn_lock);
2925                         goto restart;
2926                 }
2927         }
2928
2929 out_unlock:
2930         spin_unlock_irq(&tgt_dev->sn_lock);
2931         return res;
2932 }
2933
2934 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2935         struct scst_thr_data_hdr *data,
2936         void (*free_fn) (struct scst_thr_data_hdr *data))
2937 {
2938         data->pid = current->pid;
2939         atomic_set(&data->ref, 1);
2940         EXTRACHECKS_BUG_ON(free_fn == NULL);
2941         data->free_fn = free_fn;
2942         spin_lock(&tgt_dev->thr_data_lock);
2943         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2944         spin_unlock(&tgt_dev->thr_data_lock);
2945 }
2946 EXPORT_SYMBOL(scst_add_thr_data);
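
/*
 * Usage sketch (illustrative; struct my_thr_data and my_free_thr_data() are
 * hypothetical): callers embed struct scst_thr_data_hdr in their own
 * per-thread structure and free it via the free_fn callback:
 *
 *	struct my_thr_data {
 *		struct scst_thr_data_hdr hdr;
 *		int my_counter;
 *	};
 *
 *	static void my_free_thr_data(struct scst_thr_data_hdr *hdr)
 *	{
 *		kfree(container_of(hdr, struct my_thr_data, hdr));
 *	}
 *
 *	d = kzalloc(sizeof(*d), GFP_KERNEL);
 *	if (d != NULL)
 *		scst_add_thr_data(tgt_dev, &d->hdr, my_free_thr_data);
 *
 * A later scst_find_thr_data(tgt_dev) from the same thread returns &d->hdr
 * with an extra reference; drop it with scst_thr_data_put().
 */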
2947
2948 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2949 {
2950         spin_lock(&tgt_dev->thr_data_lock);
2951         while (!list_empty(&tgt_dev->thr_data_list)) {
2952                 struct scst_thr_data_hdr *d = list_entry(
2953                                 tgt_dev->thr_data_list.next, typeof(*d),
2954                                 thr_data_list_entry);
2955                 list_del(&d->thr_data_list_entry);
2956                 spin_unlock(&tgt_dev->thr_data_lock);
2957                 scst_thr_data_put(d);
2958                 spin_lock(&tgt_dev->thr_data_lock);
2959         }
2960         spin_unlock(&tgt_dev->thr_data_lock);
2961         return;
2962 }
2963 EXPORT_SYMBOL(scst_del_all_thr_data);
2964
2965 void scst_dev_del_all_thr_data(struct scst_device *dev)
2966 {
2967         struct scst_tgt_dev *tgt_dev;
2968
2969         TRACE_ENTRY();
2970
2971         mutex_lock(&scst_mutex);
2972
2973         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2974                                 dev_tgt_dev_list_entry) {
2975                 scst_del_all_thr_data(tgt_dev);
2976         }
2977
2978         mutex_unlock(&scst_mutex);
2979
2980         TRACE_EXIT();
2981         return;
2982 }
2983 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
2984
2985 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2986 {
2987         struct scst_thr_data_hdr *res = NULL, *d;
2988
2989         spin_lock(&tgt_dev->thr_data_lock);
2990         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2991                 if (d->pid == current->pid) {
2992                         res = d;
2993                         scst_thr_data_get(res);
2994                         break;
2995                 }
2996         }
2997         spin_unlock(&tgt_dev->thr_data_lock);
2998         return res;
2999 }
3000 EXPORT_SYMBOL(scst_find_thr_data);
3001
3002 /* dev_lock supposed to be held and BH disabled */
3003 void __scst_block_dev(struct scst_device *dev)
3004 {
3005         dev->block_count++;
3006         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3007 }
3008
3009 /* No locks */
3010 void scst_block_dev(struct scst_device *dev, int outstanding)
3011 {
3012         spin_lock_bh(&dev->dev_lock);
3013         __scst_block_dev(dev);
3014         spin_unlock_bh(&dev->dev_lock);
3015
3016         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
3017         smp_mb();
3018
3019         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
3020                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3021         wait_event(dev->on_dev_waitQ,
3022                 atomic_read(&dev->on_dev_count) <= outstanding);
3023         TRACE_MGMT_DBG("%s", "wait_event() returned");
3024 }
3025
3026 /* No locks */
3027 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3028 {
3029         sBUG_ON(cmd->needs_unblocking);
3030
3031         cmd->needs_unblocking = 1;
3032         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3033                        cmd, (long long unsigned int)cmd->tag);
3034
3035         scst_block_dev(cmd->dev, outstanding);
3036 }
3037
3038 /* No locks */
3039 void scst_unblock_dev(struct scst_device *dev)
3040 {
3041         spin_lock_bh(&dev->dev_lock);
3042         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3043                 dev->block_count-1, dev);
3044         if (--dev->block_count == 0)
3045                 scst_unblock_cmds(dev);
3046         spin_unlock_bh(&dev->dev_lock);
3047         sBUG_ON(dev->block_count < 0);
3048 }
3049
3050 /* No locks */
3051 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3052 {
3053         scst_unblock_dev(cmd->dev);
3054         cmd->needs_unblocking = 0;
3055 }
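
/*
 * Sketch of the expected pairing (illustrative): a caller that must
 * temporarily quiesce a device does
 *
 *	scst_block_dev_cmd(cmd, 0);
 *	... perform the management action ...
 *	scst_unblock_dev_cmd(cmd);
 *
 * where outstanding == 0 means waiting until no commands at all are being
 * executed on the device.
 */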
3056
/* No locks */
int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
{
        int res = 0;
        struct scst_device *dev = cmd->dev;

        TRACE_ENTRY();

        sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);

        atomic_inc(&dev->on_dev_count);
        cmd->dec_on_dev_needed = 1;
        TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));

        if (unlikely(cmd->internal && (cmd->cdb[0] == REQUEST_SENSE))) {
                /*
                 * The original command can already block the device, so the
                 * internal REQUEST SENSE command must always pass.
                 */
                goto out;
        }

#ifdef CONFIG_SCST_STRICT_SERIALIZING
        spin_lock_bh(&dev->dev_lock);
        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
                goto out_unlock;
        if (dev->block_count > 0) {
                scst_dec_on_dev_cmd(cmd);
                TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
                        "serializing (tag %llu, dev %p)", cmd,
                        (long long unsigned int)cmd->tag, dev);
                list_add_tail(&cmd->blocked_cmd_list_entry,
                              &dev->blocked_cmd_list);
                res = 1;
        } else {
                __scst_block_dev(dev);
                cmd->inc_blocking = 1;
        }
        spin_unlock_bh(&dev->dev_lock);
        goto out;
#else
repeat:
        if (unlikely(dev->block_count > 0)) {
                spin_lock_bh(&dev->dev_lock);
                if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
                        goto out_unlock;
                barrier(); /* to reread block_count */
                if (dev->block_count > 0) {
                        scst_dec_on_dev_cmd(cmd);
                        TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
                                "serializing (tag %llu, dev %p)", cmd,
                                (long long unsigned int)cmd->tag, dev);
                        list_add_tail(&cmd->blocked_cmd_list_entry,
                                      &dev->blocked_cmd_list);
                        res = 1;
                        spin_unlock_bh(&dev->dev_lock);
                        goto out;
                } else {
                        TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
                                "continuing");
                }
                spin_unlock_bh(&dev->dev_lock);
        }
        if (unlikely(dev->dev_serialized)) {
                spin_lock_bh(&dev->dev_lock);
                barrier(); /* to reread block_count */
                if (dev->block_count == 0) {
                        TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
                                "cmds due to serializing (dev %p)", cmd,
                                (long long unsigned int)cmd->tag, dev);
                        __scst_block_dev(dev);
                        cmd->inc_blocking = 1;
                } else {
                        spin_unlock_bh(&dev->dev_lock);
                        TRACE_MGMT_DBG("Somebody blocked the device, "
                                "repeating (count %d)", dev->block_count);
                        goto repeat;
                }
                spin_unlock_bh(&dev->dev_lock);
        }
#endif

out:
        TRACE_EXIT_RES(res);
        return res;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        goto out;
}

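/*
 * Caller-side sketch (not built): scst_inc_on_dev_cmd() returns 0 when the
 * command may proceed to the device and 1 when it was parked on
 * blocked_cmd_list, to be reactivated later by scst_unblock_cmds().
 * example_send_to_dev() and this particular use of the RES_CONT return
 * codes are hypothetical.
 */
#if 0
static int example_send_to_dev(struct scst_cmd *cmd)
{
        if (scst_inc_on_dev_cmd(cmd) != 0)
                return SCST_CMD_STATE_RES_CONT_NEXT; /* delayed, stop here */

        /* ... hand the command to the dev handler ... */
        return SCST_CMD_STATE_RES_CONT_SAME;
}
#endif
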
/* Called under dev_lock */
void scst_unblock_cmds(struct scst_device *dev)
{
#ifdef CONFIG_SCST_STRICT_SERIALIZING
        struct scst_cmd *cmd, *t;
        unsigned long flags;

        TRACE_ENTRY();

        local_irq_save(flags);
        list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
                                 blocked_cmd_list_entry) {
                int brk = 0;
                /*
                 * Since only one cmd at a time is being executed, expected_sn
                 * can't change behind us, if the corresponding cmd is in
                 * blocked_cmd_list, but we could be called before
                 * scst_inc_expected_sn().
                 *
                 * For HQ commands SN is not set.
                 */
                if (likely(!cmd->internal && cmd->sn_set)) {
                        typeof(cmd->tgt_dev->expected_sn) expected_sn;
                        sBUG_ON(cmd->tgt_dev == NULL);
                        expected_sn = cmd->tgt_dev->expected_sn;
                        if (cmd->sn == expected_sn)
                                brk = 1;
                        else if (cmd->sn != (expected_sn+1))
                                continue;
                }

                list_del(&cmd->blocked_cmd_list_entry);
                TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
                spin_lock(&cmd->cmd_lists->cmd_list_lock);
                list_add(&cmd->cmd_list_entry,
                         &cmd->cmd_lists->active_cmd_list);
                wake_up(&cmd->cmd_lists->cmd_list_waitQ);
                spin_unlock(&cmd->cmd_lists->cmd_list_lock);
                if (brk)
                        break;
        }
        local_irq_restore(flags);
#else /* CONFIG_SCST_STRICT_SERIALIZING */
        struct scst_cmd *cmd, *tcmd;
        unsigned long flags;

        TRACE_ENTRY();

        local_irq_save(flags);
        list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                 blocked_cmd_list_entry) {
                list_del(&cmd->blocked_cmd_list_entry);
                TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
                spin_lock(&cmd->cmd_lists->cmd_list_lock);
                /* Keep HEAD OF QUEUE commands ahead of already queued ones */
                if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
                        list_add(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                else
                        list_add_tail(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                wake_up(&cmd->cmd_lists->cmd_list_waitQ);
                spin_unlock(&cmd->cmd_lists->cmd_list_lock);
        }
        local_irq_restore(flags);
#endif /* CONFIG_SCST_STRICT_SERIALIZING */

        TRACE_EXIT();
        return;
}

static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
        struct scst_cmd *out_of_sn_cmd)
{
        EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);

        if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
                scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
                scst_make_deferred_commands_active(tgt_dev);
        } else {
                out_of_sn_cmd->out_of_sn = 1;
                spin_lock_irq(&tgt_dev->sn_lock);
                tgt_dev->def_cmd_count++;
                list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
                              &tgt_dev->skipped_sn_list);
                TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
                        " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
                        tgt_dev->expected_sn);
                spin_unlock_irq(&tgt_dev->sn_lock);
        }

        return;
}

void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
        struct scst_cmd *out_of_sn_cmd)
{
        TRACE_ENTRY();

        if (!out_of_sn_cmd->sn_set) {
                TRACE_SN("cmd %p without sn", out_of_sn_cmd);
                goto out;
        }

        __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);

out:
        TRACE_EXIT();
        return;
}

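/*
 * Worked example: with expected_sn == 5, an out-of-SN command completing
 * with sn == 5 advances expected_sn and reactivates the deferred commands,
 * while one completing with sn == 7 is parked on skipped_sn_list until
 * expected_sn catches up with it.
 */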
void scst_on_hq_cmd_response(struct scst_cmd *cmd)
{
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

        TRACE_ENTRY();

        if (!cmd->hq_cmd_inced)
                goto out;

        spin_lock_irq(&tgt_dev->sn_lock);
        tgt_dev->hq_cmd_count--;
        /* Check under sn_lock: a negative count means unbalanced accounting */
        EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
        spin_unlock_irq(&tgt_dev->sn_lock);

        /*
         * There is no problem in checking hq_cmd_count in the
         * non-locked state. In the worst case we will only have
         * an unneeded run of the deferred commands.
         */
        if (tgt_dev->hq_cmd_count == 0)
                scst_make_deferred_commands_active(tgt_dev);

out:
        TRACE_EXIT();
        return;
}

void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
                "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
                atomic_read(&scst_cmd_count));

        scst_done_cmd_mgmt(cmd);

        smp_rmb();
        if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
                if (cmd->completed) {
                        /* It's completed and it's OK to return its result */
                        goto out;
                }

                /*
                 * dev->tas mirrors the TAS bit of the Control mode page: when
                 * set, commands aborted by another initiator are completed
                 * with TASK ABORTED status instead of being dropped silently.
                 */
                if (cmd->dev->tas) {
                        TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
                                "(tag %llu), returning TASK ABORTED", cmd,
                                (long long unsigned int)cmd->tag);
                        scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
                } else {
                        TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
                                "(tag %llu), aborting without delivery or "
                                "notification",
                                cmd, (long long unsigned int)cmd->tag);
                        /*
                         * There is no need to check/requeue possible UA,
                         * because, if it exists, it will be delivered
                         * by the "completed" branch above.
                         */
                        clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
                }
        }

out:
        TRACE_EXIT();
        return;
}

void __init scst_scsi_op_list_init(void)
{
        int i;
        uint8_t op = 0xff;

        TRACE_ENTRY();

        for (i = 0; i < 256; i++)
                scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;

        for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
                if (scst_scsi_op_table[i].ops != op) {
                        op = scst_scsi_op_table[i].ops;
                        scst_scsi_op_list[op] = i;
                }
        }

        TRACE_EXIT();
        return;
}

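/*
 * Lookup sketch (not built): scst_scsi_op_list maps an opcode to the index
 * of its first entry in scst_scsi_op_table, with SCST_CDB_TBL_SIZE meaning
 * "no entry". example_lookup_cdb() is hypothetical, and the entry type is
 * assumed to be the struct scst_sdbops used by scst_cdbprobe.h.
 */
#if 0
static const struct scst_sdbops *example_lookup_cdb(uint8_t op)
{
        int i = scst_scsi_op_list[op];

        if (i == SCST_CDB_TBL_SIZE)
                return NULL; /* opcode not in the table */
        return &scst_scsi_op_table[i];
}
#endif
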
#ifdef CONFIG_SCST_DEBUG
/*
 * Original taken from the XFS code. This is the Park-Miller "minimal
 * standard" Lehmer generator (multiplier 16807, modulus 2^31 - 1), using
 * Schrage's decomposition 2^31 - 1 = 16807*127773 + 2836 so that the
 * multiplication can't overflow a 32-bit long.
 */
unsigned long scst_random(void)
{
        static int Inited;
        static unsigned long RandomValue;
        static DEFINE_SPINLOCK(lock);
        /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
        register long rv;
        register long lo;
        register long hi;
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        if (!Inited) {
                RandomValue = jiffies;
                Inited = 1;
        }
        rv = RandomValue;
        hi = rv / 127773;
        lo = rv % 127773;
        rv = 16807 * lo - 2836 * hi;
        if (rv <= 0)
                rv += 2147483647;
        RandomValue = rv;
        spin_unlock_irqrestore(&lock, flags);
        return rv;
}
EXPORT_SYMBOL(scst_random);
#endif

#ifdef CONFIG_SCST_DEBUG_TM

#define TM_DBG_STATE_ABORT              0
#define TM_DBG_STATE_RESET              1
#define TM_DBG_STATE_OFFLINE            2

#define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT

static void tm_dbg_timer_fn(unsigned long arg);

static DEFINE_SPINLOCK(scst_tm_dbg_lock);
/* All serialized by scst_tm_dbg_lock */
static struct {
        unsigned int tm_dbg_release:1;
        unsigned int tm_dbg_blocked:1;
} tm_dbg_flags;
static LIST_HEAD(tm_dbg_delayed_cmd_list);
static int tm_dbg_delayed_cmds_count;
static int tm_dbg_passed_cmds_count;
static int tm_dbg_state;
static int tm_dbg_on_state_passes;
static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;

static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };

void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev)
{
        if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
                unsigned long flags;
                /* Do TM debugging only for LUN 0 */
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                tm_dbg_p_cmd_list_waitQ =
                        &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
                tm_dbg_state = INIT_TM_DBG_STATE;
                tm_dbg_on_state_passes =
                        tm_dbg_on_state_num_passes[tm_dbg_state];
                __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
                PRINT_INFO("LUN 0 connected from initiator %s is under "
                        "TM debugging", tgt_dev->sess->tgt->tgtt->name);
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        }
}

void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
                unsigned long flags;
                del_timer_sync(&tm_dbg_timer);
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                tm_dbg_p_cmd_list_waitQ = NULL;
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        }
}

static void tm_dbg_timer_fn(unsigned long arg)
{
        TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
        tm_dbg_flags.tm_dbg_release = 1;
        smp_wmb();
        wake_up_all(tm_dbg_p_cmd_list_waitQ);
}

/* Called under scst_tm_dbg_lock and IRQs off */
static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
{
        switch (tm_dbg_state) {
        case TM_DBG_STATE_ABORT:
                if (tm_dbg_delayed_cmds_count == 0) {
                        unsigned long d = 58*HZ + (scst_random() % (4*HZ));
                        TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
                                " for %ld.%ld seconds (%ld HZ), "
                                "tm_dbg_on_state_passes=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
                        mod_timer(&tm_dbg_timer, jiffies + d);
#if 0
                        tm_dbg_flags.tm_dbg_blocked = 1;
#endif
                } else {
                        TRACE_MGMT_DBG("Delaying another timed cmd %p "
                                "(tag %llu), delayed_cmds_count=%d, "
                                "tm_dbg_on_state_passes=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                tm_dbg_delayed_cmds_count,
                                tm_dbg_on_state_passes);
                        if (tm_dbg_delayed_cmds_count == 2)
                                tm_dbg_flags.tm_dbg_blocked = 0;
                }
                break;

        case TM_DBG_STATE_RESET:
        case TM_DBG_STATE_OFFLINE:
                TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
                        "(tag %llu), delayed_cmds_count=%d, "
                        "tm_dbg_on_state_passes=%d", cmd,
                        (long long unsigned int)cmd->tag,
                        tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
                tm_dbg_flags.tm_dbg_blocked = 1;
                break;

        default:
                sBUG();
        }
        /* IRQs already off */
        spin_lock(&cmd->cmd_lists->cmd_list_lock);
        list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
        spin_unlock(&cmd->cmd_lists->cmd_list_lock);
        cmd->tm_dbg_delayed = 1;
        tm_dbg_delayed_cmds_count++;
        return;
}

/* No locks */
void tm_dbg_check_released_cmds(void)
{
        if (tm_dbg_flags.tm_dbg_release) {
                struct scst_cmd *cmd, *tc;
                spin_lock_irq(&scst_tm_dbg_lock);
                list_for_each_entry_safe_reverse(cmd, tc,
                                &tm_dbg_delayed_cmd_list, cmd_list_entry) {
                        TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
                                "delayed_cmds_count=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                tm_dbg_delayed_cmds_count);
                        spin_lock(&cmd->cmd_lists->cmd_list_lock);
                        list_move(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                        spin_unlock(&cmd->cmd_lists->cmd_list_lock);
                }
                tm_dbg_flags.tm_dbg_release = 0;
                spin_unlock_irq(&scst_tm_dbg_lock);
        }
}

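/*
 * Caller-side sketch (not built): a command-list thread is expected to
 * recheck for released commands whenever it wakes up, since
 * tm_dbg_timer_fn() only sets tm_dbg_release and wakes the waitqueue.
 * example_cmd_thread_iteration() is hypothetical.
 */
#if 0
static void example_cmd_thread_iteration(void)
{
        if (tm_dbg_is_release())
                tm_dbg_check_released_cmds();

        /* ... then process the active cmd list as usual ... */
}
#endif
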
/* Called under scst_tm_dbg_lock */
static void tm_dbg_change_state(void)
{
        tm_dbg_flags.tm_dbg_blocked = 0;
        if (--tm_dbg_on_state_passes == 0) {
                switch (tm_dbg_state) {
                case TM_DBG_STATE_ABORT:
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to RESET");
                        tm_dbg_state = TM_DBG_STATE_RESET;
                        tm_dbg_flags.tm_dbg_blocked = 0;
                        break;
                case TM_DBG_STATE_RESET:
                case TM_DBG_STATE_OFFLINE:
#ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to "
                                "OFFLINE");
                        tm_dbg_state = TM_DBG_STATE_OFFLINE;
#else
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to ABORT");
                        tm_dbg_state = TM_DBG_STATE_ABORT;
#endif
                        break;
                default:
                        sBUG();
                }
                tm_dbg_on_state_passes =
                        tm_dbg_on_state_num_passes[tm_dbg_state];
        }

        TRACE_MGMT_DBG("%s", "Deleting timer");
        del_timer(&tm_dbg_timer);
}

/* No locks */
int tm_dbg_check_cmd(struct scst_cmd *cmd)
{
        int res = 0;
        unsigned long flags;

        if (cmd->tm_dbg_immut)
                goto out;

        if (cmd->tm_dbg_delayed) {
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
                        "delayed_cmds_count=%d", cmd,
                        (long long unsigned int)cmd->tag,
                        tm_dbg_delayed_cmds_count);

                cmd->tm_dbg_immut = 1;
                tm_dbg_delayed_cmds_count--;
                if ((tm_dbg_delayed_cmds_count == 0) &&
                    (tm_dbg_state == TM_DBG_STATE_ABORT))
                        tm_dbg_change_state();
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
                                        &cmd->tgt_dev->tgt_dev_flags)) {
                /* Delay every 50th command */
                spin_lock_irqsave(&scst_tm_dbg_lock, flags);
                if (tm_dbg_flags.tm_dbg_blocked ||
                    (++tm_dbg_passed_cmds_count % 50) == 0) {
                        tm_dbg_delay_cmd(cmd);
                        res = 1;
                } else
                        cmd->tm_dbg_immut = 1;
                spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
        }

out:
        return res;
}

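/*
 * Caller-side sketch (not built): the exec path consults tm_dbg_check_cmd()
 * right before dispatch; a return of 1 means the command was parked on
 * tm_dbg_delayed_cmd_list and must not be sent to the device yet.
 * example_exec_check() is hypothetical.
 */
#if 0
static int example_exec_check(struct scst_cmd *cmd)
{
        if (unlikely(tm_dbg_check_cmd(cmd) != 0))
                return 1; /* delayed for TM debugging, stop here */

        /* ... continue normal execution ... */
        return 0;
}
#endif
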
/* No locks */
void tm_dbg_release_cmd(struct scst_cmd *cmd)
{
        struct scst_cmd *c;
        unsigned long flags;

        spin_lock_irqsave(&scst_tm_dbg_lock, flags);
        list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
                                cmd_list_entry) {
                if (c == cmd) {
                        TRACE_MGMT_DBG("Abort request for "
                                "delayed cmd %p (tag=%llu), moving it to "
                                "active cmd list (delayed_cmds_count=%d)",
                                c, (long long unsigned int)c->tag,
                                tm_dbg_delayed_cmds_count);

                        if (!test_bit(SCST_CMD_ABORTED_OTHER,
                                            &cmd->cmd_flags)) {
                                /* Test how completed commands are handled */
                                if ((scst_random() % 10) == 5) {
                                        scst_set_cmd_error(cmd,
                                                SCST_LOAD_SENSE(
                                                scst_sense_hardw_error));
                                        /* It's completed now */
                                }
                        }

                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);
                        break;
                }
        }
        spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
}

/* Might be called under scst_mutex */
void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
{
        unsigned long flags;

        if (dev != NULL) {
                struct scst_tgt_dev *tgt_dev;
                bool found = false;

                spin_lock_bh(&dev->dev_lock);
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                                            dev_tgt_dev_list_entry) {
                        if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
                                        &tgt_dev->tgt_dev_flags)) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&dev->dev_lock);

                if (!found)
                        goto out;
        }

        spin_lock_irqsave(&scst_tm_dbg_lock, flags);
        if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
                TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
                        tm_dbg_delayed_cmds_count);
                tm_dbg_change_state();
                tm_dbg_flags.tm_dbg_release = 1;
                smp_wmb();
                if (tm_dbg_p_cmd_list_waitQ != NULL)
                        wake_up_all(tm_dbg_p_cmd_list_waitQ);
        } else {
                TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
        }
        spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);

out:
        return;
}

int tm_dbg_is_release(void)
{
        return tm_dbg_flags.tm_dbg_release;
}
#endif /* CONFIG_SCST_DEBUG_TM */

#ifdef CONFIG_SCST_DEBUG_SN
void scst_check_debug_sn(struct scst_cmd *cmd)
{
        static DEFINE_SPINLOCK(lock);
        static int type;
        static int cnt;
        unsigned long flags;
        int old = cmd->queue_type;

        spin_lock_irqsave(&lock, flags);

        if (cnt == 0) {
                if ((scst_random() % 1000) == 500) {
                        if ((scst_random() % 3) == 1)
                                type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
                        else
                                type = SCST_CMD_QUEUE_ORDERED;
                        do {
                                cnt = scst_random() % 10;
                        } while (cnt == 0);
                } else
                        goto out_unlock;
        }

        cmd->queue_type = type;
        cnt--;

        if ((scst_random() % 1000) == 750)
                cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
        else if ((scst_random() % 1000) == 751)
                cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        else if ((scst_random() % 1000) == 752)
                cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;

        TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
                cmd->queue_type, cnt);

out_unlock:
        spin_unlock_irqrestore(&lock, flags);
        return;
}
#endif /* CONFIG_SCST_DEBUG_SN */