/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);
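
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a dev handler rejecting an unsupported CDB during parsing would
 * typically report CHECK CONDITION like this:
 *
 *      scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
 *      scst_set_cmd_abnormal_done_state(cmd);
 *
 * SCST_LOAD_SENSE() expands a predefined sense descriptor into the
 * (key, asc, ascq) triple expected here; scst_complete_request_sense()
 * below uses it the same way with scst_sense_hardw_error.
 */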

void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);
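
/*
 * For reference: scst_set_sense() builds fixed-format sense data
 * (response code 0x70, "current errors"). E.g. for ILLEGAL REQUEST /
 * INVALID FIELD IN CDB (key 0x05, asc 0x24, ascq 0x00) the first 14
 * bytes come out as:
 *
 *      70 00 05 00 00 00 00 0a 00 00 00 00 24 00
 */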

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_sense);

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);
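
/*
 * Design note (editor's reading of the logic above): TASK SET FULL is
 * only meaningful when the initiator actually has other commands
 * outstanding, otherwise it could make the initiator shrink its queue
 * depth towards zero; hence plain BUSY is returned while the session is
 * still initializing or when this is the only command on the session.
 */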

int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        default:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
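
/*
 * Worked example of the truncation above: for a command with two
 * 2048-byte SG entries (bufflen 4096) and resp_data_len 3000, the loop
 * stops at i = 1 with left = 3000 - 2048 = 952, so sg_cnt stays 2 and
 * sg[1].length becomes 952. The saved orig_* fields let
 * scst_check_restore_sg_buff() undo the truncation later.
 */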

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
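
/*
 * The shift pair above converts scst_max_dev_cmd_mem (in megabytes,
 * judging by this conversion and the parameter name) to pages while
 * staying in 64-bit range: "<< 10" yields KB and ">> (PAGE_SHIFT - 10)"
 * divides by the page size in KB. E.g. with 4 KB pages,
 * 256 MB -> 262144 KB -> 65536 pages.
 */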

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}
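
/*
 * Note that acg_name is stored by reference, not copied (unlike the
 * initiator names kept on acn_list below), so the caller must keep the
 * string alive for the whole lifetime of the ACG.
 */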

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex is supposed to be held; there must not be parallel activity
 * in this session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}
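
/*
 * The new tgt_dev is now reachable two ways: per device via
 * dev_tgt_dev_list (under dev_lock) and per session via
 * sess_tgt_dev_list_hash, where HASH_VAL() buckets by LUN, so the
 * LUN -> tgt_dev translation done for each incoming command is a short
 * hash-chain walk instead of a scan over all LUNs of the session.
 */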

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No other locks are supposed to be held; scst_mutex must be held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex is supposed to be held; there must not be parallel activity
 * in this session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex is supposed to be held; there must not be parallel activity
 * in this session.
 */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %lld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%lld, rd_only %d)",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = sbuf_size;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}
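
/*
 * The REQUEST SENSE prepared above runs as an internal command: it was
 * marked HEAD OF QUEUE by scst_create_prepare_internal_cmd() and is
 * queued at the head of the active list, so it overtakes other queued
 * commands. On completion, scst_complete_request_sense() below copies
 * the returned sense into the original command, or reports HARDWARE
 * ERROR if the retrieval failed, and hands the original command back
 * for further processing.
 */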

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request)) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, 15, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, 15, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
                PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
                        "shut phase %lx", sess, sess->shut_phase);
                sBUG();
        }

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);

struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        cmd->start_time = jiffies;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

        cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
        cmd->dbl_ua_orig_resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /*
         * At this point tgt_dev can be dead, but the pointer remains non-NULL
         */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks are supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %llu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        /*
         * The target driver can already have freed the sg buffer before
         * calling scst_tgt_cmd_done(). E.g., scst_local has to do that.
         */
        if (!cmd->tgt_data_buf_alloced)
                scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
                if (unlikely(!cmd->sent_for_exec)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                            "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
                            cmd, cmd->cdb[0], cmd->tgtt->name,
                            (long long unsigned int)cmd->lun,
                            cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks are supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)",
                                c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                  &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}
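
/*
 * Design note: the "slow start" above reactivates at most two retried
 * commands per finished command, so a target that just returned QUEUE
 * FULL is refilled gradually rather than being flooded with the whole
 * retry list at once.
 */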

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_CRIT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
        int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        /* cmd->dev->scsi_dev must be non-NULL here */
        req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
        if (req == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scsi_request failed");
                res = -ENOMEM;
                goto out;
        }

        cmd->scsi_req = req;

        memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
        req->sr_cmd_len = cmd->cdb_len;
        req->sr_data_direction = cmd->data_direction;
        req->sr_use_sg = cmd->sg_cnt;
        req->sr_bufflen = cmd->bufflen;
        req->sr_buffer = cmd->sg;
        req->sr_request->rq_disk = cmd->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        cmd->scsi_req->upper_private_data = cmd;

out:
        TRACE_EXIT();
        return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
        scsi_release_request(cmd->scsi_req);
        cmd->scsi_req = NULL;
}
#endif

int scst_alloc_space(struct scst_cmd *cmd)
{
        gfp_t gfp_mask;
        int res = -ENOMEM;
        int atomic = scst_cmd_atomic(cmd);
        int flags;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

        TRACE_ENTRY();

        gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);

        flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
        if (cmd->no_sgv)
                flags |= SCST_POOL_ALLOC_NO_CACHED;

        cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
        if (cmd->sg == NULL)
                goto out;

        if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
                static int ll;
                if (ll < 10) {
                        PRINT_INFO("Unable to complete command due to "
                                "SG IO count limitation (requested %d, "
                                "available %d, tgt lim %d)", cmd->sg_cnt,
                                tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
                        ll++;
                }
                goto out_sg_free;
        }

        res = 0;

out:
        TRACE_EXIT();
        return res;

out_sg_free:
        sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
        cmd->sgv = NULL;
        cmd->sg = NULL;
        cmd->sg_cnt = 0;
        goto out;
}
1699
1700 void scst_release_space(struct scst_cmd *cmd)
1701 {
1702         TRACE_ENTRY();
1703
1704         if (cmd->sgv == NULL)
1705                 goto out;
1706
1707         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
1708                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
1709                 goto out;
1710         }
1711
1712         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1713
1714         cmd->sgv = NULL;
1715         cmd->sg_cnt = 0;
1716         cmd->sg = NULL;
1717         cmd->bufflen = 0;
1718         cmd->data_len = 0;
1719
1720 out:
1721         TRACE_EXIT();
1722         return;
1723 }
1724
1725 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
1726 {
1727         struct scatterlist *src_sg, *dst_sg;
1728         unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
1729         struct page *src, *dst;
1730         unsigned int s, d, to_copy;
1731
1732         TRACE_ENTRY();
1733
1734         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
1735                 src_sg = cmd->tgt_sg;
1736                 src_sg_cnt = cmd->tgt_sg_cnt;
1737                 dst_sg = cmd->sg;
1738                 to_copy = cmd->bufflen;
1739         } else {
1740                 src_sg = cmd->sg;
1741                 src_sg_cnt = cmd->sg_cnt;
1742                 dst_sg = cmd->tgt_sg;
1743                 to_copy = cmd->resp_data_len;
1744         }
1745
1746         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
1747                 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
1748                 to_copy);
1749
1750         dst = sg_page(dst_sg);
1751         dst_len = dst_sg->length;
1752         dst_offs = dst_sg->offset;
1753
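        /*
         * Walk the source SG list page by page, advancing through the
         * destination SG list in lockstep: whole aligned pages go through
         * copy_page(), while partial pages fall back to memcpy() of the
         * largest chunk that fits in both the current source and
         * destination fragments.
         */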
1754         s = 0;
1755         d = 0;
1756         src_offs = 0;
1757         while (s < src_sg_cnt) {
1758                 src = sg_page(&src_sg[s]);
1759                 src_len = src_sg[s].length;
1760                 src_offs += src_sg[s].offset;
1761
1762                 do {
1763                         unsigned int n;
1764
1765                         /*
1766                          * Highmem pages are not allowed here; see the
1767                          * corresponding #warning in scst_main.c. Fix your
1768                          * target driver or dev handler so that it does
1769                          * not allocate such pages!
1770                          */
1771                         EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
1772                                            PageHighMem(src));
1773
1774                         TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
1775                                 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
1776                                 cmd, to_copy, src, src_len, src_offs, dst,
1777                                 dst_len, dst_offs);
1778
1779                         if ((src_offs == 0) && (dst_offs == 0) &&
1780                             (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
1781                                 copy_page(page_address(dst), page_address(src));
1782                                 n = PAGE_SIZE;
1783                         } else {
1784                                 n = min(PAGE_SIZE - dst_offs,
1785                                         PAGE_SIZE - src_offs);
1786                                 n = min(n, src_len);
1787                                 n = min(n, dst_len);
1788                                 memcpy(page_address(dst) + dst_offs,
1789                                        page_address(src) + src_offs, n);
1790                                 dst_offs -= min(n, dst_offs);
1791                                 src_offs -= min(n, src_offs);
1792                         }
1793
1794                         TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
1795
1796                         to_copy -= n;
1797                         if (to_copy <= 0)
1798                                 goto out;
1799
1800                         src_len -= n;
1801                         dst_len -= n;
1802                         if (dst_len == 0) {
1803                                 d++;
1804                                 dst = sg_page(&dst_sg[d]);
1805                                 dst_len = dst_sg[d].length;
1806                                 dst_offs += dst_sg[d].offset;
1807                         }
1808                 } while (src_len > 0);
1809
1810                 s++;
1811         }
1812
1813 out:
1814         TRACE_EXIT();
1815         return;
1816 }
1817
1818 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1819
1820 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1821 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1822
1823 int scst_get_cdb_len(const uint8_t *cdb)
1824 {
1825         return SCST_GET_CDB_LEN(cdb[0]);
1826 }
1827
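/*
 * Illustrative sketch (not part of the original source): the CDB group
 * lives in the top 3 bits of the opcode byte. For READ(10), opcode 0x28,
 * the group is (0x28 >> 5) & 0x7 == 1, so the CDB length is 10 bytes;
 * groups 3, 6 and 7 are reserved/vendor-specific and yield -1.
 */
#if 0
        uint8_t cdb[16] = { 0x28 };             /* READ(10) */
        int len = scst_get_cdb_len(cdb);        /* len == 10 */
#endif
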
1828 /* get_trans_len_x extracts x bytes from the CDB, starting at offset off, as the transfer length */
1829
1830 /* for special commands */
1831 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1832 {
1833         cmd->bufflen = 6;
1834         return 0;
1835 }
1836
1837 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1838 {
1839         cmd->bufflen = READ_CAP_LEN;
1840         return 0;
1841 }
1842
1843 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1844 {
1845         cmd->bufflen = 1;
1846         return 0;
1847 }
1848
1849 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1850 {
1851         uint8_t *p = (uint8_t *)cmd->cdb + off;
1852         int res = 0;
1853
1854         cmd->bufflen = 0;
1855         cmd->bufflen |= ((u32)p[0]) << 8;
1856         cmd->bufflen |= ((u32)p[1]);
1857
1858         switch (cmd->cdb[1] & 0x1f) {
1859         case 0:
1860         case 1:
1861         case 6:
1862                 if (cmd->bufflen != 0) {
1863                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1864                                 "allocation length for service action %x",
1865                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1866                         goto out_inval;
1867                 }
1868                 break;
1869         }
1870
1871         switch (cmd->cdb[1] & 0x1f) {
1872         case 0:
1873         case 1:
1874                 cmd->bufflen = 20;
1875                 break;
1876         case 6:
1877                 cmd->bufflen = 32;
1878                 break;
1879         case 8:
1880                 cmd->bufflen = max(28, cmd->bufflen);
1881                 break;
1882         default:
1883                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1884                         cmd->cdb[1] & 0x1f);
1885                 goto out_inval;
1886         }
1887
1888 out:
1889         return res;
1890
1891 out_inval:
1892         scst_set_cmd_error(cmd,
1893                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1894         res = 1;
1895         goto out;
1896 }
1897
1898 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1899 {
1900         cmd->bufflen = (u32)cmd->cdb[off];
1901         return 0;
1902 }
1903
1904 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1905 {
1906         cmd->bufflen = (u32)cmd->cdb[off];
1907         if (cmd->bufflen == 0)
1908                 cmd->bufflen = 256;
1909         return 0;
1910 }
1911
1912 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1913 {
1914         const uint8_t *p = cmd->cdb + off;
1915
1916         cmd->bufflen = 0;
1917         cmd->bufflen |= ((u32)p[0]) << 8;
1918         cmd->bufflen |= ((u32)p[1]);
1919
1920         return 0;
1921 }
1922
1923 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1924 {
1925         const uint8_t *p = cmd->cdb + off;
1926
1927         cmd->bufflen = 0;
1928         cmd->bufflen |= ((u32)p[0]) << 16;
1929         cmd->bufflen |= ((u32)p[1]) << 8;
1930         cmd->bufflen |= ((u32)p[2]);
1931
1932         return 0;
1933 }
1934
1935 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1936 {
1937         const uint8_t *p = cmd->cdb + off;
1938
1939         cmd->bufflen = 0;
1940         cmd->bufflen |= ((u32)p[0]) << 24;
1941         cmd->bufflen |= ((u32)p[1]) << 16;
1942         cmd->bufflen |= ((u32)p[2]) << 8;
1943         cmd->bufflen |= ((u32)p[3]);
1944
1945         return 0;
1946 }
1947
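/*
 * The get_trans_len_[1-4]() helpers above read a big-endian transfer
 * length directly out of the CDB. A sketch (assuming the generic
 * unaligned-access helpers from <asm/unaligned.h>): get_trans_len_4()
 * is equivalent to
 */
#if 0
        cmd->bufflen = be32_to_cpu(get_unaligned((__be32 *)&cmd->cdb[off]));
#endif
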
1948 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1949 {
1950         cmd->bufflen = 0;
1951         return 0;
1952 }
1953
1954 int scst_get_cdb_info(struct scst_cmd *cmd)
1955 {
1956         int dev_type = cmd->dev->handler->type;
1957         int i, res = 0;
1958         uint8_t op;
1959         const struct scst_sdbops *ptr = NULL;
1960
1961         TRACE_ENTRY();
1962
1963         op = cmd->cdb[0];       /* fetch the opcode byte */
1964
1965         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1966                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1967                 dev_type);
1968
1969         i = scst_scsi_op_list[op];
1970         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1971                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1972                         ptr = &scst_scsi_op_table[i];
1973                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1974                               ptr->ops, ptr->devkey[0], /* disk     */
1975                               ptr->devkey[1],   /* tape     */
1976                               ptr->devkey[2],   /* printer */
1977                               ptr->devkey[3],   /* cpu      */
1978                               ptr->devkey[4],   /* cdr      */
1979                               ptr->devkey[5],   /* cdrom    */
1980                               ptr->devkey[6],   /* scanner */
1981                               ptr->devkey[7],   /* worm     */
1982                               ptr->devkey[8],   /* changer */
1983                               ptr->devkey[9],   /* commdev */
1984                               ptr->op_name);
1985                         TRACE_DBG("direction=%d flags=%d off=%d",
1986                               ptr->direction,
1987                               ptr->flags,
1988                               ptr->off);
1989                         break;
1990                 }
1991                 i++;
1992         }
1993
1994         if (ptr == NULL) {
1995                 /* opcode not found or not currently supported */
1996                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1997                       dev_type);
1998                 res = -1;
1999                 cmd->op_flags = SCST_INFO_INVALID;
2000                 goto out;
2001         }
2002
2003         cmd->cdb_len = SCST_GET_CDB_LEN(op);
2004         cmd->op_name = ptr->op_name;
2005         cmd->data_direction = ptr->direction;
2006         cmd->op_flags = ptr->flags;
2007         res = (*ptr->get_trans_len)(cmd, ptr->off);
2008
2009         if (cmd->bufflen == 0) {
2010                 /*
2011                  * According to SPC, a zero bufflen for data transfer commands
2012                  * isn't an error, so we need to fix the transfer direction.
2013                  */
2014                 cmd->data_direction = SCST_DATA_NONE;
2015         }
2016
2017 out:
2018         TRACE_EXIT();
2019         return res;
2020 }
2021 EXPORT_SYMBOL(scst_get_cdb_info);
2022
2023 /*
2024  * Routine to extract a LUN number from an 8-byte LUN structure
2025  * in network byte order (big endian);
2026  * see SAM-2, Section 4.12.3, page 40.
2027  * Supports the peripheral, flat space and logical unit addressing methods.
2028  */
2029 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2030 {
2031         uint64_t res = NO_SUCH_LUN;
2032         int address_method;
2033
2034         TRACE_ENTRY();
2035
2036         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2037
2038         if (unlikely(len < 2)) {
2039                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2040                         "more", len);
2041                 goto out;
2042         }
2043
2044         if (len > 2) {
2045                 switch (len) {
2046                 case 8:
2047                         if ((*((uint64_t *)lun) &
2048                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2049                                 goto out_err;
2050                         break;
2051                 case 4:
2052                         if (*((uint16_t *)&lun[2]) != 0)
2053                                 goto out_err;
2054                         break;
2055                 case 6:
2056                         if (*((uint32_t *)&lun[2]) != 0)
2057                                 goto out_err;
2058                         break;
2059                 default:
2060                         goto out_err;
2061                 }
2062         }
2063
2064         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
2065         switch (address_method) {
2066         case 0: /* peripheral device addressing method */
2067 #if 0
2068                 if (*lun) {
2069                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
2070                              "peripheral device addressing method 0x%02x, "
2071                              "expected 0", *lun);
2072                         break;
2073                 }
2074                 res = *(lun + 1);
2075                 break;
2076 #else
2077                 /*
2078                  * Looks like it's also legal to use it as the flat
2079                  * space addressing method
2080                  */
2081
2082                 /* fall through */
2083 #endif
2084
2085         case 1: /* flat space addressing method */
2086                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2087                 break;
2088
2089         case 2: /* logical unit addressing method */
2090                 if (*lun & 0x3f) {
2091                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2092                                     "addressing method 0x%02x, expected 0",
2093                                     *lun & 0x3f);
2094                         break;
2095                 }
2096                 if (*(lun + 1) & 0xe0) {
2097                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2098                                     "addressing method 0x%02x, expected 0",
2099                                     (*(lun + 1) & 0xf8) >> 5);
2100                         break;
2101                 }
2102                 res = *(lun + 1) & 0x1f;
2103                 break;
2104
2105         case 3: /* extended logical unit addressing method */
2106         default:
2107                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2108                             address_method);
2109                 break;
2110         }
2111
2112 out:
2113         TRACE_EXIT_RES((int)res);
2114         return res;
2115
2116 out_err:
2117         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2118         goto out;
2119 }
2120
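/*
 * Illustrative sketch (not part of the original source): decoding a
 * flat space LUN. Byte 0 carries the addressing method in its top two
 * bits (01b == flat space) plus the high 6 LUN bits; byte 1 carries
 * the low 8 bits.
 */
#if 0
        static const uint8_t raw_lun[8] = { 0x40, 0x05 };      /* method 1 */
        uint64_t l = scst_unpack_lun(raw_lun, 8);               /* l == 5 */
#endif
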
2121 int scst_calc_block_shift(int sector_size)
2122 {
2123         int block_shift = 0;
2124         int t;
2125
2126         if (sector_size == 0)
2127                 sector_size = 512;
2128
2129         t = sector_size;
2130         while (1) {
2131                 if ((t & 1) != 0)
2132                         break;
2133                 t >>= 1;
2134                 block_shift++;
2135         }
2136         if (block_shift < 9) {
2137                 PRINT_ERROR("Wrong sector size %d", sector_size);
2138                 block_shift = -1;
2139         }
2140
2141         TRACE_EXIT_RES(block_shift);
2142         return block_shift;
2143 }
2144 EXPORT_SYMBOL(scst_calc_block_shift);
2145
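/*
 * Illustrative sketch (not part of the original source): the result is
 * the position of the lowest set bit, i.e. log2 of the sector size for
 * power-of-two sizes; anything yielding a shift below 9 is rejected.
 */
#if 0
        sBUG_ON(scst_calc_block_shift(512) != 9);
        sBUG_ON(scst_calc_block_shift(4096) != 12);
        sBUG_ON(scst_calc_block_shift(256) != -1);      /* shift < 9 */
#endif
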
2146 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2147         int (*get_block_shift)(struct scst_cmd *cmd))
2148 {
2149         int res = 0;
2150
2151         TRACE_ENTRY();
2152
2153         /*
2154          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2155          * therefore change them only if necessary
2156          */
2157
2158         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2159               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2160
2161         switch (cmd->cdb[0]) {
2162         case SERVICE_ACTION_IN:
2163                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2164                         cmd->bufflen = READ_CAP16_LEN;
2165                         cmd->data_direction = SCST_DATA_READ;
2166                 }
2167                 break;
2168         case VERIFY_6:
2169         case VERIFY:
2170         case VERIFY_12:
2171         case VERIFY_16:
2172                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2173                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2174                         cmd->bufflen = 0;
2175                         goto set_timeout;
2176                 } else
2177                         cmd->data_len = 0;
2178                 break;
2179         default:
2180                 /* It's all good */
2181                 break;
2182         }
2183
2184         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2185                 /*
2186                  * No need for locks here, since *_detach() cannot be
2187                  * called while there are outstanding commands.
2188                  */
2189                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2190         }
2191
2192 set_timeout:
2193         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2194                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2195         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2196                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2197         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2198                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2199
2200         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2201               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2202
2203         TRACE_EXIT_RES(res);
2204         return res;
2205 }
2206 EXPORT_SYMBOL(scst_sbc_generic_parse);
2207
2208 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2209         int (*get_block_shift)(struct scst_cmd *cmd))
2210 {
2211         int res = 0;
2212
2213         TRACE_ENTRY();
2214
2215         /*
2216          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2217          * therefore change them only if necessary
2218          */
2219
2220         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2221               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2222
2223         cmd->cdb[1] &= 0x1f;
2224
2225         switch (cmd->cdb[0]) {
2226         case VERIFY_6:
2227         case VERIFY:
2228         case VERIFY_12:
2229         case VERIFY_16:
2230                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2231                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2232                         cmd->bufflen = 0;
2233                         goto set_timeout;
2234                 }
2235                 break;
2236         default:
2237                 /* It's all good */
2238                 break;
2239         }
2240
2241         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2242                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2243
2244 set_timeout:
2245         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2246                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2247         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2248                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2249         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2250                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2251
2252         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2253                 cmd->data_direction);
2254
2255         TRACE_EXIT();
2256         return res;
2257 }
2258 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2259
2260 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2261         int (*get_block_shift)(struct scst_cmd *cmd))
2262 {
2263         int res = 0;
2264
2265         TRACE_ENTRY();
2266
2267         /*
2268          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2269          * therefore change them only if necessary
2270          */
2271
2272         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2273               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2274
2275         cmd->cdb[1] &= 0x1f;
2276
2277         switch (cmd->cdb[0]) {
2278         case VERIFY_6:
2279         case VERIFY:
2280         case VERIFY_12:
2281         case VERIFY_16:
2282                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2283                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2284                         cmd->bufflen = 0;
2285                         goto set_timeout;
2286                 }
2287                 break;
2288         default:
2289                 /* It's all good */
2290                 break;
2291         }
2292
2293         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2294                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2295
2296 set_timeout:
2297         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2298                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2299         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2300                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2301         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2302                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2303
2304         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2305                 cmd->data_direction);
2306
2307         TRACE_EXIT_RES(res);
2308         return res;
2309 }
2310 EXPORT_SYMBOL(scst_modisk_generic_parse);
2311
2312 int scst_tape_generic_parse(struct scst_cmd *cmd,
2313         int (*get_block_size)(struct scst_cmd *cmd))
2314 {
2315         int res = 0;
2316
2317         TRACE_ENTRY();
2318
2319         /*
2320          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2321          * therefore change them only if necessary
2322          */
2323
2324         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2325               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2326
2327         if (cmd->cdb[0] == READ_POSITION) {
2328                 int tclp = cmd->cdb[1] & TCLP_BIT;
2329                 int long_bit = cmd->cdb[1] & LONG_BIT;
2330                 int bt = cmd->cdb[1] & BT_BIT;
2331
2332                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2333                         cmd->bufflen =
2334                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2335                         cmd->data_direction = SCST_DATA_READ;
2336                 } else {
2337                         cmd->bufflen = 0;
2338                         cmd->data_direction = SCST_DATA_NONE;
2339                 }
2340         }
2341
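        /*
         * Note: the expression below apparently tests both the op flag and
         * the FIXED bit (bit 0) of cdb[1] in one go, which relies on
         * SCST_TRANSFER_LEN_TYPE_FIXED occupying bit 0 as well.
         */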
2342         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2343                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2344
2345         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2346                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2347         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2348                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2349         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2350                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2351
2352         TRACE_EXIT_RES(res);
2353         return res;
2354 }
2355 EXPORT_SYMBOL(scst_tape_generic_parse);
2356
2357 static int scst_null_parse(struct scst_cmd *cmd)
2358 {
2359         int res = 0;
2360
2361         TRACE_ENTRY();
2362
2363         /*
2364          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2365          * therefore change them only if necessary
2366          */
2367
2368         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2369               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2370 #if 0
2371         switch (cmd->cdb[0]) {
2372         default:
2373                 /* It's all good */
2374                 break;
2375         }
2376 #endif
2377         TRACE_DBG("res %d bufflen %d direct %d",
2378               res, cmd->bufflen, cmd->data_direction);
2379
2380         TRACE_EXIT();
2381         return res;
2382 }
2383
2384 int scst_changer_generic_parse(struct scst_cmd *cmd,
2385         int (*nothing)(struct scst_cmd *cmd))
2386 {
2387         int res = scst_null_parse(cmd);
2388
2389         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2390                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2391         else
2392                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2393
2394         return res;
2395 }
2396 EXPORT_SYMBOL(scst_changer_generic_parse);
2397
2398 int scst_processor_generic_parse(struct scst_cmd *cmd,
2399         int (*nothing)(struct scst_cmd *cmd))
2400 {
2401         int res = scst_null_parse(cmd);
2402
2403         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2404                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2405         else
2406                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2407
2408         return res;
2409 }
2410 EXPORT_SYMBOL(scst_processor_generic_parse);
2411
2412 int scst_raid_generic_parse(struct scst_cmd *cmd,
2413         int (*nothing)(struct scst_cmd *cmd))
2414 {
2415         int res = scst_null_parse(cmd);
2416
2417         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2418                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2419         else
2420                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2421
2422         return res;
2423 }
2424 EXPORT_SYMBOL(scst_raid_generic_parse);
2425
2426 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2427         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2428 {
2429         int opcode = cmd->cdb[0];
2430         int status = cmd->status;
2431         int res = SCST_CMD_STATE_DEFAULT;
2432
2433         TRACE_ENTRY();
2434
2435         /*
2436          * SCST sets good defaults for cmd->is_send_status and
2437          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2438          * therefore change them only if necessary
2439          */
2440
2441         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2442                 switch (opcode) {
2443                 case READ_CAPACITY:
2444                 {
2445                         /* Always keep track of disk capacity */
2446                         int buffer_size, sector_size, sh;
2447                         uint8_t *buffer;
2448
2449                         buffer_size = scst_get_buf_first(cmd, &buffer);
2450                         if (unlikely(buffer_size <= 0)) {
2451                                 if (buffer_size < 0) {
2452                                         PRINT_ERROR("%s: Unable to get the"
2453                                         " buffer (%d)", __func__, buffer_size);
2454                                 }
2455                                 goto out;
2456                         }
2457
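                        /*
                         * READ CAPACITY(10) data: bytes 0-3 hold the last
                         * LBA, bytes 4-7 the block length in bytes, both
                         * big endian.
                         */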
2458                         sector_size =
2459                             ((buffer[4] << 24) | (buffer[5] << 16) |
2460                              (buffer[6] << 8) | (buffer[7] << 0));
2461                         scst_put_buf(cmd, buffer);
2462                         if (sector_size != 0)
2463                                 sh = scst_calc_block_shift(sector_size);
2464                         else
2465                                 sh = 0;
2466                         set_block_shift(cmd, sh);
2467                         TRACE_DBG("block_shift %d", sh);
2468                         break;
2469                 }
2470                 default:
2471                         /* It's all good */
2472                         break;
2473                 }
2474         }
2475
2476         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2477               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2478
2479 out:
2480         TRACE_EXIT_RES(res);
2481         return res;
2482 }
2483 EXPORT_SYMBOL(scst_block_generic_dev_done);
2484
2485 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2486         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2487 {
2488         int opcode = cmd->cdb[0];
2489         int res = SCST_CMD_STATE_DEFAULT;
2490         int buffer_size, bs;
2491         uint8_t *buffer = NULL;
2492
2493         TRACE_ENTRY();
2494
2495         /*
2496          * SCST sets good defaults for cmd->is_send_status and
2497          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2498          * therefore change them only if necessary
2499          */
2500
2501         switch (opcode) {
2502         case MODE_SENSE:
2503         case MODE_SELECT:
2504                 buffer_size = scst_get_buf_first(cmd, &buffer);
2505                 if (unlikely(buffer_size <= 0)) {
2506                         if (buffer_size < 0) {
2507                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2508                                         __func__, buffer_size);
2509                         }
2510                         goto out;
2511                 }
2512                 break;
2513         }
2514
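        /*
         * Mode parameter header byte 3 is the block descriptor length;
         * a value of 8 means exactly one descriptor follows, whose last
         * three bytes (buffer[9..11]) hold the block length, big endian.
         */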
2515         switch (opcode) {
2516         case MODE_SENSE:
2517                 TRACE_DBG("%s", "MODE_SENSE");
2518                 if ((cmd->cdb[2] & 0xC0) == 0) {
2519                         if (buffer[3] == 8) {
2520                                 bs = (buffer[9] << 16) |
2521                                     (buffer[10] << 8) | buffer[11];
2522                                 set_block_size(cmd, bs);
2523                         }
2524                 }
2525                 break;
2526         case MODE_SELECT:
2527                 TRACE_DBG("%s", "MODE_SELECT");
2528                 if (buffer[3] == 8) {
2529                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2530                             (buffer[11]);
2531                         set_block_size(cmd, bs);
2532                 }
2533                 break;
2534         default:
2535                 /* It's all good */
2536                 break;
2537         }
2538
2539         switch (opcode) {
2540         case MODE_SENSE:
2541         case MODE_SELECT:
2542                 scst_put_buf(cmd, buffer);
2543                 break;
2544         }
2545
2546 out:
2547         TRACE_EXIT_RES(res);
2548         return res;
2549 }
2550 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2551
2552 static void scst_check_internal_sense(struct scst_device *dev, int result,
2553         uint8_t *sense, int sense_len)
2554 {
2555         TRACE_ENTRY();
2556
2557         if (host_byte(result) == DID_RESET) {
2558                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2559                         "reset UA");
2560                 scst_set_sense(sense, sense_len,
2561                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2562                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2563         } else if ((status_byte(result) == CHECK_CONDITION) &&
2564                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2565                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2566
2567         TRACE_EXIT();
2568         return;
2569 }
2570
2571 int scst_obtain_device_parameters(struct scst_device *dev)
2572 {
2573         int res = 0, i;
2574         uint8_t cmd[16];
2575         uint8_t buffer[4+0x0A];
2576         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2577
2578         TRACE_ENTRY();
2579
2580         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2581
2582         for (i = 0; i < 5; i++) {
2583                 /* Get control mode page */
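                /*
                 * MODE SENSE(6) CDB: byte 1 bit 3 (DBD) suppresses block
                 * descriptors, byte 2 selects the control mode page (0x0A),
                 * byte 4 is the allocation length.
                 */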
2584                 memset(cmd, 0, sizeof(cmd));
2585                 cmd[0] = MODE_SENSE;
2586                 cmd[1] = 8; /* DBD */
2587                 cmd[2] = 0x0A;
2588                 cmd[4] = sizeof(buffer);
2589
2590                 memset(buffer, 0, sizeof(buffer));
2591                 memset(sense_buffer, 0, sizeof(sense_buffer));
2592
2593                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2594                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2595                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2596
2597                 TRACE_DBG("MODE_SENSE done: %x", res);
2598
2599                 if (scsi_status_is_good(res)) {
2600                         int q;
2601
2602                         PRINT_BUFF_FLAG(TRACE_SCSI,
2603                                 "Returned control mode page data",
2604                                 buffer, sizeof(buffer));
2605
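                        /*
                         * buffer[0..3] is the 4-byte mode parameter header
                         * (no block descriptors, DBD was set), so the control
                         * mode page starts at buffer[4]: TST is page byte 2
                         * bits 7:5, QUEUE ALGORITHM MODIFIER page byte 3
                         * bits 7:4, SWP page byte 4 bit 3, TAS page byte 5
                         * bit 6.
                         */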
2606                         dev->tst = buffer[4+2] >> 5;
2607                         q = buffer[4+3] >> 4;
2608                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2609                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2610                                         "%d:%d:%d:%d", q,
2611                                         dev->scsi_dev->host->host_no,
2612                                         dev->scsi_dev->channel,
2613                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2614                         }
2615                         dev->queue_alg = q;
2616                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2617                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2618
2619                         /*
2620                          * Unfortunately, SCSI ML doesn't provide a way to
2621                          * specify a command's task attribute, so we can rely
2622                          * only on the device's restricted reordering.
2623                          */
2624                         dev->has_own_order_mgmt = !dev->queue_alg;
2625
2626                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2627                                 "Device %d:%d:%d:%d: TST %x, "
2628                                 "QUEUE ALG %x, SWP %x, TAS %x, "
2629                                 "has_own_order_mgmt %d",
2630                                 dev->scsi_dev->host->host_no,
2631                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2632                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2633                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2634
2635                         goto out;
2636                 } else {
2637 #if 0
2638                         if ((status_byte(res) == CHECK_CONDITION) &&
2639 #else
2640                         /*
2641                          * 3ware controllers are buggy and return CONDITION_GOOD
2642                          * instead of CHECK_CONDITION
2643                          */
2644                         if (
2645 #endif
2646                             SCST_SENSE_VALID(sense_buffer)) {
2647                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2648                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2649                                                 "Device %d:%d:%d:%d doesn't"
2650                                                 " support control mode page,"
2651                                                 " using defaults: TST %x,"
2652                                                 " QUEUE ALG %x, SWP %x, TAS %x,"
2653                                                 " has_own_order_mgmt %d",
2654                                                 dev->scsi_dev->host->host_no,
2655                                                 dev->scsi_dev->channel,
2656                                                 dev->scsi_dev->id,
2657                                                 dev->scsi_dev->lun,
2658                                                 dev->tst,
2659                                                 dev->queue_alg,
2660                                                 dev->swp,
2661                                                 dev->tas,
2662                                                 dev->has_own_order_mgmt);
2663                                         res = 0;
2664                                         goto out;
2665                                 } else if (sense_buffer[2] == NOT_READY) {
2666                                         TRACE(TRACE_SCSI,
2667                                                 "Device %d:%d:%d:%d not ready",
2668                                                 dev->scsi_dev->host->host_no,
2669                                                 dev->scsi_dev->channel,
2670                                                 dev->scsi_dev->id,
2671                                                 dev->scsi_dev->lun);
2672                                         res = 0;
2673                                         goto out;
2674                                 }
2675                         } else {
2676                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2677                                         "Internal MODE SENSE to "
2678                                         "device %d:%d:%d:%d failed: %x",
2679                                         dev->scsi_dev->host->host_no,
2680                                         dev->scsi_dev->channel,
2681                                         dev->scsi_dev->id,
2682                                         dev->scsi_dev->lun, res);
2683                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2684                                         "MODE SENSE sense",
2685                                         sense_buffer, sizeof(sense_buffer));
2686                         }
2687                         scst_check_internal_sense(dev, res, sense_buffer,
2688                                         sizeof(sense_buffer));
2689                 }
2690         }
2691         res = -ENODEV;
2692
2693 out:
2694         TRACE_EXIT_RES(res);
2695         return res;
2696 }
2697 EXPORT_SYMBOL(scst_obtain_device_parameters);
2698
2699 /* Called under dev_lock and BH off */
2700 void scst_process_reset(struct scst_device *dev,
2701         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2702         struct scst_mgmt_cmd *mcmd, bool setUA)
2703 {
2704         struct scst_tgt_dev *tgt_dev;
2705         struct scst_cmd *cmd, *tcmd;
2706
2707         TRACE_ENTRY();
2708
2709         /* Clear RESERVE'ation, if necessary */
2710         if (dev->dev_reserved) {
2711                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2712                                     dev_tgt_dev_list_entry) {
2713                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2714                                 "lun %lld",
2715                                 (long long unsigned int)tgt_dev->lun);
2716                         clear_bit(SCST_TGT_DEV_RESERVED,
2717                                   &tgt_dev->tgt_dev_flags);
2718                 }
2719                 dev->dev_reserved = 0;
2720                 /*
2721                  * There is no need to send RELEASE, since the device is going
2722                  * to be reset. Moreover, since we may be running in the RESET
2723                  * TM function, sending it might even be dangerous.
2724                  */
2725         }
2726
2727         dev->dev_double_ua_possible = 1;
2728         dev->dev_serialized = 1;
2729
2730         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2731                 dev_tgt_dev_list_entry) {
2732                 struct scst_session *sess = tgt_dev->sess;
2733
2734                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2735                 scst_free_all_UA(tgt_dev);
2736                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2737
2738                 spin_lock_irq(&sess->sess_list_lock);
2739
2740                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2741                 list_for_each_entry(cmd, &sess->search_cmd_list,
2742                                 search_cmd_list_entry) {
2743                         if (cmd == exclude_cmd)
2744                                 continue;
2745                         if ((cmd->tgt_dev == tgt_dev) ||
2746                             ((cmd->tgt_dev == NULL) &&
2747                              (cmd->lun == tgt_dev->lun))) {
2748                                 scst_abort_cmd(cmd, mcmd,
2749                                         (tgt_dev->sess != originator), 0);
2750                         }
2751                 }
2752                 spin_unlock_irq(&sess->sess_list_lock);
2753         }
2754
2755         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2756                                 blocked_cmd_list_entry) {
2757                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2758                         list_del(&cmd->blocked_cmd_list_entry);
2759                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2760                                 "to active cmd list", cmd);
2761                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2762                         list_add_tail(&cmd->cmd_list_entry,
2763                                 &cmd->cmd_lists->active_cmd_list);
2764                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2765                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2766                 }
2767         }
2768
2769         if (setUA) {
2770                 /* BH already off */
2771                 spin_lock(&scst_temp_UA_lock);
2772                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2773                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2774                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2775                         sizeof(scst_temp_UA));
2776                 spin_unlock(&scst_temp_UA_lock);
2777         }
2778
2779         TRACE_EXIT();
2780         return;
2781 }
2782
2783 int scst_set_pending_UA(struct scst_cmd *cmd)
2784 {
2785         int res = 0;
2786         struct scst_tgt_dev_UA *UA_entry;
2787
2788         TRACE_ENTRY();
2789
2790         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2791
2792         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2793
2794         /* UA list could be cleared behind us, so retest */
2795         if (list_empty(&cmd->tgt_dev->UA_list)) {
2796                 TRACE_DBG("%s",
2797                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2798                 res = -1;
2799                 goto out_unlock;
2800         }
2801
2802         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2803                               UA_list_entry);
2804
2805         TRACE_DBG("next %p UA_entry %p",
2806               cmd->tgt_dev->UA_list.next, UA_entry);
2807
2808         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2809                 sizeof(UA_entry->UA_sense_buffer));
2810
2811         cmd->ua_ignore = 1;
2812
2813         list_del(&UA_entry->UA_list_entry);
2814
2815         mempool_free(UA_entry, scst_ua_mempool);
2816
2817         if (list_empty(&cmd->tgt_dev->UA_list)) {
2818                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2819                           &cmd->tgt_dev->tgt_dev_flags);
2820         }
2821
2822         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2823
2824 out:
2825         TRACE_EXIT_RES(res);
2826         return res;
2827
2828 out_unlock:
2829         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2830         goto out;
2831 }
2832
2833 /* Called under tgt_dev_lock and BH off */
2834 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2835         const uint8_t *sense, int sense_len, int head)
2836 {
2837         struct scst_tgt_dev_UA *UA_entry = NULL;
2838
2839         TRACE_ENTRY();
2840
2841         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2842         if (UA_entry == NULL) {
2843                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2844                      "allocation failed. The UNIT ATTENTION "
2845                      "on some sessions will be missed");
2846                 PRINT_BUFFER("Lost UA", sense, sense_len);
2847                 goto out;
2848         }
2849         memset(UA_entry, 0, sizeof(*UA_entry));
2850
2851         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2852                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2853         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2854
2855         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2856
2857         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2858
2859         if (head)
2860                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2861         else
2862                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2863
2864 out:
2865         TRACE_EXIT();
2866         return;
2867 }
2868
2869 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2870         const uint8_t *sense, int sense_len, int head)
2871 {
2872         int skip_UA = 0;
2873         struct scst_tgt_dev_UA *UA_entry_tmp;
2874
2875         TRACE_ENTRY();
2876
2877         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2878
2879         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2880                             UA_list_entry) {
2881                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2882                            sense_len) == 0) {
2883                         TRACE_MGMT_DBG("%s", "UA already exists");
2884                         skip_UA = 1;
2885                         break;
2886                 }
2887         }
2888
2889         if (skip_UA == 0)
2890                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2891
2892         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2893
2894         TRACE_EXIT();
2895         return;
2896 }
2897
2898 /* Called under dev_lock and BH off */
2899 void scst_dev_check_set_local_UA(struct scst_device *dev,
2900         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2901 {
2902         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2903
2904         TRACE_ENTRY();
2905
2906         if (exclude != NULL)
2907                 exclude_tgt_dev = exclude->tgt_dev;
2908
2909         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2910                         dev_tgt_dev_list_entry) {
2911                 if (tgt_dev != exclude_tgt_dev)
2912                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2913         }
2914
2915         TRACE_EXIT();
2916         return;
2917 }
2918
2919 /* Called under dev_lock and BH off */
2920 void __scst_dev_check_set_UA(struct scst_device *dev,
2921         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2922 {
2923         TRACE_ENTRY();
2924
2925         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2926
2927         /* Check for reset UA */
2928         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2929                 scst_process_reset(dev,
2930                                    (exclude != NULL) ? exclude->sess : NULL,
2931                                    exclude, NULL, false);
2932
2933         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2934
2935         TRACE_EXIT();
2936         return;
2937 }
2938
2939 /* Called under tgt_dev_lock or when tgt_dev is unused */
2940 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2941 {
2942         struct scst_tgt_dev_UA *UA_entry, *t;
2943
2944         TRACE_ENTRY();
2945
2946         list_for_each_entry_safe(UA_entry, t,
2947                                  &tgt_dev->UA_list, UA_list_entry) {
2948                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2949                                (long long unsigned int)tgt_dev->lun);
2950                 list_del(&UA_entry->UA_list_entry);
2951                 kfree(UA_entry);
2952         }
2953         INIT_LIST_HEAD(&tgt_dev->UA_list);
2954         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2955
2956         TRACE_EXIT();
2957         return;
2958 }
2959
2960 /* No locks */
2961 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2962 {
2963         struct scst_cmd *res = NULL, *cmd, *t;
2964         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2965
2966         spin_lock_irq(&tgt_dev->sn_lock);
2967
2968         if (unlikely(tgt_dev->hq_cmd_count != 0))
2969                 goto out_unlock;
2970
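        /*
         * Two lists are scanned: deferred_cmd_list holds commands still
         * waiting for their serial number to become current, while
         * skipped_sn_list holds commands that already completed ahead of
         * their SN; a match there only advances the expected SN and
         * releases the placeholder command.
         */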
2971 restart:
2972         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2973                                 sn_cmd_list_entry) {
2974                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2975                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2976                 if (cmd->sn == expected_sn) {
2977                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2978                                 cmd, cmd->sn, cmd->sn_set);
2979                         tgt_dev->def_cmd_count--;
2980                         list_del(&cmd->sn_cmd_list_entry);
2981                         if (res == NULL)
2982                                 res = cmd;
2983                         else {
2984                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2985                                 TRACE_SN("Adding cmd %p to active cmd list",
2986                                         cmd);
2987                                 list_add_tail(&cmd->cmd_list_entry,
2988                                         &cmd->cmd_lists->active_cmd_list);
2989                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2990                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2991                         }
2992                 }
2993         }
2994         if (res != NULL)
2995                 goto out_unlock;
2996
2997         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2998                                 sn_cmd_list_entry) {
2999                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3000                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3001                 if (cmd->sn == expected_sn) {
3002                         atomic_t *slot = cmd->sn_slot;
3003                         /*
3004                          * !! At this point any pointer in cmd, except !!
3005                          * !! sn_slot and sn_cmd_list_entry, could be   !!
3006                          * !! already destroyed                         !!
3007                          */
3008                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3009                                  cmd,
3010                                  (long long unsigned int)cmd->tag,
3011                                  cmd->sn);
3012                         tgt_dev->def_cmd_count--;
3013                         list_del(&cmd->sn_cmd_list_entry);
3014                         spin_unlock_irq(&tgt_dev->sn_lock);
3015                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3016                                              &cmd->cmd_flags))
3017                                 scst_destroy_put_cmd(cmd);
3018                         scst_inc_expected_sn(tgt_dev, slot);
3019                         expected_sn = tgt_dev->expected_sn;
3020                         spin_lock_irq(&tgt_dev->sn_lock);
3021                         goto restart;
3022                 }
3023         }
3024
3025 out_unlock:
3026         spin_unlock_irq(&tgt_dev->sn_lock);
3027         return res;
3028 }
3029
3030 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3031         struct scst_thr_data_hdr *data,
3032         void (*free_fn) (struct scst_thr_data_hdr *data))
3033 {
3034         data->pid = current->pid;
3035         atomic_set(&data->ref, 1);
3036         EXTRACHECKS_BUG_ON(free_fn == NULL);
3037         data->free_fn = free_fn;
3038         spin_lock(&tgt_dev->thr_data_lock);
3039         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3040         spin_unlock(&tgt_dev->thr_data_lock);
3041 }
3042 EXPORT_SYMBOL(scst_add_thr_data);
3043
3044 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3045 {
3046         spin_lock(&tgt_dev->thr_data_lock);
3047         while (!list_empty(&tgt_dev->thr_data_list)) {
3048                 struct scst_thr_data_hdr *d = list_entry(
3049                                 tgt_dev->thr_data_list.next, typeof(*d),
3050                                 thr_data_list_entry);
3051                 list_del(&d->thr_data_list_entry);
3052                 spin_unlock(&tgt_dev->thr_data_lock);
3053                 scst_thr_data_put(d);
3054                 spin_lock(&tgt_dev->thr_data_lock);
3055         }
3056         spin_unlock(&tgt_dev->thr_data_lock);
3057         return;
3058 }
3059 EXPORT_SYMBOL(scst_del_all_thr_data);
3060
3061 void scst_dev_del_all_thr_data(struct scst_device *dev)
3062 {
3063         struct scst_tgt_dev *tgt_dev;
3064
3065         TRACE_ENTRY();
3066
3067         mutex_lock(&scst_mutex);
3068
3069         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3070                                 dev_tgt_dev_list_entry) {
3071                 scst_del_all_thr_data(tgt_dev);
3072         }
3073
3074         mutex_unlock(&scst_mutex);
3075
3076         TRACE_EXIT();
3077         return;
3078 }
3079 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
3080
3081 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
3082 {
3083         struct scst_thr_data_hdr *res = NULL, *d;
3084         struct task_struct *tsk = current;
3085
3086         spin_lock(&tgt_dev->thr_data_lock);
3087         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
3088                 if (d->pid == tsk->pid) {
3089                         res = d;
3090                         scst_thr_data_get(res);
3091                         break;
3092                 }
3093         }
3094         spin_unlock(&tgt_dev->thr_data_lock);
3095         return res;
3096 }
3097 EXPORT_SYMBOL(scst_find_thr_data);
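
/*
 * Lookup sketch (illustrative only, not compiled): scst_find_thr_data()
 * returns the current thread's entry with its refcount incremented, so
 * every successful lookup must be paired with scst_thr_data_put().
 * struct my_thr_data and my_alloc_thr_data() are the hypothetical
 * helpers from the sketch above.
 */
#if 0
struct scst_thr_data_hdr *hdr = scst_find_thr_data(tgt_dev);
struct my_thr_data *d;

if (hdr == NULL) {
        /* First use on this thread: allocate and register */
        d = my_alloc_thr_data(tgt_dev);
} else {
        d = container_of(hdr, struct my_thr_data, hdr);
        /* ... use d->per_thread_buf ... */
        scst_thr_data_put(hdr);
}
#endif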
3098
3099 /* dev_lock supposed to be held and BH disabled */
3100 void __scst_block_dev(struct scst_device *dev)
3101 {
3102         dev->block_count++;
3103         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3104 }
3105
3106 /* No locks */
3107 void scst_block_dev(struct scst_device *dev, int outstanding)
3108 {
3109         spin_lock_bh(&dev->dev_lock);
3110         __scst_block_dev(dev);
3111         spin_unlock_bh(&dev->dev_lock);
3112
3113         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
3114         smp_mb();
3115
3116         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
3117                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3118         wait_event(dev->on_dev_waitQ,
3119                 atomic_read(&dev->on_dev_count) <= outstanding);
3120         TRACE_MGMT_DBG("%s", "wait_event() returned");
3121 }
3122
3123 /* No locks */
3124 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3125 {
3126         sBUG_ON(cmd->needs_unblocking);
3127
3128         cmd->needs_unblocking = 1;
3129         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3130                        cmd, (long long unsigned int)cmd->tag);
3131
3132         scst_block_dev(cmd->dev, outstanding);
3133 }
3134
3135 /* No locks */
3136 void scst_unblock_dev(struct scst_device *dev)
3137 {
3138         spin_lock_bh(&dev->dev_lock);
3139         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3140                 dev->block_count-1, dev);
3141         if (--dev->block_count == 0)
3142                 scst_unblock_cmds(dev);
3143         sBUG_ON(dev->block_count < 0);
3144         spin_unlock_bh(&dev->dev_lock);
3145 }
3146
3147 /* No locks */
3148 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3149 {
3150         scst_unblock_dev(cmd->dev);
3151         cmd->needs_unblocking = 0;
3152 }
3153
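/*
 * Pairing sketch (illustrative only, not compiled): a caller that needs
 * the device quiesced blocks it, waiting until no commands (outstanding
 * == 0) are being executed on it, does its work and then unblocks the
 * device, which reactivates any delayed commands.
 * my_reconfigure_device() is a hypothetical stand-in for that work.
 */
#if 0
scst_block_dev(dev, 0);
my_reconfigure_device(dev);     /* the device is guaranteed idle here */
scst_unblock_dev(dev);
#endif
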
3154 /* No locks */
3155 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3156 {
3157         int res = 0;
3158         struct scst_device *dev = cmd->dev;
3159
3160         TRACE_ENTRY();
3161
3162         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3163
3164         atomic_inc(&dev->on_dev_count);
3165         cmd->dec_on_dev_needed = 1;
3166         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3167
3168         if (unlikely(cmd->internal && (cmd->cdb[0] == REQUEST_SENSE))) {
3169                 /*
3170                  * The original command may already have blocked the device,
3171                  * so the REQUEST SENSE command must always be allowed to pass.
3172                  */
3173                 goto out;
3174         }
3175
3176 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3177         spin_lock_bh(&dev->dev_lock);
3178         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3179                 goto out_unlock;
3180         if (dev->block_count > 0) {
3181                 scst_dec_on_dev_cmd(cmd);
3182                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3183                         "serializing (tag %llu, dev %p)", cmd,
                        (long long unsigned int)cmd->tag, dev);
3184                 list_add_tail(&cmd->blocked_cmd_list_entry,
3185                               &dev->blocked_cmd_list);
3186                 res = 1;
3187         } else {
3188                 __scst_block_dev(dev);
3189                 cmd->inc_blocking = 1;
3190         }
3191         spin_unlock_bh(&dev->dev_lock);
3192         goto out;
3193 #else
3194 repeat:
3195         if (unlikely(dev->block_count > 0)) {
3196                 spin_lock_bh(&dev->dev_lock);
3197                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3198                         goto out_unlock;
3199                 barrier(); /* to reread block_count */
3200                 if (dev->block_count > 0) {
3201                         scst_dec_on_dev_cmd(cmd);
3202                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
3203                                 "serializing (tag %llu, dev %p)", cmd,
3204                                 (long long unsigned int)cmd->tag, dev);
3205                         list_add_tail(&cmd->blocked_cmd_list_entry,
3206                                       &dev->blocked_cmd_list);
3207                         res = 1;
3208                         spin_unlock_bh(&dev->dev_lock);
3209                         goto out;
3210                 } else {
3211                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3212                                 "continuing");
3213                 }
3214                 spin_unlock_bh(&dev->dev_lock);
3215         }
3216         if (unlikely(dev->dev_serialized)) {
3217                 spin_lock_bh(&dev->dev_lock);
3218                 barrier(); /* to reread block_count */
3219                 if (dev->block_count == 0) {
3220                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3221                                 "cmds due to serializing (dev %p)", cmd,
3222                                 (long long unsigned int)cmd->tag, dev);
3223                         __scst_block_dev(dev);
3224                         cmd->inc_blocking = 1;
3225                 } else {
3226                         spin_unlock_bh(&dev->dev_lock);
3227                         TRACE_MGMT_DBG("Somebody blocked the device, "
3228                                 "repeating (count %d)", dev->block_count);
3229                         goto repeat;
3230                 }
3231                 spin_unlock_bh(&dev->dev_lock);
3232         }
3233 #endif
3234
3235 out:
3236         TRACE_EXIT_RES(res);
3237         return res;
3238
3239 out_unlock:
3240         spin_unlock_bh(&dev->dev_lock);
3241         goto out;
3242 }
3243
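/*
 * Caller-side sketch (illustrative only, not compiled): a return value
 * of 1 from scst_inc_on_dev_cmd() means the command was parked on
 * dev->blocked_cmd_list and its on_dev accounting undone, so the caller
 * must stop processing it; scst_unblock_cmds() will reactivate it.
 * my_send_to_dev() is a hypothetical stand-in for the dispatch step.
 */
#if 0
if (scst_inc_on_dev_cmd(cmd) != 0)
        return;                 /* delayed, restarted on unblock */
my_send_to_dev(cmd);            /* paired later with scst_dec_on_dev_cmd() */
#endif
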
3244 /* Called under dev_lock */
3245 void scst_unblock_cmds(struct scst_device *dev)
3246 {
3247 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3248         struct scst_cmd *cmd, *t;
3249         unsigned long flags;
3250
3251         TRACE_ENTRY();
3252
3253         local_irq_save(flags);
3254         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3255                                  blocked_cmd_list_entry) {
3256                 int brk = 0;
3257                 /*
3258                  * Since only one cmd at a time is being executed, expected_sn
3259                  * can't change behind us if the corresponding cmd is in
3260                  * blocked_cmd_list, but we could be called before
3261                  * scst_inc_expected_sn().
3262                  *
3263                  * For HQ commands SN is not set.
3264                  */
3265                 if (likely(!cmd->internal && cmd->sn_set)) {
3266                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3267                         /* A cmd with sn_set must have its tgt_dev set */
3268                         sBUG_ON(cmd->tgt_dev == NULL);
3269                         expected_sn = cmd->tgt_dev->expected_sn;
3270                         if (cmd->sn == expected_sn)
3271                                 brk = 1;
3272                         else if (cmd->sn != (expected_sn+1))
3273                                 continue;
3274                 }
3275
3276                 list_del(&cmd->blocked_cmd_list_entry);
3277                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3278                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3279                 list_add(&cmd->cmd_list_entry,
3280                          &cmd->cmd_lists->active_cmd_list);
3281                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3282                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3283                 if (brk)
3284                         break;
3285         }
3286         local_irq_restore(flags);
3287 #else /* CONFIG_SCST_STRICT_SERIALIZING */
3288         struct scst_cmd *cmd, *tcmd;
3289         unsigned long flags;
3290
3291         TRACE_ENTRY();
3292
3293         local_irq_save(flags);
3294         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3295                                  blocked_cmd_list_entry) {
3296                 list_del(&cmd->blocked_cmd_list_entry);
3297                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3298                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3299                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3300                         list_add(&cmd->cmd_list_entry,
3301                                 &cmd->cmd_lists->active_cmd_list);
3302                 else
3303                         list_add_tail(&cmd->cmd_list_entry,
3304                                 &cmd->cmd_lists->active_cmd_list);
3305                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3306                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3307         }
3308         local_irq_restore(flags);
3309 #endif /* CONFIG_SCST_STRICT_SERIALIZING */
3310
3311         TRACE_EXIT();
3312         return;
3313 }
3314
3315 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3316         struct scst_cmd *out_of_sn_cmd)
3317 {
3318         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3319
3320         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3321                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3322                 scst_make_deferred_commands_active(tgt_dev);
3323         } else {
3324                 out_of_sn_cmd->out_of_sn = 1;
3325                 spin_lock_irq(&tgt_dev->sn_lock);
3326                 tgt_dev->def_cmd_count++;
3327                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3328                               &tgt_dev->skipped_sn_list);
3329                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
3330                         " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3331                         tgt_dev->expected_sn);
3332                 spin_unlock_irq(&tgt_dev->sn_lock);
3333         }
3334
3335         return;
3336 }
3337
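/*
 * Worked example of the two branches above (illustrative): assume
 * expected_sn is 5. If out_of_sn_cmd has sn 5, it is exactly the
 * command being waited for, so expected_sn is advanced and any deferred
 * commands that became eligible are made active. If it has sn 7
 * instead, commands 5 and 6 are still outstanding, so the command is
 * parked on skipped_sn_list and picked up by the skipped-SN scan once
 * expected_sn catches up with it.
 */
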
3338 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3339         struct scst_cmd *out_of_sn_cmd)
3340 {
3341         TRACE_ENTRY();
3342
3343         if (!out_of_sn_cmd->sn_set) {
3344                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3345                 goto out;
3346         }
3347
3348         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3349
3350 out:
3351         TRACE_EXIT();
3352         return;
3353 }
3354
3355 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3356 {
3357         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3358
3359         TRACE_ENTRY();
3360
3361         if (!cmd->hq_cmd_inced)
3362                 goto out;
3363
3364         spin_lock_irq(&tgt_dev->sn_lock);
3365         tgt_dev->hq_cmd_count--;
3366         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3367         spin_unlock_irq(&tgt_dev->sn_lock);
3368
3369
3370         /*
3371          * It is safe to check hq_cmd_count here without the lock. In
3372          * the worst case we will only have an unneeded run of the
3373          * deferred commands.
3374          */
3375         if (tgt_dev->hq_cmd_count == 0)
3376                 scst_make_deferred_commands_active(tgt_dev);
3377
3378 out:
3379         TRACE_EXIT();
3380         return;
3381 }
3382
3383 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3384 {
3385         TRACE_ENTRY();
3386
3387         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3388                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3389                 atomic_read(&scst_cmd_count));
3390
3391         scst_done_cmd_mgmt(cmd);
3392
3393         smp_rmb();
3394         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3395                 if (cmd->completed) {
3396                         /* It's completed and it's OK to return its result */
3397                         goto out;
3398                 }
3399
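                /*
                 * dev->tas reflects the TAS bit of the SCSI Control mode
                 * page: if it is set, commands aborted on behalf of another
                 * initiator must be completed with TASK ABORTED status; if
                 * it is clear, they are dropped without any notification.
                 */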
3400                 if (cmd->dev->tas) {
3401                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3402                                 "(tag %llu), returning TASK ABORTED", cmd,
3403                                 (long long unsigned int)cmd->tag);
3404                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3405                 } else {
3406                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3407                                 "(tag %llu), aborting without delivery or "
3408                                 "notification",
3409                                 cmd, (long long unsigned int)cmd->tag);
3410                         /*
3411                          * There is no need to check/requeue possible UA,
3412                          * because, if it exists, it will be delivered
3413                          * by the "completed" branch above.
3414                          */
3415                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3416                 }
3417         }
3418
3419 out:
3420         TRACE_EXIT();
3421         return;
3422 }
3423
3424 void __init scst_scsi_op_list_init(void)
3425 {
3426         int i;
3427         uint8_t op = 0xff;
3428
3429         TRACE_ENTRY();
3430
3431         for (i = 0; i < 256; i++)
3432                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3433
3434         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3435                 if (scst_scsi_op_table[i].ops != op) {
3436                         op = scst_scsi_op_table[i].ops;
3437                         scst_scsi_op_list[op] = i;
3438                 }
3439         }
3440
3441         TRACE_EXIT();
3442         return;
3443 }
3444
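/*
 * Lookup sketch (illustrative only, not compiled): after the init
 * above, scst_scsi_op_list[opcode] is either SCST_CDB_TBL_SIZE ("no
 * entry") or the index of the first scst_scsi_op_table entry for that
 * opcode. Entries for the same opcode are consecutive in the table, so
 * a lookup walks forward while the opcode still matches; cdb here
 * stands for the command's CDB buffer.
 */
#if 0
int i = scst_scsi_op_list[cdb[0]];

while ((i < SCST_CDB_TBL_SIZE) && (scst_scsi_op_table[i].ops == cdb[0])) {
        /* ... match scst_scsi_op_table[i] against the device type ... */
        i++;
}
#endif
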
3445 #ifdef CONFIG_SCST_DEBUG
3446 /* Original taken from the XFS code */
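/*
 * This is the Park-Miller "minimal standard" generator evaluated with
 * Schrage's method to avoid overflow: with a = 16807 and
 * m = 2^31 - 1 = 2147483647, the magic constants below are
 * q = m / a = 127773 and r = m % a = 2836, and a * x mod m is computed
 * as a * (x % q) - r * (x / q), adding m back if the result goes
 * negative.
 */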
3447 unsigned long scst_random(void)
3448 {
3449         static int Inited;
3450         static unsigned long RandomValue;
3451         static DEFINE_SPINLOCK(lock);
3452         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3453         register long rv;
3454         register long lo;
3455         register long hi;
3456         unsigned long flags;
3457
3458         spin_lock_irqsave(&lock, flags);
3459         if (!Inited) {
3460                 RandomValue = jiffies;
3461                 Inited = 1;
3462         }
3463         rv = RandomValue;
3464         hi = rv / 127773;
3465         lo = rv % 127773;
3466         rv = 16807 * lo - 2836 * hi;
3467         if (rv <= 0)
3468                 rv += 2147483647;
3469         RandomValue = rv;
3470         spin_unlock_irqrestore(&lock, flags);
3471         return rv;
3472 }
3473 EXPORT_SYMBOL(scst_random);
3474 #endif
3475
3476 #ifdef CONFIG_SCST_DEBUG_TM
3477
3478 #define TM_DBG_STATE_ABORT              0
3479 #define TM_DBG_STATE_RESET              1
3480 #define TM_DBG_STATE_OFFLINE            2
3481
3482 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
3483
3484 static void tm_dbg_timer_fn(unsigned long arg);
3485
3486 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3487 /* All serialized by scst_tm_dbg_lock */
3488 static struct {
3489         unsigned int tm_dbg_release:1;
3490         unsigned int tm_dbg_blocked:1;
3491 } tm_dbg_flags;
3492 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3493 static int tm_dbg_delayed_cmds_count;
3494 static int tm_dbg_passed_cmds_count;
3495 static int tm_dbg_state;
3496 static int tm_dbg_on_state_passes;
3497 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3498 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3499
3500 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3501
3502 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3503         struct scst_acg_dev *acg_dev)
3504 {
3505         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3506                 unsigned long flags;
3507                 /* Do TM debugging only for LUN 0 */
3508                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3509                 tm_dbg_p_cmd_list_waitQ =
3510                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3511                 tm_dbg_state = INIT_TM_DBG_STATE;
3512                 tm_dbg_on_state_passes =
3513                         tm_dbg_on_state_num_passes[tm_dbg_state];
3514                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3515                 PRINT_INFO("LUN 0 (target driver %s) is under "
3516                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3517                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3518         }
3519 }
3520
3521 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3522 {
3523         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3524                 unsigned long flags;
3525                 del_timer_sync(&tm_dbg_timer);
3526                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3527                 tm_dbg_p_cmd_list_waitQ = NULL;
3528                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3529         }
3530 }
3531
3532 static void tm_dbg_timer_fn(unsigned long arg)
3533 {
3534         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3535         tm_dbg_flags.tm_dbg_release = 1;
3536         smp_wmb();
3537         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3538 }
3539
3540 /* Called under scst_tm_dbg_lock and IRQs off */
3541 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3542 {
3543         switch (tm_dbg_state) {
3544         case TM_DBG_STATE_ABORT:
3545                 if (tm_dbg_delayed_cmds_count == 0) {
3546                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3547                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
3548                                 " for %ld.%ld seconds (%ld HZ), "
3549                                 "tm_dbg_on_state_passes=%d", cmd,
                                (long long unsigned int)cmd->tag,
3550                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3551                         mod_timer(&tm_dbg_timer, jiffies + d);
3552 #if 0
3553                         tm_dbg_flags.tm_dbg_blocked = 1;
3554 #endif
3555                 } else {
3556                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3557                                 "(tag %llu), delayed_cmds_count=%d, "
3558                                 "tm_dbg_on_state_passes=%d", cmd,
                                (long long unsigned int)cmd->tag,
3559                                 tm_dbg_delayed_cmds_count,
3560                                 tm_dbg_on_state_passes);
3561                         if (tm_dbg_delayed_cmds_count == 2)
3562                                 tm_dbg_flags.tm_dbg_blocked = 0;
3563                 }
3564                 break;
3565
3566         case TM_DBG_STATE_RESET:
3567         case TM_DBG_STATE_OFFLINE:
3568                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3569                         "(tag %llu), delayed_cmds_count=%d, "
3570                         "tm_dbg_on_state_passes=%d", cmd,
                        (long long unsigned int)cmd->tag,
3571                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3572                 tm_dbg_flags.tm_dbg_blocked = 1;
3573                 break;
3574
3575         default:
3576                 sBUG();
3577         }
3578         /* IRQs already off */
3579         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3580         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3581         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3582         cmd->tm_dbg_delayed = 1;
3583         tm_dbg_delayed_cmds_count++;
3584         return;
3585 }
3586
3587 /* No locks */
3588 void tm_dbg_check_released_cmds(void)
3589 {
3590         if (tm_dbg_flags.tm_dbg_release) {
3591                 struct scst_cmd *cmd, *tc;
3592                 spin_lock_irq(&scst_tm_dbg_lock);
3593                 list_for_each_entry_safe_reverse(cmd, tc,
3594                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3595                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3596                                 "delayed_cmds_count=%d", cmd,
                                (long long unsigned int)cmd->tag,
3597                                 tm_dbg_delayed_cmds_count);
3598                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3599                         list_move(&cmd->cmd_list_entry,
3600                                 &cmd->cmd_lists->active_cmd_list);
3601                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3602                 }
3603                 tm_dbg_flags.tm_dbg_release = 0;
3604                 spin_unlock_irq(&scst_tm_dbg_lock);
3605         }
3606 }
3607
3608 /* Called under scst_tm_dbg_lock */
3609 static void tm_dbg_change_state(void)
3610 {
3611         tm_dbg_flags.tm_dbg_blocked = 0;
3612         if (--tm_dbg_on_state_passes == 0) {
3613                 switch (tm_dbg_state) {
3614                 case TM_DBG_STATE_ABORT:
3615                         TRACE_MGMT_DBG("%s", "Changing "
3616                             "tm_dbg_state to RESET");
3617                         tm_dbg_state =
3618                                 TM_DBG_STATE_RESET;
3619                         tm_dbg_flags.tm_dbg_blocked = 0;
3620                         break;
3621                 case TM_DBG_STATE_RESET:
3622                 case TM_DBG_STATE_OFFLINE:
3623 #ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
3624                         TRACE_MGMT_DBG("%s", "Changing "
3625                                 "tm_dbg_state to OFFLINE");
3626                         tm_dbg_state =
3627                                 TM_DBG_STATE_OFFLINE;
3628 #else
3629                         TRACE_MGMT_DBG("%s", "Changing "
3630                                 "tm_dbg_state to ABORT");
3631                         tm_dbg_state =
3632                                 TM_DBG_STATE_ABORT;
3633 #endif
3634                         break;
3635                 default:
3636                         sBUG();
3637                 }
3638                 tm_dbg_on_state_passes =
3639                     tm_dbg_on_state_num_passes[tm_dbg_state];
3640         }
3641
3642         TRACE_MGMT_DBG("%s", "Deleting timer");
3643         del_timer(&tm_dbg_timer);
3644 }
3645
3646 /* No locks */
3647 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3648 {
3649         int res = 0;
3650         unsigned long flags;
3651
3652         if (cmd->tm_dbg_immut)
3653                 goto out;
3654
3655         if (cmd->tm_dbg_delayed) {
3656                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3657                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3658                         "delayed_cmds_count=%d", cmd,
                        (long long unsigned int)cmd->tag,
3659                         tm_dbg_delayed_cmds_count);
3660
3661                 cmd->tm_dbg_immut = 1;
3662                 tm_dbg_delayed_cmds_count--;
3663                 if ((tm_dbg_delayed_cmds_count == 0) &&
3664                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3665                         tm_dbg_change_state();
3666                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3667         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3668                                         &cmd->tgt_dev->tgt_dev_flags)) {
3669                 /* Delay every 50th command */
3670                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3671                 if (tm_dbg_flags.tm_dbg_blocked ||
3672                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3673                         tm_dbg_delay_cmd(cmd);
3674                         res = 1;
3675                 } else
3676                         cmd->tm_dbg_immut = 1;
3677                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3678         }
3679
3680 out:
3681         return res;
3682 }
3683
3684 /* No locks */
3685 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3686 {
3687         struct scst_cmd *c;
3688         unsigned long flags;
3689
3690         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3691         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3692                                 cmd_list_entry) {
3693                 if (c == cmd) {
3694                         TRACE_MGMT_DBG("Abort request for "
3695                                 "delayed cmd %p (tag=%llu), moving it to "
3696                                 "active cmd list (delayed_cmds_count=%d)",
3697                                 c, (long long unsigned int)c->tag,
                                tm_dbg_delayed_cmds_count);
3698
3699                         if (!test_bit(SCST_CMD_ABORTED_OTHER,
3700                                             &cmd->cmd_flags)) {
3701                                 /* Test how completed commands are handled */
3702                                 if ((scst_random() % 10) == 5) {
3703                                         scst_set_cmd_error(cmd,
3704                                                 SCST_LOAD_SENSE(
3705                                                 scst_sense_hardw_error));
3706                                         /* It's completed now */
3707                                 }
3708                         }
3709
3710                         spin_lock(&c->cmd_lists->cmd_list_lock);
3711                         list_move(&c->cmd_list_entry,
3712                                 &c->cmd_lists->active_cmd_list);
3713                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3714                         spin_unlock(&c->cmd_lists->cmd_list_lock);
3715                         break;
3716                 }
3717         }
3718         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3719 }
3720
3721 /* Might be called under scst_mutex */
3722 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3723 {
3724         unsigned long flags;
3725
3726         if (dev != NULL) {
3727                 struct scst_tgt_dev *tgt_dev;
3728                 bool found = false;
3729
3730                 spin_lock_bh(&dev->dev_lock);
3731                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3732                                             dev_tgt_dev_list_entry) {
3733                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3734                                         &tgt_dev->tgt_dev_flags)) {
3735                                 found = true;
3736                                 break;
3737                         }
3738                 }
3739                 spin_unlock_bh(&dev->dev_lock);
3740
3741                 if (!found)
3742                         goto out;
3743         }
3744
3745         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3746         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3747                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3748                         tm_dbg_delayed_cmds_count);
3749                 tm_dbg_change_state();
3750                 tm_dbg_flags.tm_dbg_release = 1;
3751                 smp_wmb();
3752                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3753                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3754         } else {
3755                 TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
3756         }
3757         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3758
3759 out:
3760         return;
3761 }
3762
3763 int tm_dbg_is_release(void)
3764 {
3765         return tm_dbg_flags.tm_dbg_release;
3766 }
3767 #endif /* CONFIG_SCST_DEBUG_TM */
3768
3769 #ifdef CONFIG_SCST_DEBUG_SN
3770 void scst_check_debug_sn(struct scst_cmd *cmd)
3771 {
3772         static DEFINE_SPINLOCK(lock);
3773         static int type;
3774         static int cnt;