/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int head);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);
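
/*
 * Usage sketch (editor's illustrative example, not part of the original
 * code): a dev handler that rejects an unsupported CDB from its parse()
 * callback would typically fail the command like this, assuming the
 * scst_sense_invalid_opcode constant is defined in scst.h for use with
 * SCST_LOAD_SENSE() like the other sense definitions used in this file:
 *
 *      scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
 *      scst_set_cmd_abnormal_done_state(cmd);
 */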

void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);
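
/*
 * Layout note (editor's addition): the buffer built above is fixed-format
 * sense data per SPC. For example, scst_set_sense(buf, len, 0x03, 0x11, 0x00)
 * (MEDIUM ERROR, unrecovered read error) produces:
 *
 *      byte 0  = 0x70  response code: current error, fixed format
 *      byte 2  = 0x03  sense key (MEDIUM ERROR)
 *      byte 7  = 0x0a  additional sense length (bytes 8-17 follow)
 *      byte 12 = 0x11  ASC
 *      byte 13 = 0x00  ASCQ
 *
 * with every other byte left zero by the memset().
 */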

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_sense);

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        default:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
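
/*
 * Worked example (editor's addition): with bufflen 8192 in two 4096-byte SG
 * entries and resp_data_len 6000, the loop exits at i = 1 with l = 8192 and
 * left = 6000 - 4096 = 1904, so sg_cnt stays 2, sg[1].length is shrunk to
 * 1904, and the saved orig_sg_* values let scst_check_restore_sg_buff()
 * undo the truncation before the buffer is freed or reused.
 */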

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
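
/*
 * Arithmetic note (editor's addition): scst_max_dev_cmd_mem is in megabytes,
 * so "<< 10" yields kilobytes and ">> (PAGE_SHIFT - 10)" converts kilobytes
 * to pages. E.g., with scst_max_dev_cmd_mem = 64 and 4 KB pages
 * (PAGE_SHIFT = 12): (64 << 10) >> 2 = 16384 pages = 64 MB.
 */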

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex is supposed to be held; there must not be parallel activity in
 * this session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}
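
/*
 * Lookup sketch (editor's illustrative example): the per-session hash filled
 * in above is keyed by HASH_VAL(lun), so resolving an incoming LUN to its
 * tgt_dev is a walk of a single bucket, roughly:
 *
 *      struct list_head *head =
 *              &sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
 *      struct scst_tgt_dev *tgt_dev;
 *
 *      list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry)
 *              if (tgt_dev->lun == lun)
 *                      return tgt_dev;
 *      return NULL;
 */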

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No other locks are supposed to be held, but scst_mutex is held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex is supposed to be held; there must not be parallel activity in
 * this session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex is supposed to be held; there must not be parallel activity in
 * this session.
 */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %lld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%lld, rd_only %d)",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

static void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = sbuf_size;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}
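
/*
 * Flow note (editor's addition): the internal REQUEST SENSE command built
 * above runs through the normal command state machine at head-of-queue
 * priority; when it finishes, scst_complete_request_sense() below copies
 * any valid sense it returned into the original command, or sets HARDWARE
 * ERROR if nothing usable came back, and then frees the internal command.
 */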

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd != NULL) {
                req = scsi_cmd->sc_request;
                if (req != NULL) {
                        if (req->sr_bufflen)
                                kfree(req->sr_buffer);
                        scsi_release_request(req);
                }
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, 15, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, 15, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}
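
/*
 * Usage sketch (editor's illustrative example): target drivers normally do
 * not call scst_alloc_session() directly but go through SCST's public
 * registration entry point; see scst.h for the authoritative prototype of
 * scst_register_session(). Passing 0 requests non-atomic (blocking)
 * registration; the two NULLs are the optional private data and completion
 * callback:
 *
 *      sess = scst_register_session(tgt, 0, initiator_name, NULL, NULL);
 *      if (sess == NULL)
 *              return -ENOMEM;
 */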

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
                PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
                        "shut phase %lx", sess, sess->shut_phase);
                sBUG();
        }

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);
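
/*
 * Reference-counting sketch (editor's illustrative example): a target driver
 * that must keep a command alive across an asynchronous operation pairs the
 * two calls above in the usual way (start_async_io() is hypothetical):
 *
 *      scst_cmd_get(cmd);
 *      start_async_io(cmd);
 *      ...and later, on the completion path:
 *      scst_cmd_put(cmd);
 *
 * The command is destroyed when the last reference, taken by
 * scst_alloc_cmd() below, is dropped.
 */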

struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        cmd->start_time = jiffies;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

        cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
        cmd->dbl_ua_orig_resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /*
         * At this point tgt_dev can be dead, but the pointer remains non-NULL
         */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks are supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %llu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        /*
         * Target driver can already free sg buffer before calling
         * scst_tgt_cmd_done(). E.g., scst_local has to do that.
         */
        if (!cmd->tgt_data_buf_alloced)
                scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
                if (unlikely(!cmd->sent_for_exec)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                            "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
                            cmd, cmd->cdb[0], cmd->tgtt->name,
                            (long long unsigned int)cmd->lun,
                            cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks are supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)",
                                c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                  &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}
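
/*
 * How the retry path is driven (editor's note): when a target driver's
 * xmit_response() returns SCST_TGT_RES_QUEUE_FULL, the command is parked on
 * tgt->retry_cmd_list and tgt->retry_cmds is incremented. Every command
 * completion calls scst_check_retries() above, so once finished_cmds changes
 * at most two parked commands are moved back to the active list per call
 * (the "slow start" cap), preventing a full target queue from being
 * re-flooded all at once.
 */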
1568
1569 void scst_tgt_retry_timer_fn(unsigned long arg)
1570 {
1571         struct scst_tgt *tgt = (struct scst_tgt *)arg;
1572         unsigned long flags;
1573
1574         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
1575
1576         spin_lock_irqsave(&tgt->tgt_lock, flags);
1577         tgt->retry_timer_active = 0;
1578         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1579
1580         scst_check_retries(tgt);
1581
1582         TRACE_EXIT();
1583         return;
1584 }
1585
1586 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
1587 {
1588         struct scst_mgmt_cmd *mcmd;
1589
1590         TRACE_ENTRY();
1591
1592         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1593         if (mcmd == NULL) {
1594                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1595                         "failed, some commands and their data could leak");
1596                 goto out;
1597         }
1598         memset(mcmd, 0, sizeof(*mcmd));
1599
1600 out:
1601         TRACE_EXIT();
1602         return mcmd;
1603 }
1604
1605 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1606 {
1607         unsigned long flags;
1608
1609         TRACE_ENTRY();
1610
1611         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1612         atomic_dec(&mcmd->sess->sess_cmd_count);
1613         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1614
1615         scst_sess_put(mcmd->sess);
1616
1617         if (mcmd->mcmd_tgt_dev != NULL)
1618                 __scst_put();
1619
1620         mempool_free(mcmd, scst_mgmt_mempool);
1621
1622         TRACE_EXIT();
1623         return;
1624 }
1625
1626 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1627 int scst_alloc_request(struct scst_cmd *cmd)
1628 {
1629         int res = 0;
1630         struct scsi_request *req;
1631         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1632
1633         TRACE_ENTRY();
1634
1635         /* cmd->dev->scsi_dev must be non-NULL here */
1636         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1637         if (req == NULL) {
1638                 TRACE(TRACE_OUT_OF_MEM, "%s",
1639                       "Allocation of scsi_request failed");
1640                 res = -ENOMEM;
1641                 goto out;
1642         }
1643
1644         cmd->scsi_req = req;
1645
1646         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1647         req->sr_cmd_len = cmd->cdb_len;
1648         req->sr_data_direction = cmd->data_direction;
1649         req->sr_use_sg = cmd->sg_cnt;
1650         req->sr_bufflen = cmd->bufflen;
1651         req->sr_buffer = cmd->sg;
1652         req->sr_request->rq_disk = cmd->dev->rq_disk;
1653         req->sr_sense_buffer[0] = 0;
1654
1655         cmd->scsi_req->upper_private_data = cmd;
1656
1657 out:
1658         TRACE_EXIT();
1659         return res;
1660 }
1661
1662 void scst_release_request(struct scst_cmd *cmd)
1663 {
1664         scsi_release_request(cmd->scsi_req);
1665         cmd->scsi_req = NULL;
1666 }
1667 #endif
1668
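/*
 * Allocates the data buffer (cmd->sg/sg_cnt) for cmd from the tgt_dev's
 * SGV pool. Returns 0 on success or -ENOMEM on failure, including the
 * case when the allocated SG vector exceeds the target's SG limit.
 */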
1669 int scst_alloc_space(struct scst_cmd *cmd)
1670 {
1671         gfp_t gfp_mask;
1672         int res = -ENOMEM;
1673         int atomic = scst_cmd_atomic(cmd);
1674         int flags;
1675         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1676
1677         TRACE_ENTRY();
1678
1679         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1680
1681         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1682         if (cmd->no_sgv)
1683                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1684
1685         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1686                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1687         if (cmd->sg == NULL)
1688                 goto out;
1689
1690         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1691                 static int ll;
1692                 if (ll < 10) {
1693                         PRINT_INFO("Unable to complete command due to "
1694                                 "SG IO count limitation (requested %d, "
1695                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1696                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1697                         ll++;
1698                 }
1699                 goto out_sg_free;
1700         }
1701
1702         res = 0;
1703
1704 out:
1705         TRACE_EXIT();
1706         return res;
1707
1708 out_sg_free:
1709         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1710         cmd->sgv = NULL;
1711         cmd->sg = NULL;
1712         cmd->sg_cnt = 0;
1713         goto out;
1714 }
1715
1716 void scst_release_space(struct scst_cmd *cmd)
1717 {
1718         TRACE_ENTRY();
1719
1720         if (cmd->sgv == NULL)
1721                 goto out;
1722
1723         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
1724                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
1725                 goto out;
1726         }
1727
1728         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1729
1730         cmd->sgv = NULL;
1731         cmd->sg_cnt = 0;
1732         cmd->sg = NULL;
1733         cmd->bufflen = 0;
1734         cmd->data_len = 0;
1735
1736 out:
1737         TRACE_EXIT();
1738         return;
1739 }
1740
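
/*
 * Copies data page by page between cmd->sg and cmd->tgt_sg in the
 * direction requested by copy_dir. Highmem pages are not supported,
 * see the check inside.
 */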
1741 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
1742 {
1743         struct scatterlist *src_sg, *dst_sg;
1744         unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
1745         struct page *src, *dst;
1746         unsigned int s, d;
             int to_copy;       /* signed on purpose: the "to_copy <= 0" exit check below must see negative values */
1747
1748         TRACE_ENTRY();
1749
1750         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
1751                 src_sg = cmd->tgt_sg;
1752                 src_sg_cnt = cmd->tgt_sg_cnt;
1753                 dst_sg = cmd->sg;
1754                 to_copy = cmd->bufflen;
1755         } else {
1756                 src_sg = cmd->sg;
1757                 src_sg_cnt = cmd->sg_cnt;
1758                 dst_sg = cmd->tgt_sg;
1759                 to_copy = cmd->resp_data_len;
1760         }
1761
1762         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
1763                 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
1764                 to_copy);
1765
1766         dst = sg_page(dst_sg);
1767         dst_len = dst_sg->length;
1768         dst_offs = dst_sg->offset;
1769
1770         s = 0;
1771         d = 0;
1772         src_offs = 0;
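        /*
         * Walk both SG lists in lockstep: copy whole pages when both sides
         * are page-aligned, otherwise memcpy() the largest chunk that fits
         * in the current source and destination pages.
         */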
1773         while (s < src_sg_cnt) {
1774                 src = sg_page(&src_sg[s]);
1775                 src_len = src_sg[s].length;
1776                 src_offs += src_sg[s].offset;
1777
1778                 do {
1779                         unsigned int n;
1780
1781                         /*
1782                          * Highmem pages are not allowed here, see the
1783                          * corresponding #warning in scst_main.c. Fix your
1784                          * target driver or dev handler so that it doesn't
1785                          * allocate such pages!
1786                          */
1787                         EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
1788                                            PageHighMem(src));
1789
1790                         TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
1791                                 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
1792                                 cmd, to_copy, src, src_len, src_offs, dst,
1793                                 dst_len, dst_offs);
1794
1795                         if ((src_offs == 0) && (dst_offs == 0) &&
1796                             (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
1797                                 copy_page(page_address(dst), page_address(src));
1798                                 n = PAGE_SIZE;
1799                         } else {
1800                                 n = min(PAGE_SIZE - dst_offs,
1801                                         PAGE_SIZE - src_offs);
1802                                 n = min(n, src_len);
1803                                 n = min(n, dst_len);
1804                                 memcpy(page_address(dst) + dst_offs,
1805                                        page_address(src) + src_offs, n);
1806                                 dst_offs -= min(n, dst_offs);
1807                                 src_offs -= min(n, src_offs);
1808                         }
1809
1810                         TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
1811
1812                         to_copy -= n;
1813                         if (to_copy <= 0)
1814                                 goto out;
1815
1816                         src_len -= n;
1817                         dst_len -= n;
1818                         if (dst_len == 0) {
1819                                 d++;
1820                                 dst = sg_page(&dst_sg[d]);
1821                                 dst_len = dst_sg[d].length;
1822                                 dst_offs += dst_sg[d].offset;
1823                         }
1824                 } while (src_len > 0);
1825
1826                 s++;
1827         }
1828
1829 out:
1830         TRACE_EXIT();
1831         return;
1832 }
1833
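/* CDB length by opcode group (top 3 bits of the opcode); -1 marks reserved and vendor-specific groups */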
1834 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1835
1836 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1837 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1838
1839 int scst_get_cdb_len(const uint8_t *cdb)
1840 {
1841         return SCST_GET_CDB_LEN(cdb[0]);
1842 }
1843
1844 /* get_trans_len_x() extracts x bytes from the CDB as the transfer length, starting at offset off */
1845
1846 /* for special commands */
1847 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1848 {
1849         cmd->bufflen = 6;
1850         return 0;
1851 }
1852
1853 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1854 {
1855         cmd->bufflen = READ_CAP_LEN;
1856         return 0;
1857 }
1858
1859 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1860 {
1861         cmd->bufflen = 1;
1862         return 0;
1863 }
1864
1865 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1866 {
1867         uint8_t *p = (uint8_t *)cmd->cdb + off;
1868         int res = 0;
1869
1870         cmd->bufflen = 0;
1871         cmd->bufflen |= ((u32)p[0]) << 8;
1872         cmd->bufflen |= ((u32)p[1]);
1873
1874         switch (cmd->cdb[1] & 0x1f) {
1875         case 0:
1876         case 1:
1877         case 6:
1878                 if (cmd->bufflen != 0) {
1879                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1880                                 "allocation length for service action %x",
1881                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1882                         goto out_inval;
1883                 }
1884                 break;
1885         }
1886
1887         switch (cmd->cdb[1] & 0x1f) {
1888         case 0:
1889         case 1:
1890                 cmd->bufflen = 20;
1891                 break;
1892         case 6:
1893                 cmd->bufflen = 32;
1894                 break;
1895         case 8:
1896                 cmd->bufflen = max(28, cmd->bufflen);
1897                 break;
1898         default:
1899                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1900                         cmd->cdb[1] & 0x1f);
1901                 goto out_inval;
1902         }
1903
1904 out:
1905         return res;
1906
1907 out_inval:
1908         scst_set_cmd_error(cmd,
1909                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1910         res = 1;
1911         goto out;
1912 }
1913
1914 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1915 {
1916         cmd->bufflen = (u32)cmd->cdb[off];
1917         return 0;
1918 }
1919
1920 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1921 {
1922         cmd->bufflen = (u32)cmd->cdb[off];
1923         if (cmd->bufflen == 0)
1924                 cmd->bufflen = 256;
1925         return 0;
1926 }
1927
1928 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1929 {
1930         const uint8_t *p = cmd->cdb + off;
1931
1932         cmd->bufflen = 0;
1933         cmd->bufflen |= ((u32)p[0]) << 8;
1934         cmd->bufflen |= ((u32)p[1]);
1935
1936         return 0;
1937 }
1938
1939 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1940 {
1941         const uint8_t *p = cmd->cdb + off;
1942
1943         cmd->bufflen = 0;
1944         cmd->bufflen |= ((u32)p[0]) << 16;
1945         cmd->bufflen |= ((u32)p[1]) << 8;
1946         cmd->bufflen |= ((u32)p[2]);
1947
1948         return 0;
1949 }
1950
1951 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1952 {
1953         const uint8_t *p = cmd->cdb + off;
1954
1955         cmd->bufflen = 0;
1956         cmd->bufflen |= ((u32)p[0]) << 24;
1957         cmd->bufflen |= ((u32)p[1]) << 16;
1958         cmd->bufflen |= ((u32)p[2]) << 8;
1959         cmd->bufflen |= ((u32)p[3]);
1960
1961         return 0;
1962 }
1963
1964 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1965 {
1966         cmd->bufflen = 0;
1967         return 0;
1968 }
1969
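/*
 * Looks up cmd->cdb[0] in the opcode table and fills in cmd->cdb_len,
 * op_name, data_direction, op_flags and bufflen. Returns 0 on success
 * or -1 if the opcode is unknown for the device type.
 */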
1970 int scst_get_cdb_info(struct scst_cmd *cmd)
1971 {
1972         int dev_type = cmd->dev->handler->type;
1973         int i, res = 0;
1974         uint8_t op;
1975         const struct scst_sdbops *ptr = NULL;
1976
1977         TRACE_ENTRY();
1978
1979         op = cmd->cdb[0];       /* get the opcode */
1980
1981         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1982                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1983                 dev_type);
1984
1985         i = scst_scsi_op_list[op];
1986         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1987                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1988                         ptr = &scst_scsi_op_table[i];
1989                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1990                               ptr->ops, ptr->devkey[0], /* disk     */
1991                               ptr->devkey[1],   /* tape     */
1992                               ptr->devkey[2],   /* printer */
1993                               ptr->devkey[3],   /* cpu      */
1994                               ptr->devkey[4],   /* cdr      */
1995                               ptr->devkey[5],   /* cdrom    */
1996                               ptr->devkey[6],   /* scanner */
1997                               ptr->devkey[7],   /* worm     */
1998                               ptr->devkey[8],   /* changer */
1999                               ptr->devkey[9],   /* commdev */
2000                               ptr->op_name);
2001                         TRACE_DBG("direction=%d flags=%d off=%d",
2002                               ptr->direction,
2003                               ptr->flags,
2004                               ptr->off);
2005                         break;
2006                 }
2007                 i++;
2008         }
2009
2010         if (ptr == NULL) {
2011                 /* opcode not found, or not currently in use !!! */
2012                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
2013                       dev_type);
2014                 res = -1;
2015                 cmd->op_flags = SCST_INFO_INVALID;
2016                 goto out;
2017         }
2018
2019         cmd->cdb_len = SCST_GET_CDB_LEN(op);
2020         cmd->op_name = ptr->op_name;
2021         cmd->data_direction = ptr->direction;
2022         cmd->op_flags = ptr->flags;
2023         res = (*ptr->get_trans_len)(cmd, ptr->off);
2024
2025         if (cmd->bufflen == 0) {
2026                 /*
2027                  * According to SPC, bufflen 0 for data transfer commands isn't
2028                  * an error, so we only need to fix the transfer direction.
2029                  */
2030                 cmd->data_direction = SCST_DATA_NONE;
2031         }
2032
2033 out:
2034         TRACE_EXIT();
2035         return res;
2036 }
2037 EXPORT_SYMBOL(scst_get_cdb_info);
2038
2039 /*
2040  * Routine to extract a LUN number from an 8-byte LUN structure in
2041  * network byte order (big endian); see SAM-2, Section 4.12.3, page 40.
2042  * Supports three LUN addressing methods: peripheral, flat space and
2043  * logical unit.
2044  */
2045 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2046 {
2047         uint64_t res = NO_SUCH_LUN;
2048         int address_method;
2049
2050         TRACE_ENTRY();
2051
2052         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2053
2054         if (unlikely(len < 2)) {
2055                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2056                         "more", len);
2057                 goto out;
2058         }
2059
2060         if (len > 2) {
2061                 switch (len) {
2062                 case 8:
2063                         if ((*((uint64_t *)lun) &
2064                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2065                                 goto out_err;
2066                         break;
2067                 case 4:
2068                         if (*((uint16_t *)&lun[2]) != 0)
2069                                 goto out_err;
2070                         break;
2071                 case 6:
2072                         if (*((uint32_t *)&lun[2]) != 0)
2073                                 goto out_err;
2074                         break;
2075                 default:
2076                         goto out_err;
2077                 }
2078         }
2079
2080         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
2081         switch (address_method) {
2082         case 0: /* peripheral device addressing method */
2083 #if 0
2084                 if (*lun) {
2085                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
2086                              "peripheral device addressing method 0x%02x, "
2087                              "expected 0", *lun);
2088                         break;
2089                 }
2090                 res = *(lun + 1);
2091                 break;
2092 #else
2093                 /*
2094                  * Looks like it's also legal to treat it as the flat space
2095                  * addressing method
2096                  */
2097
2098                 /* fall through */
2099 #endif
2100
2101         case 1: /* flat space addressing method */
2102                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2103                 break;
2104
2105         case 2: /* logical unit addressing method */
2106                 if (*lun & 0x3f) {
2107                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2108                                     "addressing method 0x%02x, expected 0",
2109                                     *lun & 0x3f);
2110                         break;
2111                 }
2112                 if (*(lun + 1) & 0xe0) {
2113                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2114                                     "addressing method 0x%02x, expected 0",
2115                                     (*(lun + 1) & 0xf8) >> 5);
2116                         break;
2117                 }
2118                 res = *(lun + 1) & 0x1f;
2119                 break;
2120
2121         case 3: /* extended logical unit addressing method */
2122         default:
2123                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2124                             address_method);
2125                 break;
2126         }
2127
2128 out:
2129         TRACE_EXIT_RES((int)res);
2130         return res;
2131
2132 out_err:
2133         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2134         goto out;
2135 }
2136
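/*
 * Converts sector_size into a block shift by counting its trailing zero
 * bits (sector_size 0 defaults to 512). Returns -1 for sizes with fewer
 * than 9 such bits, i.e. not a multiple of 512.
 */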
2137 int scst_calc_block_shift(int sector_size)
2138 {
2139         int block_shift = 0;
2140         int t;
2141
2142         if (sector_size == 0)
2143                 sector_size = 512;
2144
2145         t = sector_size;
2146         while (1) {
2147                 if ((t & 1) != 0)
2148                         break;
2149                 t >>= 1;
2150                 block_shift++;
2151         }
2152         if (block_shift < 9) {
2153                 PRINT_ERROR("Wrong sector size %d", sector_size);
2154                 block_shift = -1;
2155         }
2156
2157         TRACE_EXIT_RES(block_shift);
2158         return block_shift;
2159 }
2160 EXPORT_SYMBOL(scst_calc_block_shift);
2161
2162 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2163         int (*get_block_shift)(struct scst_cmd *cmd))
2164 {
2165         int res = 0;
2166
2167         TRACE_ENTRY();
2168
2169         /*
2170          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2171          * therefore change them only if necessary
2172          */
2173
2174         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2175               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2176
2177         switch (cmd->cdb[0]) {
2178         case SERVICE_ACTION_IN:
2179                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2180                         cmd->bufflen = READ_CAP16_LEN;
2181                         cmd->data_direction = SCST_DATA_READ;
2182                 }
2183                 break;
2184         case VERIFY_6:
2185         case VERIFY:
2186         case VERIFY_12:
2187         case VERIFY_16:
2188                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2189                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2190                         cmd->bufflen = 0;
2191                         goto set_timeout;
2192                 } else
2193                         cmd->data_len = 0;
2194                 break;
2195         default:
2196                 /* It's all good */
2197                 break;
2198         }
2199
2200         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2201                 /*
2202                  * No need for locks here, since *_detach() cannot be
2203                  * called while there are outstanding commands.
2204                  */
2205                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2206         }
2207
2208 set_timeout:
2209         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2210                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2211         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2212                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2213         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2214                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2215
2216         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2217               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2218
2219         TRACE_EXIT_RES(res);
2220         return res;
2221 }
2222 EXPORT_SYMBOL(scst_sbc_generic_parse);
2223
2224 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2225         int (*get_block_shift)(struct scst_cmd *cmd))
2226 {
2227         int res = 0;
2228
2229         TRACE_ENTRY();
2230
2231         /*
2232          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2233          * therefore change them only if necessary
2234          */
2235
2236         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2237               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2238
2239         cmd->cdb[1] &= 0x1f;
2240
2241         switch (cmd->cdb[0]) {
2242         case VERIFY_6:
2243         case VERIFY:
2244         case VERIFY_12:
2245         case VERIFY_16:
2246                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2247                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2248                         cmd->bufflen = 0;
2249                         goto set_timeout;
2250                 }
2251                 break;
2252         default:
2253                 /* It's all good */
2254                 break;
2255         }
2256
2257         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2258                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2259
2260 set_timeout:
2261         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2262                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2263         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2264                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2265         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2266                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2267
2268         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2269                 cmd->data_direction);
2270
2271         TRACE_EXIT_RES(res);
2272         return res;
2273 }
2274 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2275
2276 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2277         int (*get_block_shift)(struct scst_cmd *cmd))
2278 {
2279         int res = 0;
2280
2281         TRACE_ENTRY();
2282
2283         /*
2284          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2285          * therefore change them only if necessary
2286          */
2287
2288         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2289               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2290
2291         cmd->cdb[1] &= 0x1f;
2292
2293         switch (cmd->cdb[0]) {
2294         case VERIFY_6:
2295         case VERIFY:
2296         case VERIFY_12:
2297         case VERIFY_16:
2298                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2299                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2300                         cmd->bufflen = 0;
2301                         goto set_timeout;
2302                 }
2303                 break;
2304         default:
2305                 /* It's all good */
2306                 break;
2307         }
2308
2309         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2310                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2311
2312 set_timeout:
2313         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2314                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2315         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2316                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2317         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2318                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2319
2320         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2321                 cmd->data_direction);
2322
2323         TRACE_EXIT_RES(res);
2324         return res;
2325 }
2326 EXPORT_SYMBOL(scst_modisk_generic_parse);
2327
2328 int scst_tape_generic_parse(struct scst_cmd *cmd,
2329         int (*get_block_size)(struct scst_cmd *cmd))
2330 {
2331         int res = 0;
2332
2333         TRACE_ENTRY();
2334
2335         /*
2336          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2337          * therefore change them only if necessary
2338          */
2339
2340         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2341               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2342
2343         if (cmd->cdb[0] == READ_POSITION) {
2344                 int tclp = cmd->cdb[1] & TCLP_BIT;
2345                 int long_bit = cmd->cdb[1] & LONG_BIT;
2346                 int bt = cmd->cdb[1] & BT_BIT;
2347
2348                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2349                         cmd->bufflen =
2350                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2351                         cmd->data_direction = SCST_DATA_READ;
2352                 } else {
2353                         cmd->bufflen = 0;
2354                         cmd->data_direction = SCST_DATA_NONE;
2355                 }
2356         }
2357
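        /*
         * This appears to rely on SCST_TRANSFER_LEN_TYPE_FIXED having the
         * same value as the FIXED bit in cdb[1], so the single mask tests
         * both the opcode property and the CDB bit at once (assumed to be
         * deliberate).
         */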
2358         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2359                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2360
2361         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2362                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2363         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2364                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2365         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2366                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2367
2368         TRACE_EXIT_RES(res);
2369         return res;
2370 }
2371 EXPORT_SYMBOL(scst_tape_generic_parse);
2372
2373 static int scst_null_parse(struct scst_cmd *cmd)
2374 {
2375         int res = 0;
2376
2377         TRACE_ENTRY();
2378
2379         /*
2380          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2381          * therefore change them only if necessary
2382          */
2383
2384         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2385               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2386 #if 0
2387         switch (cmd->cdb[0]) {
2388         default:
2389                 /* It's all good */
2390                 break;
2391         }
2392 #endif
2393         TRACE_DBG("res %d bufflen %d direct %d",
2394               res, cmd->bufflen, cmd->data_direction);
2395
2396         TRACE_EXIT();
2397         return res;
2398 }
2399
2400 int scst_changer_generic_parse(struct scst_cmd *cmd,
2401         int (*nothing)(struct scst_cmd *cmd))
2402 {
2403         int res = scst_null_parse(cmd);
2404
2405         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2406                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2407         else
2408                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2409
2410         return res;
2411 }
2412 EXPORT_SYMBOL(scst_changer_generic_parse);
2413
2414 int scst_processor_generic_parse(struct scst_cmd *cmd,
2415         int (*nothing)(struct scst_cmd *cmd))
2416 {
2417         int res = scst_null_parse(cmd);
2418
2419         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2420                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2421         else
2422                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2423
2424         return res;
2425 }
2426 EXPORT_SYMBOL(scst_processor_generic_parse);
2427
2428 int scst_raid_generic_parse(struct scst_cmd *cmd,
2429         int (*nothing)(struct scst_cmd *cmd))
2430 {
2431         int res = scst_null_parse(cmd);
2432
2433         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2434                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2435         else
2436                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2437
2438         return res;
2439 }
2440 EXPORT_SYMBOL(scst_raid_generic_parse);
2441
2442 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2443         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2444 {
2445         int opcode = cmd->cdb[0];
2446         int status = cmd->status;
2447         int res = SCST_CMD_STATE_DEFAULT;
2448
2449         TRACE_ENTRY();
2450
2451         /*
2452          * SCST sets good defaults for cmd->is_send_status and
2453          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2454          * therefore change them only if necessary
2455          */
2456
2457         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2458                 switch (opcode) {
2459                 case READ_CAPACITY:
2460                 {
2461                         /* Always keep track of disk capacity */
2462                         int buffer_size, sector_size, sh;
2463                         uint8_t *buffer;
2464
2465                         buffer_size = scst_get_buf_first(cmd, &buffer);
2466                         if (unlikely(buffer_size <= 0)) {
2467                                 if (buffer_size < 0) {
2468                                         PRINT_ERROR("%s: Unable to get the"
2469                                         " buffer (%d)", __func__, buffer_size);
2470                                 }
2471                                 goto out;
2472                         }
2473
2474                         sector_size =
2475                             ((buffer[4] << 24) | (buffer[5] << 16) |
2476                              (buffer[6] << 8) | (buffer[7] << 0));
2477                         scst_put_buf(cmd, buffer);
2478                         if (sector_size != 0)
2479                                 sh = scst_calc_block_shift(sector_size);
2480                         else
2481                                 sh = 0;
2482                         set_block_shift(cmd, sh);
2483                         TRACE_DBG("block_shift %d", sh);
2484                         break;
2485                 }
2486                 default:
2487                         /* It's all good */
2488                         break;
2489                 }
2490         }
2491
2492         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2493               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2494
2495 out:
2496         TRACE_EXIT_RES(res);
2497         return res;
2498 }
2499 EXPORT_SYMBOL(scst_block_generic_dev_done);
2500
2501 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2502         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2503 {
2504         int opcode = cmd->cdb[0];
2505         int res = SCST_CMD_STATE_DEFAULT;
2506         int buffer_size, bs;
2507         uint8_t *buffer = NULL;
2508
2509         TRACE_ENTRY();
2510
2511         /*
2512          * SCST sets good defaults for cmd->is_send_status and
2513          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2514          * therefore change them only if necessary
2515          */
2516
2517         switch (opcode) {
2518         case MODE_SENSE:
2519         case MODE_SELECT:
2520                 buffer_size = scst_get_buf_first(cmd, &buffer);
2521                 if (unlikely(buffer_size <= 0)) {
2522                         if (buffer_size < 0) {
2523                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2524                                         __func__, buffer_size);
2525                         }
2526                         goto out;
2527                 }
2528                 break;
2529         }
2530
2531         switch (opcode) {
2532         case MODE_SENSE:
2533                 TRACE_DBG("%s", "MODE_SENSE");
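                /*
                 * cdb[2] bits 7-6 are the PC field (0 = current values).
                 * buffer[3] is the block descriptor length from the mode
                 * parameter header; with a single 8-byte descriptor its
                 * block length lives in buffer[9..11]. The same layout is
                 * parsed for MODE SELECT below.
                 */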
2534                 if ((cmd->cdb[2] & 0xC0) == 0) {
2535                         if (buffer[3] == 8) {
2536                                 bs = (buffer[9] << 16) |
2537                                     (buffer[10] << 8) | buffer[11];
2538                                 set_block_size(cmd, bs);
2539                         }
2540                 }
2541                 break;
2542         case MODE_SELECT:
2543                 TRACE_DBG("%s", "MODE_SELECT");
2544                 if (buffer[3] == 8) {
2545                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2546                             (buffer[11]);
2547                         set_block_size(cmd, bs);
2548                 }
2549                 break;
2550         default:
2551                 /* It's all good */
2552                 break;
2553         }
2554
2555         switch (opcode) {
2556         case MODE_SENSE:
2557         case MODE_SELECT:
2558                 scst_put_buf(cmd, buffer);
2559                 break;
2560         }
2561
2562 out:
2563         TRACE_EXIT_RES(res);
2564         return res;
2565 }
2566 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2567
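/*
 * Examines the result of an internally issued command: a DID_RESET host
 * status triggers a reset UA for the device, and a returned unit
 * attention sense is propagated to all its tgt_devs.
 */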
2568 static void scst_check_internal_sense(struct scst_device *dev, int result,
2569         uint8_t *sense, int sense_len)
2570 {
2571         TRACE_ENTRY();
2572
2573         if (host_byte(result) == DID_RESET) {
2574                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2575                         "reset UA");
2576                 scst_set_sense(sense, sense_len,
2577                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2578                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2579         } else if ((status_byte(result) == CHECK_CONDITION) &&
2580                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2581                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2582
2583         TRACE_EXIT();
2584         return;
2585 }
2586
2587 int scst_obtain_device_parameters(struct scst_device *dev)
2588 {
2589         int res = 0, i;
2590         uint8_t cmd[16];
2591         uint8_t buffer[4+0x0A];
2592         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2593
2594         TRACE_ENTRY();
2595
2596         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2597
2598         for (i = 0; i < 5; i++) {
2599                 /* Get control mode page */
2600                 memset(cmd, 0, sizeof(cmd));
2601                 cmd[0] = MODE_SENSE;
2602                 cmd[1] = 8; /* DBD */
2603                 cmd[2] = 0x0A;
2604                 cmd[4] = sizeof(buffer);
2605
2606                 memset(buffer, 0, sizeof(buffer));
2607                 memset(sense_buffer, 0, sizeof(sense_buffer));
2608
2609                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2610                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2611                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2612
2613                 TRACE_DBG("MODE_SENSE done: %x", res);
2614
2615                 if (scsi_status_is_good(res)) {
2616                         int q;
2617
2618                         PRINT_BUFF_FLAG(TRACE_SCSI,
2619                                 "Returned control mode page data",
2620                                 buffer, sizeof(buffer));
2621
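                        /*
                         * The 4-byte mode parameter header precedes the
                         * control mode page: TST is in bits 7-5 of page
                         * byte 2, QUEUE ALGORITHM MODIFIER in bits 7-4 of
                         * byte 3, SWP in bit 3 of byte 4 and TAS in bit 6
                         * of byte 5.
                         */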
2622                         dev->tst = buffer[4+2] >> 5;
2623                         q = buffer[4+3] >> 4;
2624                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2625                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2626                                         "%d:%d:%d:%d", q,
2627                                         dev->scsi_dev->host->host_no,
2628                                         dev->scsi_dev->channel,
2629                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2630                         }
2631                         dev->queue_alg = q;
2632                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2633                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2634
2635                         /*
2636                          * Unfortunately, SCSI ML doesn't provide a way to
2637                          * specify a command's task attribute, so we can
2638                          * rely only on the device's restricted reordering.
2639                          */
2640                         dev->has_own_order_mgmt = !dev->queue_alg;
2641
2642                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2643                                 "Device %d:%d:%d:%d: TST %x, "
2644                                 "QUEUE ALG %x, SWP %x, TAS %x, "
2645                                 "has_own_order_mgmt %d",
2646                                 dev->scsi_dev->host->host_no,
2647                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2648                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2649                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2650
2651                         goto out;
2652                 } else {
2653 #if 0
2654                         if ((status_byte(res) == CHECK_CONDITION) &&
2655 #else
2656                         /*
2657                          * 3ware controller is buggy and returns CONDITION_GOOD
2658                          * instead of CHECK_CONDITION
2659                          */
2660                         if (
2661 #endif
2662                             SCST_SENSE_VALID(sense_buffer)) {
2663                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2664                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2665                                                 "Device %d:%d:%d:%d doesn't"
2666                                                 " support control mode page,"
2667                                                 " using defaults: TST %x,"
2668                                                 " QUEUE ALG %x, SWP %x, TAS %x,"
2669                                                 " has_own_order_mgmt %d",
2670                                                 dev->scsi_dev->host->host_no,
2671                                                 dev->scsi_dev->channel,
2672                                                 dev->scsi_dev->id,
2673                                                 dev->scsi_dev->lun,
2674                                                 dev->tst,
2675                                                 dev->queue_alg,
2676                                                 dev->swp,
2677                                                 dev->tas,
2678                                                 dev->has_own_order_mgmt);
2679                                         res = 0;
2680                                         goto out;
2681                                 } else if (sense_buffer[2] == NOT_READY) {
2682                                         TRACE(TRACE_SCSI,
2683                                                 "Device %d:%d:%d:%d not ready",
2684                                                 dev->scsi_dev->host->host_no,
2685                                                 dev->scsi_dev->channel,
2686                                                 dev->scsi_dev->id,
2687                                                 dev->scsi_dev->lun);
2688                                         res = 0;
2689                                         goto out;
2690                                 }
2691                         } else {
2692                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2693                                         "Internal MODE SENSE to "
2694                                         "device %d:%d:%d:%d failed: %x",
2695                                         dev->scsi_dev->host->host_no,
2696                                         dev->scsi_dev->channel,
2697                                         dev->scsi_dev->id,
2698                                         dev->scsi_dev->lun, res);
2699                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2700                                         "MODE SENSE sense",
2701                                         sense_buffer, sizeof(sense_buffer));
2702                         }
2703                         scst_check_internal_sense(dev, res, sense_buffer,
2704                                         sizeof(sense_buffer));
2705                 }
2706         }
2707         res = -ENODEV;
2708
2709 out:
2710         TRACE_EXIT_RES(res);
2711         return res;
2712 }
2713 EXPORT_SYMBOL(scst_obtain_device_parameters);
2714
2715 /* Called under dev_lock and BH off */
2716 void scst_process_reset(struct scst_device *dev,
2717         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2718         struct scst_mgmt_cmd *mcmd, bool setUA)
2719 {
2720         struct scst_tgt_dev *tgt_dev;
2721         struct scst_cmd *cmd, *tcmd;
2722
2723         TRACE_ENTRY();
2724
2725         /* Clear RESERVE'ation, if necessary */
2726         if (dev->dev_reserved) {
2727                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2728                                     dev_tgt_dev_list_entry) {
2729                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2730                                 "lun %lld",
2731                                 (long long unsigned int)tgt_dev->lun);
2732                         clear_bit(SCST_TGT_DEV_RESERVED,
2733                                   &tgt_dev->tgt_dev_flags);
2734                 }
2735                 dev->dev_reserved = 0;
2736                 /*
2737                  * There is no need to send RELEASE, since the device is going
2738                  * to be reset. Actually, since we can be inside a RESET TM
2739                  * function, sending it might even be dangerous.
2740                  */
2741         }
2742
2743         dev->dev_double_ua_possible = 1;
2744         dev->dev_serialized = 1;
2745
2746         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2747                 dev_tgt_dev_list_entry) {
2748                 struct scst_session *sess = tgt_dev->sess;
2749
2750                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2751                 scst_free_all_UA(tgt_dev);
2752                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2753
2754                 spin_lock_irq(&sess->sess_list_lock);
2755
2756                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2757                 list_for_each_entry(cmd, &sess->search_cmd_list,
2758                                 search_cmd_list_entry) {
2759                         if (cmd == exclude_cmd)
2760                                 continue;
2761                         if ((cmd->tgt_dev == tgt_dev) ||
2762                             ((cmd->tgt_dev == NULL) &&
2763                              (cmd->lun == tgt_dev->lun))) {
2764                                 scst_abort_cmd(cmd, mcmd,
2765                                         (tgt_dev->sess != originator), 0);
2766                         }
2767                 }
2768                 spin_unlock_irq(&sess->sess_list_lock);
2769         }
2770
2771         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2772                                 blocked_cmd_list_entry) {
2773                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2774                         list_del(&cmd->blocked_cmd_list_entry);
2775                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2776                                 "to active cmd list", cmd);
2777                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2778                         list_add_tail(&cmd->cmd_list_entry,
2779                                 &cmd->cmd_lists->active_cmd_list);
2780                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2781                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2782                 }
2783         }
2784
2785         if (setUA) {
2786                 /* BH already off */
2787                 spin_lock(&scst_temp_UA_lock);
2788                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2789                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2790                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2791                         sizeof(scst_temp_UA));
2792                 spin_unlock(&scst_temp_UA_lock);
2793         }
2794
2795         TRACE_EXIT();
2796         return;
2797 }
2798
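/*
 * Takes the first pending UA of the command's tgt_dev, turns it into a
 * CHECK CONDITION for cmd and removes it from the UA list. Returns 0 on
 * success or -1 if the UA list turned out to be empty.
 */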
2799 int scst_set_pending_UA(struct scst_cmd *cmd)
2800 {
2801         int res = 0;
2802         struct scst_tgt_dev_UA *UA_entry;
2803
2804         TRACE_ENTRY();
2805
2806         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2807
2808         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2809
2810         /* UA list could be cleared behind us, so retest */
2811         if (list_empty(&cmd->tgt_dev->UA_list)) {
2812                 TRACE_DBG("%s",
2813                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2814                 res = -1;
2815                 goto out_unlock;
2816         }
2817
2818         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2819                               UA_list_entry);
2820
2821         TRACE_DBG("next %p UA_entry %p",
2822               cmd->tgt_dev->UA_list.next, UA_entry);
2823
2824         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2825                 sizeof(UA_entry->UA_sense_buffer));
2826
2827         cmd->ua_ignore = 1;
2828
2829         list_del(&UA_entry->UA_list_entry);
2830
2831         mempool_free(UA_entry, scst_ua_mempool);
2832
2833         if (list_empty(&cmd->tgt_dev->UA_list)) {
2834                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2835                           &cmd->tgt_dev->tgt_dev_flags);
2836         }
2837
2838         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2839
2840 out:
2841         TRACE_EXIT_RES(res);
2842         return res;
2843
2844 out_unlock:
2845         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2846         goto out;
2847 }
2848
2849 /* Called under tgt_dev_lock and BH off */
2850 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2851         const uint8_t *sense, int sense_len, int head)
2852 {
2853         struct scst_tgt_dev_UA *UA_entry = NULL;
2854
2855         TRACE_ENTRY();
2856
2857         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2858         if (UA_entry == NULL) {
2859                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2860                      "allocation failed. The UNIT ATTENTION "
2861                      "on some sessions will be missed");
2862                 PRINT_BUFFER("Lost UA", sense, sense_len);
2863                 goto out;
2864         }
2865         memset(UA_entry, 0, sizeof(*UA_entry));
2866
2867         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2868                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2869         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2870
2871         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2872
2873         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2874
2875         if (head)
2876                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2877         else
2878                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2879
2880 out:
2881         TRACE_EXIT();
2882         return;
2883 }
2884
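/*
 * Queues the given UA sense on tgt_dev, unless an identical UA is
 * already pending there. With head != 0 the new UA is added at the head
 * of the list.
 */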
2885 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2886         const uint8_t *sense, int sense_len, int head)
2887 {
2888         int skip_UA = 0;
2889         struct scst_tgt_dev_UA *UA_entry_tmp;
2890
2891         TRACE_ENTRY();
2892
2893         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2894
2895         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2896                             UA_list_entry) {
2897                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2898                            sense_len) == 0) {
2899                         TRACE_MGMT_DBG("%s", "UA already exists");
2900                         skip_UA = 1;
2901                         break;
2902                 }
2903         }
2904
2905         if (skip_UA == 0)
2906                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2907
2908         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2909
2910         TRACE_EXIT();
2911         return;
2912 }
2913
2914 /* Called under dev_lock and BH off */
2915 void scst_dev_check_set_local_UA(struct scst_device *dev,
2916         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2917 {
2918         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2919
2920         TRACE_ENTRY();
2921
2922         if (exclude != NULL)
2923                 exclude_tgt_dev = exclude->tgt_dev;
2924
2925         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2926                         dev_tgt_dev_list_entry) {
2927                 if (tgt_dev != exclude_tgt_dev)
2928                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2929         }
2930
2931         TRACE_EXIT();
2932         return;
2933 }
2934
2935 /* Called under dev_lock and BH off */
2936 void __scst_dev_check_set_UA(struct scst_device *dev,
2937         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2938 {
2939         TRACE_ENTRY();
2940
2941         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2942
2943         /* Check for reset UA */
2944         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2945                 scst_process_reset(dev,
2946                                    (exclude != NULL) ? exclude->sess : NULL,
2947                                    exclude, NULL, false);
2948
2949         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2950
2951         TRACE_EXIT();
2952         return;
2953 }
2954
2955 /* Called under tgt_dev_lock or when tgt_dev is unused */
2956 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2957 {
2958         struct scst_tgt_dev_UA *UA_entry, *t;
2959
2960         TRACE_ENTRY();
2961
2962         list_for_each_entry_safe(UA_entry, t,
2963                                  &tgt_dev->UA_list, UA_list_entry) {
2964                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2965                                (long long unsigned int)tgt_dev->lun);
2966                 list_del(&UA_entry->UA_list_entry);
2967                 mempool_free(UA_entry, scst_ua_mempool);
2968         }
2969         INIT_LIST_HEAD(&tgt_dev->UA_list);
2970         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2971
2972         TRACE_EXIT();
2973         return;
2974 }
2975
2976 /* No locks */
2977 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2978 {
2979         struct scst_cmd *res = NULL, *cmd, *t;
2980         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2981
2982         spin_lock_irq(&tgt_dev->sn_lock);
2983
2984         if (unlikely(tgt_dev->hq_cmd_count != 0))
2985                 goto out_unlock;
2986
2987 restart:
2988         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2989                                 sn_cmd_list_entry) {
2990                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2991                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2992                 if (cmd->sn == expected_sn) {
2993                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2994                                 cmd, cmd->sn, cmd->sn_set);
2995                         tgt_dev->def_cmd_count--;
2996                         list_del(&cmd->sn_cmd_list_entry);
2997                         if (res == NULL)
2998                                 res = cmd;
2999                         else {
3000                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3001                                 TRACE_SN("Adding cmd %p to active cmd list",
3002                                         cmd);
3003                                 list_add_tail(&cmd->cmd_list_entry,
3004                                         &cmd->cmd_lists->active_cmd_list);
3005                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3006                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3007                         }
3008                 }
3009         }
3010         if (res != NULL)
3011                 goto out_unlock;
3012
3013         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3014                                 sn_cmd_list_entry) {
3015                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3016                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3017                 if (cmd->sn == expected_sn) {
3018                         atomic_t *slot = cmd->sn_slot;
3019                         /*
3020                          * !! At this point any pointer in cmd, except !!
3021                          * !! sn_slot and sn_cmd_list_entry, could be   !!
3022                          * !! already destroyed                         !!
3023                          */
3024                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3025                                  cmd,
3026                                  (long long unsigned int)cmd->tag,
3027                                  cmd->sn);
3028                         tgt_dev->def_cmd_count--;
3029                         list_del(&cmd->sn_cmd_list_entry);
3030                         spin_unlock_irq(&tgt_dev->sn_lock);
3031                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3032                                              &cmd->cmd_flags))
3033                                 scst_destroy_put_cmd(cmd);
3034                         scst_inc_expected_sn(tgt_dev, slot);
3035                         expected_sn = tgt_dev->expected_sn;
3036                         spin_lock_irq(&tgt_dev->sn_lock);
3037                         goto restart;
3038                 }
3039         }
3040
3041 out_unlock:
3042         spin_unlock_irq(&tgt_dev->sn_lock);
3043         return res;
3044 }
3045
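/*
 * Registers per-thread data for the current thread on tgt_dev. free_fn
 * is the destructor called when the last reference is put.
 */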
3046 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3047         struct scst_thr_data_hdr *data,
3048         void (*free_fn) (struct scst_thr_data_hdr *data))
3049 {
3050         data->pid = current->pid;
3051         atomic_set(&data->ref, 1);
3052         EXTRACHECKS_BUG_ON(free_fn == NULL);
3053         data->free_fn = free_fn;
3054         spin_lock(&tgt_dev->thr_data_lock);
3055         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3056         spin_unlock(&tgt_dev->thr_data_lock);
3057 }
3058 EXPORT_SYMBOL(scst_add_thr_data);
3059
3060 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3061 {
3062         spin_lock(&tgt_dev->thr_data_lock);
3063         while (!list_empty(&tgt_dev->thr_data_list)) {
3064                 struct scst_thr_data_hdr *d = list_entry(
3065                                 tgt_dev->thr_data_list.next, typeof(*d),
3066                                 thr_data_list_entry);
3067                 list_del(&d->thr_data_list_entry);
3068                 spin_unlock(&tgt_dev->thr_data_lock);
3069                 scst_thr_data_put(d);
3070                 spin_lock(&tgt_dev->thr_data_lock);
3071         }
3072         spin_unlock(&tgt_dev->thr_data_lock);
3073         return;
3074 }
3075 EXPORT_SYMBOL(scst_del_all_thr_data);
3076
3077 void scst_dev_del_all_thr_data(struct scst_device *dev)
3078 {
3079         struct scst_tgt_dev *tgt_dev;
3080
3081         TRACE_ENTRY();
3082
3083         mutex_lock(&scst_mutex);
3084
3085         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3086                                 dev_tgt_dev_list_entry) {
3087                 scst_del_all_thr_data(tgt_dev);
3088         }
3089
3090         mutex_unlock(&scst_mutex);
3091
3092         TRACE_EXIT();
3093         return;
3094 }
3095 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
3096
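/*
 * Looks up the data the current thread registered via scst_add_thr_data().
 * Returns it with an extra reference taken (drop it with
 * scst_thr_data_put()), or NULL if this thread has registered nothing.
 */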
3097 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
3098 {
3099         struct scst_thr_data_hdr *res = NULL, *d;
3100         struct task_struct *tsk = current;
3101
3102         spin_lock(&tgt_dev->thr_data_lock);
3103         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
3104                 if (d->pid == tsk->pid) {
3105                         res = d;
3106                         scst_thr_data_get(res);
3107                         break;
3108                 }
3109         }
3110         spin_unlock(&tgt_dev->thr_data_lock);
3111         return res;
3112 }
3113 EXPORT_SYMBOL(scst_find_thr_data);
3114
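/*
 * Device blocking: while dev->block_count > 0, new commands are kept off
 * the device and parked on dev->blocked_cmd_list; scst_unblock_dev()
 * reactivates them once the count drops back to zero. The usual pairing
 * from a command context is (sketch):
 *
 *	scst_block_dev_cmd(cmd, 0);	waits until no cmds are on the dev
 *	...exclusive work...
 *	scst_unblock_dev_cmd(cmd);
 */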
3115 /* dev_lock must be held and BHs disabled */
3116 void __scst_block_dev(struct scst_device *dev)
3117 {
3118         dev->block_count++;
3119         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3120 }
3121
3122 /* No locks */
3123 static void scst_block_dev(struct scst_device *dev, int outstanding)
3124 {
3125         spin_lock_bh(&dev->dev_lock);
3126         __scst_block_dev(dev);
3127         spin_unlock_bh(&dev->dev_lock);
3128
3129         /* spin_unlock_bh() is only a release barrier; a full one is needed before reading on_dev_count below */
3130         smp_mb();
3131
3132         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
3133                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3134         wait_event(dev->on_dev_waitQ,
3135                 atomic_read(&dev->on_dev_count) <= outstanding);
3136         TRACE_MGMT_DBG("%s", "wait_event() returned");
3137 }
3138
3139 /* No locks */
3140 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3141 {
3142         sBUG_ON(cmd->needs_unblocking);
3143
3144         cmd->needs_unblocking = 1;
3145         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3146                        cmd, (long long unsigned int)cmd->tag);
3147
3148         scst_block_dev(cmd->dev, outstanding);
3149 }
3150
3151 /* No locks */
3152 void scst_unblock_dev(struct scst_device *dev)
3153 {
3154         spin_lock_bh(&dev->dev_lock);
3155         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3156                 dev->block_count-1, dev);
3157         if (--dev->block_count == 0)
3158                 scst_unblock_cmds(dev);
3159         sBUG_ON(dev->block_count < 0);
3160         spin_unlock_bh(&dev->dev_lock);
3161 }
3162
3163 /* No locks */
3164 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3165 {
3166         scst_unblock_dev(cmd->dev);
3167         cmd->needs_unblocking = 0;
3168 }
3169
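/*
 * Accounts the command as outstanding on the device (on_dev_count) and
 * enforces blocking/serialization. Returns 0 if the command may go to the
 * device now, 1 if it was parked on dev->blocked_cmd_list to be
 * reactivated later by scst_unblock_cmds().
 */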
3170 /* No locks */
3171 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3172 {
3173         int res = 0;
3174         struct scst_device *dev = cmd->dev;
3175
3176         TRACE_ENTRY();
3177
3178         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3179
3180         atomic_inc(&dev->on_dev_count);
3181         cmd->dec_on_dev_needed = 1;
3182         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3183
3184         if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
3185                 /*
3186                  * The original command may already be blocking the device,
3187                  * so the REQUEST SENSE command must always pass.
3188                  */
3189                 goto out;
3190         }
3191
3192 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3193         spin_lock_bh(&dev->dev_lock);
3194         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3195                 goto out_unlock;
3196         if (dev->block_count > 0) {
3197                 scst_dec_on_dev_cmd(cmd);
3198                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3199                         "serializing (tag %llu, dev %p)", cmd, (long long unsigned int)cmd->tag, dev);
3200                 list_add_tail(&cmd->blocked_cmd_list_entry,
3201                               &dev->blocked_cmd_list);
3202                 res = 1;
3203         } else {
3204                 __scst_block_dev(dev);
3205                 cmd->inc_blocking = 1;
3206         }
3207         spin_unlock_bh(&dev->dev_lock);
3208         goto out;
3209 #else
3210 repeat:
3211         if (unlikely(dev->block_count > 0)) {
3212                 spin_lock_bh(&dev->dev_lock);
3213                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3214                         goto out_unlock;
3215                 barrier(); /* to reread block_count */
3216                 if (dev->block_count > 0) {
3217                         scst_dec_on_dev_cmd(cmd);
3218                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
3219                                 "serializing (tag %llu, dev %p)", cmd,
3220                                 (long long unsigned int)cmd->tag, dev);
3221                         list_add_tail(&cmd->blocked_cmd_list_entry,
3222                                       &dev->blocked_cmd_list);
3223                         res = 1;
3224                         spin_unlock_bh(&dev->dev_lock);
3225                         goto out;
3226                 } else {
3227                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3228                                 "continuing");
3229                 }
3230                 spin_unlock_bh(&dev->dev_lock);
3231         }
3232         if (unlikely(dev->dev_serialized)) {
3233                 spin_lock_bh(&dev->dev_lock);
3234                 barrier(); /* to reread block_count */
3235                 if (dev->block_count == 0) {
3236                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3237                                 "cmds due to serializing (dev %p)", cmd,
3238                                 (long long unsigned int)cmd->tag, dev);
3239                         __scst_block_dev(dev);
3240                         cmd->inc_blocking = 1;
3241                 } else {
3242                         spin_unlock_bh(&dev->dev_lock);
3243                         TRACE_MGMT_DBG("Somebody blocked the device, "
3244                                 "repeating (count %d)", dev->block_count);
3245                         goto repeat;
3246                 }
3247                 spin_unlock_bh(&dev->dev_lock);
3248         }
3249 #endif
3250
3251 out:
3252         TRACE_EXIT_RES(res);
3253         return res;
3254
3255 out_unlock:
3256         spin_unlock_bh(&dev->dev_lock);
3257         goto out;
3258 }
3259
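/*
 * Moves commands parked on dev->blocked_cmd_list back to their active cmd
 * lists. Without strict serializing, HEAD OF QUEUE commands go to the head
 * of the active list and all others to the tail; with strict serializing,
 * only commands at (or immediately after) the expected SN are released.
 */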
3260 /* Called under dev_lock */
3261 void scst_unblock_cmds(struct scst_device *dev)
3262 {
3263 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3264         struct scst_cmd *cmd, *t;
3265         unsigned long flags;
3266
3267         TRACE_ENTRY();
3268
3269         local_irq_save(flags);
3270         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3271                                  blocked_cmd_list_entry) {
3272                 int brk = 0;
3273                 /*
3274                  * Since only one cmd at a time is being executed, expected_sn
3275                  * can't change behind us while the corresponding cmd is on
3276                  * blocked_cmd_list, but we could be called before
3277                  * scst_inc_expected_sn().
3278                  *
3279                  * For HQ commands SN is not set.
3280                  */
3281                 if (likely(!cmd->internal && cmd->sn_set)) {
3282                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3283                         sBUG_ON(cmd->tgt_dev == NULL);
3285                         expected_sn = cmd->tgt_dev->expected_sn;
3286                         if (cmd->sn == expected_sn)
3287                                 brk = 1;
3288                         else if (cmd->sn != (expected_sn+1))
3289                                 continue;
3290                 }
3291
3292                 list_del(&cmd->blocked_cmd_list_entry);
3293                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3294                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3295                 list_add(&cmd->cmd_list_entry,
3296                          &cmd->cmd_lists->active_cmd_list);
3297                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3298                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3299                 if (brk)
3300                         break;
3301         }
3302         local_irq_restore(flags);
3303 #else /* CONFIG_SCST_STRICT_SERIALIZING */
3304         struct scst_cmd *cmd, *tcmd;
3305         unsigned long flags;
3306
3307         TRACE_ENTRY();
3308
3309         local_irq_save(flags);
3310         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3311                                  blocked_cmd_list_entry) {
3312                 list_del(&cmd->blocked_cmd_list_entry);
3313                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3314                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3315                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3316                         list_add(&cmd->cmd_list_entry,
3317                                 &cmd->cmd_lists->active_cmd_list);
3318                 else
3319                         list_add_tail(&cmd->cmd_list_entry,
3320                                 &cmd->cmd_lists->active_cmd_list);
3321                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3322                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3323         }
3324         local_irq_restore(flags);
3325 #endif /* CONFIG_SCST_STRICT_SERIALIZING */
3326
3327         TRACE_EXIT();
3328         return;
3329 }
3330
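/*
 * If out_of_sn_cmd carries the currently expected SN, advance it and
 * reactivate any deferred commands; otherwise mark the command out-of-SN
 * and record it on skipped_sn_list, so that scst_check_deferred_commands()
 * can step expected_sn over it later.
 */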
3331 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3332         struct scst_cmd *out_of_sn_cmd)
3333 {
3334         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3335
3336         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3337                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3338                 scst_make_deferred_commands_active(tgt_dev);
3339         } else {
3340                 out_of_sn_cmd->out_of_sn = 1;
3341                 spin_lock_irq(&tgt_dev->sn_lock);
3342                 tgt_dev->def_cmd_count++;
3343                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3344                               &tgt_dev->skipped_sn_list);
3345                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
3346                         " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3347                         tgt_dev->expected_sn);
3348                 spin_unlock_irq(&tgt_dev->sn_lock);
3349         }
3350
3351         return;
3352 }
3353
3354 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3355         struct scst_cmd *out_of_sn_cmd)
3356 {
3357         TRACE_ENTRY();
3358
3359         if (!out_of_sn_cmd->sn_set) {
3360                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3361                 goto out;
3362         }
3363
3364         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3365
3366 out:
3367         TRACE_EXIT();
3368         return;
3369 }
3370
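/*
 * Called on completion of a HEAD OF QUEUE command: drops hq_cmd_count and,
 * once no HQ commands remain, lets the deferred SN-ordered commands run
 * again.
 */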
3371 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3372 {
3373         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3374
3375         TRACE_ENTRY();
3376
3377         if (!cmd->hq_cmd_inced)
3378                 goto out;
3379
3380         spin_lock_irq(&tgt_dev->sn_lock);
3381         tgt_dev->hq_cmd_count--;
3382         spin_unlock_irq(&tgt_dev->sn_lock);
3383
3384         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3385
3386         /*
3387          * It is safe to check hq_cmd_count without holding the lock;
3388          * in the worst case we only get an unneeded run of the
3389          * deferred commands.
3390          */
3391         if (tgt_dev->hq_cmd_count == 0)
3392                 scst_make_deferred_commands_active(tgt_dev);
3393
3394 out:
3395         TRACE_EXIT();
3396         return;
3397 }
3398
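/*
 * Response-path handling of an aborted command: if it was aborted on
 * behalf of another initiator and has not completed yet, either return
 * TASK ABORTED status (if the device's TAS attribute is set) or drop the
 * command without any delivery or notification.
 */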
3399 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3400 {
3401         TRACE_ENTRY();
3402
3403         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3404                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3405                 atomic_read(&scst_cmd_count));
3406
3407         scst_done_cmd_mgmt(cmd);
3408
3409         smp_rmb();
3410         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3411                 if (cmd->completed) {
3412                         /* It's completed and it's OK to return its result */
3413                         goto out;
3414                 }
3415
3416                 if (cmd->dev->tas) {
3417                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3418                                 "(tag %llu), returning TASK ABORTED", cmd,
3419                                 (long long unsigned int)cmd->tag);
3420                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3421                 } else {
3422                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3423                                 "(tag %llu), aborting without delivery or "
3424                                 "notification",
3425                                 cmd, (long long unsigned int)cmd->tag);
3426                         /*
3427                          * There is no need to check/requeue possible UA,
3428                          * because, if it exists, it will be delivered
3429                          * by the "completed" branch above.
3430                          */
3431                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3432                 }
3433         }
3434
3435 out:
3436         TRACE_EXIT();
3437         return;
3438 }
3439
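/*
 * Builds scst_scsi_op_list, mapping a CDB opcode to the index of its first
 * entry in the opcode-sorted scst_scsi_op_table, or to SCST_CDB_TBL_SIZE
 * for unknown opcodes. CDB lookup can then start at the right slot instead
 * of scanning the whole table, roughly like this (sketch):
 *
 *	int i = scst_scsi_op_list[cdb[0]];
 *	while ((i < SCST_CDB_TBL_SIZE) &&
 *	       (scst_scsi_op_table[i].ops == cdb[0])) {
 *		...match on device type, etc....
 *		i++;
 *	}
 */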
3440 void __init scst_scsi_op_list_init(void)
3441 {
3442         int i;
3443         uint8_t op = 0xff;
3444
3445         TRACE_ENTRY();
3446
3447         for (i = 0; i < 256; i++)
3448                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3449
3450         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3451                 if (scst_scsi_op_table[i].ops != op) {
3452                         op = scst_scsi_op_table[i].ops;
3453                         scst_scsi_op_list[op] = i;
3454                 }
3455         }
3456
3457         TRACE_EXIT();
3458         return;
3459 }
3460
3461 #ifdef CONFIG_SCST_DEBUG
3462 /* Original taken from the XFS code */
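/*
 * This is the "minimal standard" Park-Miller/Lehmer generator
 * x = 16807 * x mod (2^31 - 1), computed with Schrage's trick
 * (127773 = m / a, 2836 = m % a) to avoid intermediate overflow.
 */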
3463 unsigned long scst_random(void)
3464 {
3465         static int Inited;
3466         static unsigned long RandomValue;
3467         static DEFINE_SPINLOCK(lock);
3468         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3469         register long rv;
3470         register long lo;
3471         register long hi;
3472         unsigned long flags;
3473
3474         spin_lock_irqsave(&lock, flags);
3475         if (!Inited) {
3476                 RandomValue = jiffies;
3477                 Inited = 1;
3478         }
3479         rv = RandomValue;
3480         hi = rv / 127773;
3481         lo = rv % 127773;
3482         rv = 16807 * lo - 2836 * hi;
3483         if (rv <= 0)
3484                 rv += 2147483647;
3485         RandomValue = rv;
3486         spin_unlock_irqrestore(&lock, flags);
3487         return rv;
3488 }
3489 EXPORT_SYMBOL(scst_random);
3490 #endif
3491
3492 #ifdef CONFIG_SCST_DEBUG_TM
3493
3494 #define TM_DBG_STATE_ABORT              0
3495 #define TM_DBG_STATE_RESET              1
3496 #define TM_DBG_STATE_OFFLINE            2
3497
3498 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
3499
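/*
 * TM debugging state machine (applied to LUN 0 only): in the ABORT state
 * every 50th command is delayed for about a minute, so the initiator's
 * timeout/abort handling gets exercised; after the configured number of
 * passes the state advances to RESET (all commands delayed until a TM
 * function arrives) and, with CONFIG_SCST_TM_DBG_GO_OFFLINE, to OFFLINE.
 * tm_dbg_on_state_num_passes[] below sets how long each state lasts.
 */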
3500 static void tm_dbg_timer_fn(unsigned long arg);
3501
3502 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3503 /* All serialized by scst_tm_dbg_lock */
3504 static struct {
3505         unsigned int tm_dbg_release:1;
3506         unsigned int tm_dbg_blocked:1;
3507 } tm_dbg_flags;
3508 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3509 static int tm_dbg_delayed_cmds_count;
3510 static int tm_dbg_passed_cmds_count;
3511 static int tm_dbg_state;
3512 static int tm_dbg_on_state_passes;
3513 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3514 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3515
3516 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3517
3518 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3519         struct scst_acg_dev *acg_dev)
3520 {
3521         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3522                 unsigned long flags;
3523                 /* Do TM debugging only for LUN 0 */
3524                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3525                 tm_dbg_p_cmd_list_waitQ =
3526                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3527                 tm_dbg_state = INIT_TM_DBG_STATE;
3528                 tm_dbg_on_state_passes =
3529                         tm_dbg_on_state_num_passes[tm_dbg_state];
3530                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3531                 PRINT_INFO("LUN 0 connected via target driver %s is under "
3532                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3533                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3534         }
3535 }
3536
3537 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3538 {
3539         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3540                 unsigned long flags;
3541                 del_timer_sync(&tm_dbg_timer);
3542                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3543                 tm_dbg_p_cmd_list_waitQ = NULL;
3544                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3545         }
3546 }
3547
3548 static void tm_dbg_timer_fn(unsigned long arg)
3549 {
3550         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3551         tm_dbg_flags.tm_dbg_release = 1;
3552         smp_wmb();
3553         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3554 }
3555
3556 /* Called under scst_tm_dbg_lock and IRQs off */
3557 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3558 {
3559         switch (tm_dbg_state) {
3560         case TM_DBG_STATE_ABORT:
3561                 if (tm_dbg_delayed_cmds_count == 0) {
3562                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3563                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
3564                                 " for %ld.%02ld seconds (%ld HZ), "
3565                                 "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3566                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3567                         mod_timer(&tm_dbg_timer, jiffies + d);
3568 #if 0
3569                         tm_dbg_flags.tm_dbg_blocked = 1;
3570 #endif
3571                 } else {
3572                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3573                                 "(tag %llu), delayed_cmds_count=%d, "
3574                                 "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3575                                 tm_dbg_delayed_cmds_count,
3576                                 tm_dbg_on_state_passes);
3577                         if (tm_dbg_delayed_cmds_count == 2)
3578                                 tm_dbg_flags.tm_dbg_blocked = 0;
3579                 }
3580                 break;
3581
3582         case TM_DBG_STATE_RESET:
3583         case TM_DBG_STATE_OFFLINE:
3584                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3585                         "(tag %llu), delayed_cmds_count=%d, "
3586                         "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3587                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3588                 tm_dbg_flags.tm_dbg_blocked = 1;
3589                 break;
3590
3591         default:
3592                 sBUG();
3593         }
3594         /* IRQs already off */
3595         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3596         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3597         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3598         cmd->tm_dbg_delayed = 1;
3599         tm_dbg_delayed_cmds_count++;
3600         return;
3601 }
3602
3603 /* No locks */
3604 void tm_dbg_check_released_cmds(void)
3605 {
3606         if (tm_dbg_flags.tm_dbg_release) {
3607                 struct scst_cmd *cmd, *tc;
3608                 spin_lock_irq(&scst_tm_dbg_lock);
3609                 list_for_each_entry_safe_reverse(cmd, tc,
3610                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3611                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3612                                 "delayed_cmds_count=%d", cmd, (long long unsigned int)cmd->tag,
3613                                 tm_dbg_delayed_cmds_count);
3614                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3615                         list_move(&cmd->cmd_list_entry,
3616                                 &cmd->cmd_lists->active_cmd_list);
3617                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3618                 }
3619                 tm_dbg_flags.tm_dbg_release = 0;
3620                 spin_unlock_irq(&scst_tm_dbg_lock);
3621         }
3622 }
3623
3624 /* Called under scst_tm_dbg_lock */
3625 static void tm_dbg_change_state(void)
3626 {
3627         tm_dbg_flags.tm_dbg_blocked = 0;
3628         if (--tm_dbg_on_state_passes == 0) {
3629                 switch (tm_dbg_state) {
3630                 case TM_DBG_STATE_ABORT:
3631                         TRACE_MGMT_DBG("%s", "Changing "
3632                             "tm_dbg_state to RESET");
3633                         tm_dbg_state =
3634                                 TM_DBG_STATE_RESET;
3635                         tm_dbg_flags.tm_dbg_blocked = 0;
3636                         break;
3637                 case TM_DBG_STATE_RESET:
3638                 case TM_DBG_STATE_OFFLINE:
3639 #ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
3640                             TRACE_MGMT_DBG("%s", "Changing "
3641                         TRACE_MGMT_DBG("%s", "Changing "
3642                                 "tm_dbg_state to OFFLINE");
3643                         tm_dbg_state =
3644                                 TM_DBG_STATE_OFFLINE;
3645 #else
3646                         TRACE_MGMT_DBG("%s", "Changing "
3647                                 "tm_dbg_state to ABORT");
3648                         tm_dbg_state =
3649                                 TM_DBG_STATE_ABORT;
3650                         break;
3651                 default:
3652                         sBUG();
3653                 }
3654                 tm_dbg_on_state_passes =
3655                     tm_dbg_on_state_num_passes[tm_dbg_state];
3656         }
3657
3658         TRACE_MGMT_DBG("%s", "Deleting timer");
3659         del_timer(&tm_dbg_timer);
3660 }
3661
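/*
 * Returns 1 if the command was put on the delayed list and must not be
 * processed now, 0 if it should be processed normally. Each command is
 * checked at most once; tm_dbg_immut marks it as already decided.
 */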
3662 /* No locks */
3663 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3664 {
3665         int res = 0;
3666         unsigned long flags;
3667
3668         if (cmd->tm_dbg_immut)
3669                 goto out;
3670
3671         if (cmd->tm_dbg_delayed) {
3672                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3673                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3674                         "delayed_cmds_count=%d", cmd, (long long unsigned int)cmd->tag,
3675                         tm_dbg_delayed_cmds_count);
3676
3677                 cmd->tm_dbg_immut = 1;
3678                 tm_dbg_delayed_cmds_count--;
3679                 if ((tm_dbg_delayed_cmds_count == 0) &&
3680                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3681                         tm_dbg_change_state();
3682                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3683         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3684                                         &cmd->tgt_dev->tgt_dev_flags)) {
3685                 /* Delay every 50th command */
3686                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3687                 if (tm_dbg_flags.tm_dbg_blocked ||
3688                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3689                         tm_dbg_delay_cmd(cmd);
3690                         res = 1;
3691                 } else
3692                         cmd->tm_dbg_immut = 1;
3693                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3694         }
3695
3696 out:
3697         return res;
3698 }
3699
3700 /* No locks */
3701 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3702 {
3703         struct scst_cmd *c;
3704         unsigned long flags;
3705
3706         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3707         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3708                                 cmd_list_entry) {
3709                 if (c == cmd) {
3710                         TRACE_MGMT_DBG("Abort request for "
3711                                 "delayed cmd %p (tag=%llu), moving it to "
3712                                 "active cmd list (delayed_cmds_count=%d)",
3713                                 c, (long long unsigned int)c->tag, tm_dbg_delayed_cmds_count);
3714
3715                         if (!test_bit(SCST_CMD_ABORTED_OTHER,
3716                                             &cmd->cmd_flags)) {
3717                                 /* Test how completed commands are handled */
3718                                 if ((scst_random() % 10) == 5) {
3719                                         scst_set_cmd_error(cmd,
3720                                                 SCST_LOAD_SENSE(
3721                                                 scst_sense_hardw_error));
3722                                         /* It's completed now */
3723                                 }
3724                         }
3725
3726                         spin_lock(&c->cmd_lists->cmd_list_lock);
3727                         list_move(&c->cmd_list_entry,
3728                                 &c->cmd_lists->active_cmd_list);
3729                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3730                         spin_unlock(&c->cmd_lists->cmd_list_lock);
3731                         break;
3732                 }
3733         }
3734         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3735 }
3736
3737 /* Might be called under scst_mutex */
3738 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3739 {
3740         unsigned long flags;
3741
3742         if (dev != NULL) {
3743                 struct scst_tgt_dev *tgt_dev;
3744                 bool found = false;
3745
3746                 spin_lock_bh(&dev->dev_lock);
3747                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3748                                             dev_tgt_dev_list_entry) {
3749                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3750                                         &tgt_dev->tgt_dev_flags)) {
3751                                 found = true;
3752                                 break;
3753                         }
3754                 }
3755                 spin_unlock_bh(&dev->dev_lock);
3756
3757                 if (!found)
3758                         goto out;
3759         }
3760
3761         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3762         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3763                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3764                         tm_dbg_delayed_cmds_count);