/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int head);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

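/*
 * Allocates a zeroed sense buffer for cmd from scst_sense_mempool.
 * In atomic context GFP_ATOMIC is used and the allocation may fail;
 * otherwise GFP_KERNEL|__GFP_NOFAIL is used, so failure is not expected.
 */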
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

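/*
 * Allocates a sense buffer for cmd and copies the given sense data
 * into it, truncated to SCST_SENSE_BUFFERSIZE if necessary.
 */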
int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

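/*
 * Sets the SAM status of cmd and completes it with no data transferred.
 * The original response length and data direction are saved first (in
 * the dbl_ua_orig_* fields, apparently for the double UA handling),
 * then reset.
 */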
void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

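/*
 * Completes cmd with CHECK CONDITION and fixed-format sense built from
 * the given key/ASC/ASCQ. The sense buffer is allocated atomically.
 */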
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

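/*
 * Fills buffer with fixed-format sense data (response code 0x70,
 * current error) for the given key/ASC/ASCQ.
 */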
void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

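/*
 * Completes cmd with BUSY or TASK SET FULL: BUSY if this is the only
 * outstanding command in the session or the session is still
 * initializing, TASK SET FULL otherwise.
 */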
void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

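/*
 * Replaces the head Unit Attention of every tgt_dev in the session
 * with the given key/ASC/ASCQ. The head UA is expected to be the
 * initial RESET UA; anything else is reported as an error.
 */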
void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
                asc, ascq);

        /* Protect sess_tgt_dev_list_hash */
        mutex_lock(&scst_mutex);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                struct scst_tgt_dev *tgt_dev;

                list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        spin_lock_bh(&tgt_dev->tgt_dev_lock);
                        if (!list_empty(&tgt_dev->UA_list)) {
                                struct scst_tgt_dev_UA *ua;
                                uint8_t *sense;

                                ua = list_entry(tgt_dev->UA_list.next,
                                        typeof(*ua), UA_list_entry);
                                sense = ua->UA_sense_buffer;
                                /*
                                 * ASC/ASCQ 0x29/0: POWER ON, RESET, OR BUS
                                 * DEVICE RESET OCCURRED
                                 */
                                if ((sense[2] == UNIT_ATTENTION) &&
                                    (sense[12] == 0x29) &&
                                    (sense[13] == 0)) {
                                        scst_set_sense(sense,
                                                sizeof(ua->UA_sense_buffer),
                                                key, asc, ascq);
                                } else
                                        PRINT_ERROR("%s",
                                                "The first UA isn't RESET UA");
                        } else
                                PRINT_ERROR("%s", "There's no RESET UA to "
                                        "replace");
                        spin_unlock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

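/*
 * Returns the state a failed cmd should be moved to: commands that have
 * not yet passed the parse stage go straight to response transmission,
 * all others go through the device-done stage first.
 */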
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        default:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

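/*
 * Truncates the response of cmd to resp_data_len bytes by shortening
 * the SG vector in place. The original SG geometry is saved so that
 * scst_check_restore_sg_buff() can restore it later.
 */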
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

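/*
 * Initializes a per-device memory limit. The shift arithmetic converts
 * scst_max_dev_cmd_mem (apparently given in megabytes) into a number
 * of pages.
 */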
void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity
 * in this session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        /*
         * Record which processing stages may continue in atomic context,
         * based on the handler's and target template's atomic capabilities
         */
        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No other locks are supposed to be held; scst_mutex must be held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        if (queue_UA) {
                spin_lock_bh(&scst_temp_UA_lock);
                scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                        SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
                scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
                spin_unlock_bh(&scst_temp_UA_lock);
        }

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity
 * in this session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity
 * in this session.
 */
static void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %lld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%lld, rd_only %d)",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

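/*
 * Allocates an internal command (e.g. REQUEST SENSE) that inherits the
 * session, device, LUN and atomic context of orig_cmd and is queued
 * with HEAD OF QUEUE semantics.
 */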
static struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

static void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

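/*
 * Queues an internal REQUEST SENSE command at the head of the active
 * command list so that the sense data for orig_cmd can be fetched from
 * the device. Any stale sense buffer of orig_cmd is freed first.
 */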
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        if (orig_cmd->sense != NULL) {
                TRACE_MEM("Releasing sense %p (orig_cmd %p)",
                        orig_cmd->sense, orig_cmd);
                mempool_free(orig_cmd->sense, scst_sense_mempool);
                orig_cmd->sense = NULL;
        }

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = sbuf_size;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}

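/*
 * Completion of the internal REQUEST SENSE command: copies the returned
 * sense into orig_cmd, or sets HARDWARE ERROR if no valid sense was
 * obtained, then frees the internal command.
 */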
struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request) != NULL) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, 15, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        /* Retry a failing RELEASE up to 5 times */
        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, 15, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

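/*
 * Allocates and initializes a session for the given target: the
 * tgt_dev hash and deferred lists are set up, the initiator name is
 * copied, and the session starts in the SCST_SESS_IPH_INITING phase.
 */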
struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
                PRINT_CRIT_ERROR("session %p is going to shut down with "
                        "unknown shut phase %lx", sess, sess->shut_phase);
                sBUG();
        }

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);

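/*
 * Allocates a command and sets the defaults: a single reference,
 * SIMPLE queue type, the default timeout and the global command lists.
 */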
struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        cmd->start_time = jiffies;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

        cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
        cmd->dbl_ua_orig_resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /*
         * At this point tgt_dev can be dead, but the pointer remains non-NULL
         */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks are supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %llu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        /*
         * Target driver can already free sg buffer before calling
         * scst_tgt_cmd_done(). E.g., scst_local has to do that.
         */
        if (!cmd->tgt_data_buf_alloced)
                scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
                if (unlikely(!cmd->sent_for_exec)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                            "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
                            cmd, cmd->cdb[0], cmd->tgtt->name,
                            (long long unsigned int)cmd->lun,
                            cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks are supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change.
         */
        atomic_inc(&tgt->finished_cmds);
        /* See comment in scst_queue_retry_cmd() */
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)",
                                c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                  &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

1643 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
1644 {
1645         struct scst_mgmt_cmd *mcmd;
1646
1647         TRACE_ENTRY();
1648
1649         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1650         if (mcmd == NULL) {
1651                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1652                         "failed, some commands and their data could leak");
1653                 goto out;
1654         }
1655         memset(mcmd, 0, sizeof(*mcmd));
1656
1657 out:
1658         TRACE_EXIT();
1659         return mcmd;
1660 }
1661
1662 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1663 {
1664         unsigned long flags;
1665
1666         TRACE_ENTRY();
1667
1668         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1669         atomic_dec(&mcmd->sess->sess_cmd_count);
1670         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1671
1672         scst_sess_put(mcmd->sess);
1673
1674         if (mcmd->mcmd_tgt_dev != NULL)
1675                 __scst_put();
1676
1677         mempool_free(mcmd, scst_mgmt_mempool);
1678
1679         TRACE_EXIT();
1680         return;
1681 }
1682
1683 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1684 int scst_alloc_request(struct scst_cmd *cmd)
1685 {
1686         int res = 0;
1687         struct scsi_request *req;
1688         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1689
1690         TRACE_ENTRY();
1691
1692         /* cmd->dev->scsi_dev must be non-NULL here */
1693         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1694         if (req == NULL) {
1695                 TRACE(TRACE_OUT_OF_MEM, "%s",
1696                       "Allocation of scsi_request failed");
1697                 res = -ENOMEM;
1698                 goto out;
1699         }
1700
1701         cmd->scsi_req = req;
1702
1703         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1704         req->sr_cmd_len = cmd->cdb_len;
1705         req->sr_data_direction = cmd->data_direction;
1706         req->sr_use_sg = cmd->sg_cnt;
1707         req->sr_bufflen = cmd->bufflen;
1708         req->sr_buffer = cmd->sg;
1709         req->sr_request->rq_disk = cmd->dev->rq_disk;
1710         req->sr_sense_buffer[0] = 0;
1711
1712         cmd->scsi_req->upper_private_data = cmd;
1713
1714 out:
1715         TRACE_EXIT();
1716         return res;
1717 }
1718
1719 void scst_release_request(struct scst_cmd *cmd)
1720 {
1721         scsi_release_request(cmd->scsi_req);
1722         cmd->scsi_req = NULL;
1723 }
1724 #endif
1725
1726 int scst_alloc_space(struct scst_cmd *cmd)
1727 {
1728         gfp_t gfp_mask;
1729         int res = -ENOMEM;
1730         int atomic = scst_cmd_atomic(cmd);
1731         int flags;
1732         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1733
1734         TRACE_ENTRY();
1735
1736         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1737
1738         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1739         if (cmd->no_sgv)
1740                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1741
1742         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1743                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1744         if (cmd->sg == NULL)
1745                 goto out;
1746
1747         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1748                 static int ll;
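                /* Log only the first 10 occurrences to avoid flooding */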
1749                 if (ll < 10) {
1750                         PRINT_INFO("Unable to complete command due to "
1751                                 "SG IO count limitation (requested %d, "
1752                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1753                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1754                         ll++;
1755                 }
1756                 goto out_sg_free;
1757         }
1758
1759         res = 0;
1760
1761 out:
1762         TRACE_EXIT();
1763         return res;
1764
1765 out_sg_free:
1766         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1767         cmd->sgv = NULL;
1768         cmd->sg = NULL;
1769         cmd->sg_cnt = 0;
1770         goto out;
1771 }
1772
1773 static void scst_release_space(struct scst_cmd *cmd)
1774 {
1775         TRACE_ENTRY();
1776
1777         if (cmd->sgv == NULL)
1778                 goto out;
1779
1780         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
1781                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
1782                 goto out;
1783         }
1784
1785         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1786
1787         cmd->sgv = NULL;
1788         cmd->sg_cnt = 0;
1789         cmd->sg = NULL;
1790         cmd->bufflen = 0;
1791         cmd->data_len = 0;
1792
1793 out:
1794         TRACE_EXIT();
1795         return;
1796 }
1797
1798 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
1799 {
1800         struct scatterlist *src_sg, *dst_sg;
1801         unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
1802         struct page *src, *dst;
1803         unsigned int s, d, to_copy;
1804
1805         TRACE_ENTRY();
1806
1807         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
1808                 src_sg = cmd->tgt_sg;
1809                 src_sg_cnt = cmd->tgt_sg_cnt;
1810                 dst_sg = cmd->sg;
1811                 to_copy = cmd->bufflen;
1812         } else {
1813                 src_sg = cmd->sg;
1814                 src_sg_cnt = cmd->sg_cnt;
1815                 dst_sg = cmd->tgt_sg;
1816                 to_copy = cmd->resp_data_len;
1817         }
1818
1819         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
1820                 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
1821                 to_copy);
1822
1823         dst = sg_page(dst_sg);
1824         dst_len = dst_sg->length;
1825         dst_offs = dst_sg->offset;
1826
1827         s = 0;
1828         d = 0;
1829         src_offs = 0;
1830         while (s < src_sg_cnt) {
1831                 src = sg_page(&src_sg[s]);
1832                 src_len = src_sg[s].length;
1833                 src_offs += src_sg[s].offset;
1834
1835                 do {
1836                         unsigned int n;
1837
1838                         /*
1839                          * Himem pages are not allowed here, see the
1840                          * corresponding #warning in scst_main.c. Correct
1841          * your target driver or dev handler so that it does not
1842          * allocate such pages!
1843                          */
1844                         EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
1845                                            PageHighMem(src));
1846
1847                         TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
1848                                 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
1849                                 cmd, to_copy, src, src_len, src_offs, dst,
1850                                 dst_len, dst_offs);
1851
1852                         if ((src_offs == 0) && (dst_offs == 0) &&
1853                             (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
1854                                 copy_page(page_address(dst), page_address(src));
1855                                 n = PAGE_SIZE;
1856                         } else {
1857                                 n = min(PAGE_SIZE - dst_offs,
1858                                         PAGE_SIZE - src_offs);
1859                                 n = min(n, src_len);
1860                                 n = min(n, dst_len);
1861                                 memcpy(page_address(dst) + dst_offs,
1862                                        page_address(src) + src_offs, n);
1863                                 dst_offs -= min(n, dst_offs);
1864                                 src_offs -= min(n, src_offs);
1865                         }
1866
1867                         TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
1868
1869                         to_copy -= n;
1870                         if (to_copy <= 0)
1871                                 goto out;
1872
1873                         src_len -= n;
1874                         dst_len -= n;
1875                         if (dst_len == 0) {
1876                                 d++;
1877                                 dst = sg_page(&dst_sg[d]);
1878                                 dst_len = dst_sg[d].length;
1879                                 dst_offs += dst_sg[d].offset;
1880                         }
1881                 } while (src_len > 0);
1882
1883                 s++;
1884         }
1885
1886 out:
1887         TRACE_EXIT();
1888         return;
1889 }
1890
1891 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1892
1893 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1894 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1895
1896 int scst_get_cdb_len(const uint8_t *cdb)
1897 {
1898         return SCST_GET_CDB_LEN(cdb[0]);
1899 }
1900
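/*
 * Worked example (illustrative only): for READ(10), opcode 0x28, the
 * group code is (0x28 >> 5) & 0x7 = 1, so SCST_CDB_LENGTH[1] gives a
 * 10-byte CDB. Groups 3, 6 and 7 are reserved/vendor-specific, hence
 * the -1 entries above.
 */
#if 0
/* Minimal usage sketch, not built */
static void example_cdb_len(void)
{
	static const uint8_t cdb[10] = { 0x28 /* READ(10) */ };
	TRACE_DBG("CDB length %d", scst_get_cdb_len(cdb)); /* prints 10 */
}
#endif
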
1901 /* get_trans_len_x extracts x bytes from the CDB as a length, starting at off */
1902
1903 /* for special commands */
1904 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1905 {
1906         cmd->bufflen = 6;
1907         return 0;
1908 }
1909
1910 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1911 {
1912         cmd->bufflen = READ_CAP_LEN;
1913         return 0;
1914 }
1915
1916 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1917 {
1918         cmd->bufflen = 1;
1919         return 0;
1920 }
1921
1922 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1923 {
1924         uint8_t *p = (uint8_t *)cmd->cdb + off;
1925         int res = 0;
1926
1927         cmd->bufflen = 0;
1928         cmd->bufflen |= ((u32)p[0]) << 8;
1929         cmd->bufflen |= ((u32)p[1]);
1930
1931         switch (cmd->cdb[1] & 0x1f) {
1932         case 0:
1933         case 1:
1934         case 6:
1935                 if (cmd->bufflen != 0) {
1936                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1937                                 "allocation length for service action %x",
1938                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1939                         goto out_inval;
1940                 }
1941                 break;
1942         }
1943
1944         switch (cmd->cdb[1] & 0x1f) {
1945         case 0:
1946         case 1:
1947                 cmd->bufflen = 20;
1948                 break;
1949         case 6:
1950                 cmd->bufflen = 32;
1951                 break;
1952         case 8:
1953                 cmd->bufflen = max(28, cmd->bufflen);
1954                 break;
1955         default:
1956                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1957                         cmd->cdb[1] & 0x1f);
1958                 goto out_inval;
1959         }
1960
1961 out:
1962         return res;
1963
1964 out_inval:
1965         scst_set_cmd_error(cmd,
1966                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1967         res = 1;
1968         goto out;
1969 }
1970
1971 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1972 {
1973         cmd->bufflen = (u32)cmd->cdb[off];
1974         return 0;
1975 }
1976
1977 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1978 {
1979         cmd->bufflen = (u32)cmd->cdb[off];
1980         if (cmd->bufflen == 0)
1981                 cmd->bufflen = 256;
1982         return 0;
1983 }
1984
1985 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1986 {
1987         const uint8_t *p = cmd->cdb + off;
1988
1989         cmd->bufflen = 0;
1990         cmd->bufflen |= ((u32)p[0]) << 8;
1991         cmd->bufflen |= ((u32)p[1]);
1992
1993         return 0;
1994 }
1995
1996 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1997 {
1998         const uint8_t *p = cmd->cdb + off;
1999
2000         cmd->bufflen = 0;
2001         cmd->bufflen |= ((u32)p[0]) << 16;
2002         cmd->bufflen |= ((u32)p[1]) << 8;
2003         cmd->bufflen |= ((u32)p[2]);
2004
2005         return 0;
2006 }
2007
2008 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
2009 {
2010         const uint8_t *p = cmd->cdb + off;
2011
2012         cmd->bufflen = 0;
2013         cmd->bufflen |= ((u32)p[0]) << 24;
2014         cmd->bufflen |= ((u32)p[1]) << 16;
2015         cmd->bufflen |= ((u32)p[2]) << 8;
2016         cmd->bufflen |= ((u32)p[3]);
2017
2018         return 0;
2019 }
2020
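/*
 * The helpers above are plain big-endian loads. Sketch (illustrative
 * only) of an equivalent get_trans_len_4() using the kernel's
 * unaligned helpers, where available:
 */
#if 0
#include <asm/unaligned.h>

static int get_trans_len_4_alt(struct scst_cmd *cmd, uint8_t off)
{
	/* E.g. CDB bytes 00 00 10 00 at off yield bufflen 0x1000 */
	cmd->bufflen = get_unaligned_be32(cmd->cdb + off);
	return 0;
}
#endif
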
2021 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
2022 {
2023         cmd->bufflen = 0;
2024         return 0;
2025 }
2026
2027 int scst_get_cdb_info(struct scst_cmd *cmd)
2028 {
2029         int dev_type = cmd->dev->handler->type;
2030         int i, res = 0;
2031         uint8_t op;
2032         const struct scst_sdbops *ptr = NULL;
2033
2034         TRACE_ENTRY();
2035
2036         op = cmd->cdb[0];       /* get the opcode byte */
2037
2038         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
2039                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
2040                 dev_type);
2041
2042         i = scst_scsi_op_list[op];
2043         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
2044                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
2045                         ptr = &scst_scsi_op_table[i];
2046                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
2047                               ptr->ops, ptr->devkey[0], /* disk     */
2048                               ptr->devkey[1],   /* tape     */
2049                               ptr->devkey[2],   /* printer */
2050                               ptr->devkey[3],   /* cpu      */
2051                               ptr->devkey[4],   /* cdr      */
2052                               ptr->devkey[5],   /* cdrom    */
2053                               ptr->devkey[6],   /* scanner */
2054                               ptr->devkey[7],   /* worm     */
2055                               ptr->devkey[8],   /* changer */
2056                               ptr->devkey[9],   /* commdev */
2057                               ptr->op_name);
2058                         TRACE_DBG("direction=%d flags=%d off=%d",
2059                               ptr->direction,
2060                               ptr->flags,
2061                               ptr->off);
2062                         break;
2063                 }
2064                 i++;
2065         }
2066
2067         if (ptr == NULL) {
2068                 /* opcode not found or currently not supported */
2069                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
2070                       dev_type);
2071                 res = -1;
2072                 cmd->op_flags = SCST_INFO_INVALID;
2073                 goto out;
2074         }
2075
2076         cmd->cdb_len = SCST_GET_CDB_LEN(op);
2077         cmd->op_name = ptr->op_name;
2078         cmd->data_direction = ptr->direction;
2079         cmd->op_flags = ptr->flags;
2080         res = (*ptr->get_trans_len)(cmd, ptr->off);
2081
2082 out:
2083         TRACE_EXIT();
2084         return res;
2085 }
2086 EXPORT_SYMBOL(scst_get_cdb_info);
2087
2088 /*
2089  * Routine to extract a LUN number from an 8-byte LUN structure in
2090  * network byte order (BE) (see SAM-2, Section 4.12.3, page 40).
2091  * Supports the peripheral, flat space and logical unit addressing
2092  * methods; extended addressing is not implemented.
2093  */
2094 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2095 {
2096         uint64_t res = NO_SUCH_LUN;
2097         int address_method;
2098
2099         TRACE_ENTRY();
2100
2101         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2102
2103         if (unlikely(len < 2)) {
2104                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2105                         "more", len);
2106                 goto out;
2107         }
2108
2109         if (len > 2) {
2110                 switch (len) {
2111                 case 8:
2112                         if ((*((uint64_t *)lun) &
2113                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2114                                 goto out_err;
2115                         break;
2116                 case 4:
2117                         if (*((uint16_t *)&lun[2]) != 0)
2118                                 goto out_err;
2119                         break;
2120                 case 6:
2121                         if (*((uint32_t *)&lun[2]) != 0)
2122                                 goto out_err;
2123                         break;
2124                 default:
2125                         goto out_err;
2126                 }
2127         }
2128
2129         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
2130         switch (address_method) {
2131         case 0: /* peripheral device addressing method */
2132 #if 0
2133                 if (*lun) {
2134                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
2135                              "peripheral device addressing method 0x%02x, "
2136                              "expected 0", *lun);
2137                         break;
2138                 }
2139                 res = *(lun + 1);
2140                 break;
2141 #else
2142                 /*
2143                  * It seems to be legal to treat it as the flat space
2144                  * addressing method as well
2145                  */
2146
2147                 /* fall through */
2148 #endif
2149
2150         case 1: /* flat space addressing method */
2151                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2152                 break;
2153
2154         case 2: /* logical unit addressing method */
2155                 if (*lun & 0x3f) {
2156                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2157                                     "addressing method 0x%02x, expected 0",
2158                                     *lun & 0x3f);
2159                         break;
2160                 }
2161                 if (*(lun + 1) & 0xe0) {
2162                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2163                                     "addressing method 0x%02x, expected 0",
2164                                     (*(lun + 1) & 0xe0) >> 5);
2165                         break;
2166                 }
2167                 res = *(lun + 1) & 0x1f;
2168                 break;
2169
2170         case 3: /* extended logical unit addressing method */
2171         default:
2172                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2173                             address_method);
2174                 break;
2175         }
2176
2177 out:
2178         TRACE_EXIT_RES((int)res);
2179         return res;
2180
2181 out_err:
2182         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2183         goto out;
2184 }
2185
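/*
 * Usage sketch (illustrative only): a flat space addressed LUN 5 is
 * encoded as 40 05 00 00 00 00 00 00; the top two bits of byte 0
 * select method 1 and the remaining 14 bits carry the LUN number.
 */
#if 0
static void example_unpack_lun(void)
{
	static const uint8_t lun_buf[8] = { 0x40, 0x05 };
	uint64_t lun = scst_unpack_lun(lun_buf, sizeof(lun_buf));
	/* lun == 5 here */
}
#endif
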
2186 int scst_calc_block_shift(int sector_size)
2187 {
2188         int block_shift = 0;
2189         int t;
2190
2191         if (sector_size == 0)
2192                 sector_size = 512;
2193
2194         t = sector_size;
2195         while (1) {
2196                 if ((t & 1) != 0)
2197                         break;
2198                 t >>= 1;
2199                 block_shift++;
2200         }
2201         if (block_shift < 9) {
2202                 PRINT_ERROR("Wrong sector size %d", sector_size);
2203                 block_shift = -1;
2204         }
2205
2206         TRACE_EXIT_RES(block_shift);
2207         return block_shift;
2208 }
2209 EXPORT_SYMBOL(scst_calc_block_shift);
2210
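/*
 * Worked examples (illustrative): 512 -> 9, 4096 -> 12. The shift is
 * the number of trailing zero bits in sector_size, and anything with
 * fewer than 9 of them (e.g. 520) is rejected with -1.
 */
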
2211 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2212         int (*get_block_shift)(struct scst_cmd *cmd))
2213 {
2214         int res = 0;
2215
2216         TRACE_ENTRY();
2217
2218         /*
2219          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2220          * therefore change them only if necessary
2221          */
2222
2223         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2224               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2225
2226         switch (cmd->cdb[0]) {
2227         case SERVICE_ACTION_IN:
2228                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2229                         cmd->bufflen = READ_CAP16_LEN;
2230                         cmd->data_direction = SCST_DATA_READ;
2231                 }
2232                 break;
2233         case VERIFY_6:
2234         case VERIFY:
2235         case VERIFY_12:
2236         case VERIFY_16:
2237                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2238                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2239                         cmd->bufflen = 0;
2240                         goto set_timeout;
2241                 } else
2242                         cmd->data_len = 0;
2243                 break;
2244         default:
2245                 /* It's all good */
2246                 break;
2247         }
2248
2249         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2250                 /*
2251                  * No need for locks here, since *_detach() cannot be
2252                  * called while there are outstanding commands.
2253                  */
2254                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2255         }
2256
2257 set_timeout:
2258         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2259                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2260         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2261                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2262         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2263                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2264
2265         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2266               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2267
2268         TRACE_EXIT_RES(res);
2269         return res;
2270 }
2271 EXPORT_SYMBOL(scst_sbc_generic_parse);
2272
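/*
 * Usage sketch (hypothetical names, illustrative only): a disk dev
 * handler would typically wrap this helper with a callback that
 * returns its cached block shift.
 */
#if 0
static int example_get_block_shift(struct scst_cmd *cmd)
{
	/* example_dev_params is a hypothetical per-device structure
	 * stored in dh_priv by the handler's attach() callback */
	struct example_dev_params *params = cmd->dev->dh_priv;
	return params->block_shift;
}

static int example_disk_parse(struct scst_cmd *cmd)
{
	return scst_sbc_generic_parse(cmd, example_get_block_shift);
}
#endif
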
2273 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2274         int (*get_block_shift)(struct scst_cmd *cmd))
2275 {
2276         int res = 0;
2277
2278         TRACE_ENTRY();
2279
2280         /*
2281          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2282          * therefore change them only if necessary
2283          */
2284
2285         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2286               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2287
2288         cmd->cdb[1] &= 0x1f;
2289
2290         switch (cmd->cdb[0]) {
2291         case VERIFY_6:
2292         case VERIFY:
2293         case VERIFY_12:
2294         case VERIFY_16:
2295                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2296                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2297                         cmd->bufflen = 0;
2298                         goto set_timeout;
2299                 }
2300                 break;
2301         default:
2302                 /* It's all good */
2303                 break;
2304         }
2305
2306         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2307                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2308
2309 set_timeout:
2310         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2311                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2312         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2313                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2314         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2315                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2316
2317         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2318                 cmd->data_direction);
2319
2320         TRACE_EXIT();
2321         return res;
2322 }
2323 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2324
2325 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2326         int (*get_block_shift)(struct scst_cmd *cmd))
2327 {
2328         int res = 0;
2329
2330         TRACE_ENTRY();
2331
2332         /*
2333          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2334          * therefore change them only if necessary
2335          */
2336
2337         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2338               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2339
2340         cmd->cdb[1] &= 0x1f;
2341
2342         switch (cmd->cdb[0]) {
2343         case VERIFY_6:
2344         case VERIFY:
2345         case VERIFY_12:
2346         case VERIFY_16:
2347                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2348                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2349                         cmd->bufflen = 0;
2350                         goto set_timeout;
2351                 }
2352                 break;
2353         default:
2354                 /* It's all good */
2355                 break;
2356         }
2357
2358         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2359                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2360
2361 set_timeout:
2362         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2363                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2364         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2365                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2366         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2367                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2368
2369         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2370                 cmd->data_direction);
2371
2372         TRACE_EXIT_RES(res);
2373         return res;
2374 }
2375 EXPORT_SYMBOL(scst_modisk_generic_parse);
2376
2377 int scst_tape_generic_parse(struct scst_cmd *cmd,
2378         int (*get_block_size)(struct scst_cmd *cmd))
2379 {
2380         int res = 0;
2381
2382         TRACE_ENTRY();
2383
2384         /*
2385          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2386          * therefore change them only if necessary
2387          */
2388
2389         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2390               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2391
2392         if (cmd->cdb[0] == READ_POSITION) {
2393                 int tclp = cmd->cdb[1] & 4;
2394                 int long_bit = cmd->cdb[1] & 2;
2395                 int bt = cmd->cdb[1] & 1;
2396
2397                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2398                         cmd->bufflen =
2399                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2400                         cmd->data_direction = SCST_DATA_READ;
2401                 } else {
2402                         cmd->bufflen = 0;
2403                         cmd->data_direction = SCST_DATA_NONE;
2404                 }
2405         }
2406
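	/*
	 * Note (reviewer's reading, verify against the flag definition):
	 * this relies on SCST_TRANSFER_LEN_TYPE_FIXED occupying bit 0 so
	 * that it lines up with the FIXED bit in cdb[1].
	 */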
2407         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2408                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2409
2410         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2411                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2412         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2413                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2414         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2415                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2416
2417         TRACE_EXIT_RES(res);
2418         return res;
2419 }
2420 EXPORT_SYMBOL(scst_tape_generic_parse);
2421
2422 static int scst_null_parse(struct scst_cmd *cmd)
2423 {
2424         int res = 0;
2425
2426         TRACE_ENTRY();
2427
2428         /*
2429          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2430          * therefore change them only if necessary
2431          */
2432
2433         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2434               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2435 #if 0
2436         switch (cmd->cdb[0]) {
2437         default:
2438                 /* It's all good */
2439                 break;
2440         }
2441 #endif
2442         TRACE_DBG("res %d bufflen %d direct %d",
2443               res, cmd->bufflen, cmd->data_direction);
2444
2445         TRACE_EXIT();
2446         return res;
2447 }
2448
2449 int scst_changer_generic_parse(struct scst_cmd *cmd,
2450         int (*nothing)(struct scst_cmd *cmd))
2451 {
2452         int res = scst_null_parse(cmd);
2453
2454         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2455                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2456         else
2457                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2458
2459         return res;
2460 }
2461 EXPORT_SYMBOL(scst_changer_generic_parse);
2462
2463 int scst_processor_generic_parse(struct scst_cmd *cmd,
2464         int (*nothing)(struct scst_cmd *cmd))
2465 {
2466         int res = scst_null_parse(cmd);
2467
2468         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2469                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2470         else
2471                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2472
2473         return res;
2474 }
2475 EXPORT_SYMBOL(scst_processor_generic_parse);
2476
2477 int scst_raid_generic_parse(struct scst_cmd *cmd,
2478         int (*nothing)(struct scst_cmd *cmd))
2479 {
2480         int res = scst_null_parse(cmd);
2481
2482         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2483                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2484         else
2485                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2486
2487         return res;
2488 }
2489 EXPORT_SYMBOL(scst_raid_generic_parse);
2490
2491 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2492         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2493 {
2494         int opcode = cmd->cdb[0];
2495         int status = cmd->status;
2496         int res = SCST_CMD_STATE_DEFAULT;
2497
2498         TRACE_ENTRY();
2499
2500         /*
2501          * SCST sets good defaults for cmd->is_send_status and
2502          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2503          * therefore change them only if necessary
2504          */
2505
2506         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2507                 switch (opcode) {
2508                 case READ_CAPACITY:
2509                 {
2510                         /* Always keep track of disk capacity */
2511                         int buffer_size, sector_size, sh;
2512                         uint8_t *buffer;
2513
2514                         buffer_size = scst_get_buf_first(cmd, &buffer);
2515                         if (unlikely(buffer_size <= 0)) {
2516                                 if (buffer_size < 0) {
2517                                         PRINT_ERROR("%s: Unable to get the"
2518                                         " buffer (%d)", __func__, buffer_size);
2519                                 }
2520                                 goto out;
2521                         }
2522
2523                         sector_size =
2524                             ((buffer[4] << 24) | (buffer[5] << 16) |
2525                              (buffer[6] << 8) | (buffer[7] << 0));
2526                         scst_put_buf(cmd, buffer);
2527                         if (sector_size != 0)
2528                                 sh = scst_calc_block_shift(sector_size);
2529                         else
2530                                 sh = 0;
2531                         set_block_shift(cmd, sh);
2532                         TRACE_DBG("block_shift %d", sh);
2533                         break;
2534                 }
2535                 default:
2536                         /* It's all good */
2537                         break;
2538                 }
2539         }
2540
2541         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2542               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2543
2544 out:
2545         TRACE_EXIT_RES(res);
2546         return res;
2547 }
2548 EXPORT_SYMBOL(scst_block_generic_dev_done);
2549
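/*
 * Layout recap (illustrative): READ CAPACITY(10) returns 8 bytes, the
 * last LBA in bytes 0-3 and the block length in bytes 4-7, both
 * big-endian; e.g. bytes 4-7 of 00 00 02 00 decode to 512 above.
 */
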
2550 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2551         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2552 {
2553         int opcode = cmd->cdb[0];
2554         int res = SCST_CMD_STATE_DEFAULT;
2555         int buffer_size, bs;
2556         uint8_t *buffer = NULL;
2557
2558         TRACE_ENTRY();
2559
2560         /*
2561          * SCST sets good defaults for cmd->is_send_status and
2562          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2563          * therefore change them only if necessary
2564          */
2565
2566         switch (opcode) {
2567         case MODE_SENSE:
2568         case MODE_SELECT:
2569                 buffer_size = scst_get_buf_first(cmd, &buffer);
2570                 if (unlikely(buffer_size <= 0)) {
2571                         if (buffer_size < 0) {
2572                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2573                                         __func__, buffer_size);
2574                         }
2575                         goto out;
2576                 }
2577                 break;
2578         }
2579
2580         switch (opcode) {
2581         case MODE_SENSE:
2582                 TRACE_DBG("%s", "MODE_SENSE");
2583                 if ((cmd->cdb[2] & 0xC0) == 0) {
2584                         if (buffer[3] == 8) {
2585                                 bs = (buffer[9] << 16) |
2586                                     (buffer[10] << 8) | buffer[11];
2587                                 set_block_size(cmd, bs);
2588                         }
2589                 }
2590                 break;
2591         case MODE_SELECT:
2592                 TRACE_DBG("%s", "MODE_SELECT");
2593                 if (buffer[3] == 8) {
2594                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2595                             (buffer[11]);
2596                         set_block_size(cmd, bs);
2597                 }
2598                 break;
2599         default:
2600                 /* It's all good */
2601                 break;
2602         }
2603
2604         switch (opcode) {
2605         case MODE_SENSE:
2606         case MODE_SELECT:
2607                 scst_put_buf(cmd, buffer);
2608                 break;
2609         }
2610
2611 out:
2612         TRACE_EXIT_RES(res);
2613         return res;
2614 }
2615 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2616
2617 static void scst_check_internal_sense(struct scst_device *dev, int result,
2618         uint8_t *sense, int sense_len)
2619 {
2620         TRACE_ENTRY();
2621
2622         if (host_byte(result) == DID_RESET) {
2623                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2624                         "reset UA");
2625                 scst_set_sense(sense, sense_len,
2626                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2627                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2628         } else if ((status_byte(result) == CHECK_CONDITION) &&
2629                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2630                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2631
2632         TRACE_EXIT();
2633         return;
2634 }
2635
2636 int scst_obtain_device_parameters(struct scst_device *dev)
2637 {
2638         int res = 0, i;
2639         uint8_t cmd[16];
2640         uint8_t buffer[4+0x0A];
2641         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2642
2643         TRACE_ENTRY();
2644
2645         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2646
2647         for (i = 0; i < 5; i++) {
2648                 /* Get control mode page */
2649                 memset(cmd, 0, sizeof(cmd));
2650                 cmd[0] = MODE_SENSE;
2651                 cmd[1] = 8; /* DBD */
2652                 cmd[2] = 0x0A;
2653                 cmd[4] = sizeof(buffer);
2654
2655                 memset(buffer, 0, sizeof(buffer));
2656                 memset(sense_buffer, 0, sizeof(sense_buffer));
2657
2658                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2659                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2660                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2661
2662                 TRACE_DBG("MODE_SENSE done: %x", res);
2663
2664                 if (scsi_status_is_good(res)) {
2665                         int q;
2666
2667                         PRINT_BUFF_FLAG(TRACE_SCSI,
2668                                 "Returned control mode page data",
2669                                 buffer, sizeof(buffer));
2670
2671                         dev->tst = buffer[4+2] >> 5;
2672                         q = buffer[4+3] >> 4;
2673                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2674                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2675                                         "%d:%d:%d:%d", q,
2676                                         dev->scsi_dev->host->host_no,
2677                                         dev->scsi_dev->channel,
2678                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2679                         }
2680                         dev->queue_alg = q;
2681                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2682                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2683
2684                         /*
2685                          * Unfortunately, SCSI ML doesn't provide a way to
2686                          * specify a command's task attribute, so we can only
2687                          * rely on the device's restricted reordering.
2688                          */
2689                         dev->has_own_order_mgmt = !dev->queue_alg;
2690
2691                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2692                                 "Device %d:%d:%d:%d: TST %x, "
2693                                 "QUEUE ALG %x, SWP %x, TAS %x, "
2694                                 "has_own_order_mgmt %d",
2695                                 dev->scsi_dev->host->host_no,
2696                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2697                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2698                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2699
2700                         goto out;
2701                 } else {
2702 #if 0
2703                         if ((status_byte(res) == CHECK_CONDITION) &&
2704 #else
2705                         /*
2706                          * 3ware controller is buggy and returns CONDITION_GOOD
2707                          * instead of CHECK_CONDITION
2708                          */
2709                         if (
2710 #endif
2711                             SCST_SENSE_VALID(sense_buffer)) {
2712                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2713                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2714                                                 "Device %d:%d:%d:%d doesn't"
2715                                                 " support control mode page,"
2716                                                 " using defaults: TST %x,"
2717                                                 " QUEUE ALG %x, SWP %x, TAS %x,"
2718                                                 " has_own_order_mgmt %d",
2719                                                 dev->scsi_dev->host->host_no,
2720                                                 dev->scsi_dev->channel,
2721                                                 dev->scsi_dev->id,
2722                                                 dev->scsi_dev->lun,
2723                                                 dev->tst,
2724                                                 dev->queue_alg,
2725                                                 dev->swp,
2726                                                 dev->tas,
2727                                                 dev->has_own_order_mgmt);
2728                                         res = 0;
2729                                         goto out;
2730                                 } else if (sense_buffer[2] == NOT_READY) {
2731                                         TRACE(TRACE_SCSI,
2732                                                 "Device %d:%d:%d:%d not ready",
2733                                                 dev->scsi_dev->host->host_no,
2734                                                 dev->scsi_dev->channel,
2735                                                 dev->scsi_dev->id,
2736                                                 dev->scsi_dev->lun);
2737                                         res = 0;
2738                                         goto out;
2739                                 }
2740                         } else {
2741                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2742                                         "Internal MODE SENSE to "
2743                                         "device %d:%d:%d:%d failed: %x",
2744                                         dev->scsi_dev->host->host_no,
2745                                         dev->scsi_dev->channel,
2746                                         dev->scsi_dev->id,
2747                                         dev->scsi_dev->lun, res);
2748                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2749                                         "MODE SENSE sense",
2750                                         sense_buffer, sizeof(sense_buffer));
2751                         }
2752                         scst_check_internal_sense(dev, res, sense_buffer,
2753                                         sizeof(sense_buffer));
2754                 }
2755         }
2756         res = -ENODEV;
2757
2758 out:
2759         TRACE_EXIT_RES(res);
2760         return res;
2761 }
2762 EXPORT_SYMBOL(scst_obtain_device_parameters);
2763
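/*
 * Offset recap (illustrative): with DBD set, the MODE SENSE(6) data is
 * a 4-byte header followed directly by the control mode page, so page
 * byte 2 (TST) is buffer[6], page byte 3 (QUEUE ALGORITHM MODIFIER) is
 * buffer[7], page byte 4 (SWP) is buffer[8] and page byte 5 (TAS) is
 * buffer[9], matching the 4+n indexing above.
 */
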
2764 /* Called under dev_lock and BH off */
2765 void scst_process_reset(struct scst_device *dev,
2766         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2767         struct scst_mgmt_cmd *mcmd, bool setUA)
2768 {
2769         struct scst_tgt_dev *tgt_dev;
2770         struct scst_cmd *cmd, *tcmd;
2771
2772         TRACE_ENTRY();
2773
2774         /* Clear RESERVE'ation, if necessary */
2775         if (dev->dev_reserved) {
2776                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2777                                     dev_tgt_dev_list_entry) {
2778                         TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
2779                                 "tgt_dev lun %lld",
2780                                 (long long unsigned int)tgt_dev->lun);
2781                         clear_bit(SCST_TGT_DEV_RESERVED,
2782                                   &tgt_dev->tgt_dev_flags);
2783                 }
2784                 dev->dev_reserved = 0;
2785                 /*
2786                  * There is no need to send RELEASE, since the device is going
2787                  * to be reset. Actually, since we can be inside a RESET TM
2788                  * function, doing so might even be dangerous.
2789                  */
2790         }
2791
2792         dev->dev_double_ua_possible = 1;
2793
2794         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2795                 dev_tgt_dev_list_entry) {
2796                 struct scst_session *sess = tgt_dev->sess;
2797
2798                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2799                 scst_free_all_UA(tgt_dev);
2800                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2801
2802                 spin_lock_irq(&sess->sess_list_lock);
2803
2804                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2805                 list_for_each_entry(cmd, &sess->search_cmd_list,
2806                                 search_cmd_list_entry) {
2807                         if (cmd == exclude_cmd)
2808                                 continue;
2809                         if ((cmd->tgt_dev == tgt_dev) ||
2810                             ((cmd->tgt_dev == NULL) &&
2811                              (cmd->lun == tgt_dev->lun))) {
2812                                 scst_abort_cmd(cmd, mcmd,
2813                                         (tgt_dev->sess != originator), 0);
2814                         }
2815                 }
2816                 spin_unlock_irq(&sess->sess_list_lock);
2817         }
2818
2819         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2820                                 blocked_cmd_list_entry) {
2821                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2822                         list_del(&cmd->blocked_cmd_list_entry);
2823                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2824                                 "to active cmd list", cmd);
2825                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2826                         list_add_tail(&cmd->cmd_list_entry,
2827                                 &cmd->cmd_lists->active_cmd_list);
2828                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2829                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2830                 }
2831         }
2832
2833         if (setUA) {
2834                 /* BH already off */
2835                 spin_lock(&scst_temp_UA_lock);
2836                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2837                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2838                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2839                         sizeof(scst_temp_UA));
2840                 spin_unlock(&scst_temp_UA_lock);
2841         }
2842
2843         TRACE_EXIT();
2844         return;
2845 }
2846
2847 int scst_set_pending_UA(struct scst_cmd *cmd)
2848 {
2849         int res = 0;
2850         struct scst_tgt_dev_UA *UA_entry;
2851
2852         TRACE_ENTRY();
2853
2854         TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);
2855
2856         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2857
2858         /* UA list could be cleared behind us, so retest */
2859         if (list_empty(&cmd->tgt_dev->UA_list)) {
2860                 TRACE_DBG("%s",
2861                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2862                 res = -1;
2863                 goto out_unlock;
2864         }
2865
2866         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2867                               UA_list_entry);
2868
2869         TRACE_DBG("next %p UA_entry %p",
2870               cmd->tgt_dev->UA_list.next, UA_entry);
2871
2872         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2873                 sizeof(UA_entry->UA_sense_buffer));
2874
2875         cmd->ua_ignore = 1;
2876
2877         list_del(&UA_entry->UA_list_entry);
2878
2879         mempool_free(UA_entry, scst_ua_mempool);
2880
2881         if (list_empty(&cmd->tgt_dev->UA_list)) {
2882                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2883                           &cmd->tgt_dev->tgt_dev_flags);
2884         }
2885
2886         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2887
2888 out:
2889         TRACE_EXIT_RES(res);
2890         return res;
2891
2892 out_unlock:
2893         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2894         goto out;
2895 }
2896
2897 /* Called under tgt_dev_lock and BH off */
2898 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2899         const uint8_t *sense, int sense_len, int head)
2900 {
2901         struct scst_tgt_dev_UA *UA_entry = NULL;
2902
2903         TRACE_ENTRY();
2904
2905         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2906         if (UA_entry == NULL) {
2907                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2908                      "allocation failed. The UNIT ATTENTION "
2909                      "on some sessions will be missed");
2910                 PRINT_BUFFER("Lost UA", sense, sense_len);
2911                 goto out;
2912         }
2913         memset(UA_entry, 0, sizeof(*UA_entry));
2914
2915         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2916                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2917         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2918
2919         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2920
2921         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2922
2923         if (head)
2924                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2925         else
2926                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2927
2928 out:
2929         TRACE_EXIT();
2930         return;
2931 }
2932
2933 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2934         const uint8_t *sense, int sense_len, int head)
2935 {
2936         int skip_UA = 0;
2937         struct scst_tgt_dev_UA *UA_entry_tmp;
2938
2939         TRACE_ENTRY();
2940
2941         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2942
2943         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2944                             UA_list_entry) {
2945                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2946                            sense_len) == 0) {
2947                         TRACE_MGMT_DBG("%s", "UA already exists");
2948                         skip_UA = 1;
2949                         break;
2950                 }
2951         }
2952
2953         if (skip_UA == 0)
2954                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2955
2956         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2957
2958         TRACE_EXIT();
2959         return;
2960 }
2961
2962 /* Called under dev_lock and BH off */
2963 void scst_dev_check_set_local_UA(struct scst_device *dev,
2964         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2965 {
2966         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2967
2968         TRACE_ENTRY();
2969
2970         if (exclude != NULL)
2971                 exclude_tgt_dev = exclude->tgt_dev;
2972
2973         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2974                         dev_tgt_dev_list_entry) {
2975                 if (tgt_dev != exclude_tgt_dev)
2976                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2977         }
2978
2979         TRACE_EXIT();
2980         return;
2981 }
2982
2983 /* Called under dev_lock and BH off */
2984 void __scst_dev_check_set_UA(struct scst_device *dev,
2985         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2986 {
2987         TRACE_ENTRY();
2988
2989         TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);
2990
2991         /* Check for reset UA */
2992         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2993                 scst_process_reset(dev,
2994                                    (exclude != NULL) ? exclude->sess : NULL,
2995                                    exclude, NULL, false);
2996
2997         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2998
2999         TRACE_EXIT();
3000         return;
3001 }
3002
3003 /* Called under tgt_dev_lock or when tgt_dev is unused */
3004 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
3005 {
3006         struct scst_tgt_dev_UA *UA_entry, *t;
3007
3008         TRACE_ENTRY();
3009
3010         list_for_each_entry_safe(UA_entry, t,
3011                                  &tgt_dev->UA_list, UA_list_entry) {
3012                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
3013                                (long long unsigned int)tgt_dev->lun);
3014                 list_del(&UA_entry->UA_list_entry);
3015                 mempool_free(UA_entry, scst_ua_mempool);
3016         }
3017         INIT_LIST_HEAD(&tgt_dev->UA_list);
3018         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
3019
3020         TRACE_EXIT();
3021         return;
3022 }
3023
3024 /* No locks */
3025 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
3026 {
3027         struct scst_cmd *res = NULL, *cmd, *t;
3028         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
3029
3030         spin_lock_irq(&tgt_dev->sn_lock);
3031
3032         if (unlikely(tgt_dev->hq_cmd_count != 0))
3033                 goto out_unlock;
3034
3035 restart:
3036         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
3037                                 sn_cmd_list_entry) {
3038                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3039                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3040                 if (cmd->sn == expected_sn) {
3041                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
3042                                 cmd, cmd->sn, cmd->sn_set);
3043                         tgt_dev->def_cmd_count--;
3044                         list_del(&cmd->sn_cmd_list_entry);
3045                         if (res == NULL)
3046                                 res = cmd;
3047                         else {
3048                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3049                                 TRACE_SN("Adding cmd %p to active cmd list",
3050                                         cmd);
3051                                 list_add_tail(&cmd->cmd_list_entry,
3052                                         &cmd->cmd_lists->active_cmd_list);
3053                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3054                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3055                         }
3056                 }
3057         }
3058         if (res != NULL)
3059                 goto out_unlock;
3060
3061         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3062                                 sn_cmd_list_entry) {
3063                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3064                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3065                 if (cmd->sn == expected_sn) {
3066                         atomic_t *slot = cmd->sn_slot;
3067                         /*
3068                          * !! At this point any pointer in cmd, except !!
3069                          * !! sn_slot and sn_cmd_list_entry, could be   !!
3070                          * !! already destroyed                         !!
3071                          */
3072                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3073                                  cmd,
3074                                  (long long unsigned int)cmd->tag,
3075                                  cmd->sn);
3076                         tgt_dev->def_cmd_count--;
3077                         list_del(&cmd->sn_cmd_list_entry);
3078                         spin_unlock_irq(&tgt_dev->sn_lock);
3079                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3080                                              &cmd->cmd_flags))
3081                                 scst_destroy_put_cmd(cmd);
3082                         scst_inc_expected_sn(tgt_dev, slot);
3083                         expected_sn = tgt_dev->expected_sn;
3084                         spin_lock_irq(&tgt_dev->sn_lock);
3085                         goto restart;
3086                 }
3087         }
3088
3089 out_unlock:
3090         spin_unlock_irq(&tgt_dev->sn_lock);
3091         return res;
3092 }
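
/*
 * Callers normally gate on def_cmd_count before paying for sn_lock.
 * A minimal sketch of such a wrapper (illustrative only; the name
 * example_check_deferred_commands() is hypothetical, but it relies
 * solely on the def_cmd_count bookkeeping visible above):
 */
#if 0
static inline struct scst_cmd *example_check_deferred_commands(
	struct scst_tgt_dev *tgt_dev)
{
	if (tgt_dev->def_cmd_count == 0)
		return NULL;	/* nothing was deferred, skip the locking */
	return __scst_check_deferred_commands(tgt_dev);
}
#endif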
3093
3094 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3095         struct scst_thr_data_hdr *data,
3096         void (*free_fn) (struct scst_thr_data_hdr *data))
3097 {
3098         data->owner_thr = current;
3099         atomic_set(&data->ref, 1);
3100         EXTRACHECKS_BUG_ON(free_fn == NULL);
3101         data->free_fn = free_fn;
3102         spin_lock(&tgt_dev->thr_data_lock);
3103         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3104         spin_unlock(&tgt_dev->thr_data_lock);
3105 }
3106 EXPORT_SYMBOL(scst_add_thr_data);
3107
3108 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3109 {
3110         spin_lock(&tgt_dev->thr_data_lock);
3111         while (!list_empty(&tgt_dev->thr_data_list)) {
3112                 struct scst_thr_data_hdr *d = list_entry(
3113                                 tgt_dev->thr_data_list.next, typeof(*d),
3114                                 thr_data_list_entry);
3115                 list_del(&d->thr_data_list_entry);
3116                 spin_unlock(&tgt_dev->thr_data_lock);
3117                 scst_thr_data_put(d);
3118                 spin_lock(&tgt_dev->thr_data_lock);
3119         }
3120         spin_unlock(&tgt_dev->thr_data_lock);
3121         return;
3122 }
3123 EXPORT_SYMBOL(scst_del_all_thr_data);
3124
3125 void scst_dev_del_all_thr_data(struct scst_device *dev)
3126 {
3127         struct scst_tgt_dev *tgt_dev;
3128
3129         TRACE_ENTRY();
3130
3131         mutex_lock(&scst_mutex);
3132
3133         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3134                                 dev_tgt_dev_list_entry) {
3135                 scst_del_all_thr_data(tgt_dev);
3136         }
3137
3138         mutex_unlock(&scst_mutex);
3139
3140         TRACE_EXIT();
3141         return;
3142 }
3143 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
3144
3145 struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
3146         struct task_struct *tsk)
3147 {
3148         struct scst_thr_data_hdr *res = NULL, *d;
3149
3150         spin_lock(&tgt_dev->thr_data_lock);
3151         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
3152                 if (d->owner_thr == tsk) {
3153                         res = d;
3154                         scst_thr_data_get(res);
3155                         break;
3156                 }
3157         }
3158         spin_unlock(&tgt_dev->thr_data_lock);
3159         return res;
3160 }
3161 EXPORT_SYMBOL(__scst_find_thr_data);
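
/*
 * Illustrative sketch of the per-thread data API above. The example_*
 * type and names are hypothetical; the pattern is to embed
 * struct scst_thr_data_hdr in a handler-private structure, register it
 * once per thread with scst_add_thr_data() and look it up with
 * __scst_find_thr_data(), dropping the obtained reference with
 * scst_thr_data_put() when done.
 */
#if 0
struct example_thr_data {
	struct scst_thr_data_hdr hdr;	/* embedded header */
	int private_counter;
};

static void example_free_thr_data(struct scst_thr_data_hdr *d)
{
	kfree(container_of(d, struct example_thr_data, hdr));
}

	struct example_thr_data *data;
	struct scst_thr_data_hdr *hdr;

	/* Registration, done once by each worker thread: */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL)
		scst_add_thr_data(tgt_dev, &data->hdr, example_free_thr_data);

	/* Fast-path lookup for the current thread: */
	hdr = __scst_find_thr_data(tgt_dev, current);
	if (hdr != NULL) {
		container_of(hdr, struct example_thr_data, hdr)->private_counter++;
		scst_thr_data_put(hdr);
	}
#endif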
3162
3163 /* Called under dev_lock with BHs disabled */
3164 void __scst_block_dev(struct scst_device *dev)
3165 {
3166         dev->block_count++;
3167         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3168 }
3169
3170 /* No locks */
3171 static void scst_block_dev(struct scst_device *dev, int outstanding)
3172 {
3173         spin_lock_bh(&dev->dev_lock);
3174         __scst_block_dev(dev);
3175         spin_unlock_bh(&dev->dev_lock);
3176
3177         /*
3178          * A memory barrier is necessary here, because wait_event() below
3179          * must read on_dev_count only after block_count has been increased.
3180          * Otherwise, we can miss the wake up in scst_dec_on_dev_cmd().
3181          * An explicit barrier is used, because spin_unlock_bh() doesn't
3182          * provide the necessary memory barrier semantics.
3183          */
3184         smp_mb();
3185
3186         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
3187                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3188         wait_event(dev->on_dev_waitQ,
3189                 atomic_read(&dev->on_dev_count) <= outstanding);
3190         TRACE_MGMT_DBG("%s", "wait_event() returned");
3191 }
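
/*
 * Ordering sketch for the barrier above (illustrative; the waker side
 * is scst_dec_on_dev_cmd(), defined elsewhere):
 *
 *	blocker (this function)		waker (scst_dec_on_dev_cmd())
 *	block_count++			on_dev_count--
 *	smp_mb()			...
 *	read on_dev_count		read block_count, wake_up()
 *
 * Without smp_mb() the blocker's read of on_dev_count could be
 * reordered before its increment of block_count, so it could go to
 * sleep even though the wake up it needs has already happened.
 */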
3192
3193 /* No locks */
3194 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3195 {
3196         sBUG_ON(cmd->needs_unblocking);
3197
3198         cmd->needs_unblocking = 1;
3199         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3200                        cmd, (long long unsigned int)cmd->tag);
3201
3202         scst_block_dev(cmd->dev, outstanding);
3203 }
3204
3205 /* No locks */
3206 void scst_unblock_dev(struct scst_device *dev)
3207 {
3208         spin_lock_bh(&dev->dev_lock);
3209         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3210                 dev->block_count-1, dev);
3211         if (--dev->block_count == 0)
3212                 scst_unblock_cmds(dev);
3213         spin_unlock_bh(&dev->dev_lock);
3214         sBUG_ON(dev->block_count < 0);
3215 }
3216
3217 /* No locks */
3218 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3219 {
3220         scst_unblock_dev(cmd->dev);
3221         cmd->needs_unblocking = 0;
3222 }
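
/*
 * The two helpers above pair up around work that requires a quiesced
 * device; a minimal usage sketch (illustrative only, with an
 * "outstanding" budget of 1 meaning "only our own cmd may remain
 * on the device"):
 */
#if 0
	scst_block_dev_cmd(cmd, 1);	/* waits for other cmds to finish */
	/* ... do the work that needs exclusive access to cmd->dev ... */
	scst_unblock_dev_cmd(cmd);	/* may re-activate blocked cmds */
#endif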
3223
3224 /* No locks */
3225 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3226 {
3227         int res = 0;
3228         struct scst_device *dev = cmd->dev;
3229
3230         TRACE_ENTRY();
3231
3232         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3233
3234         atomic_inc(&dev->on_dev_count);
3235         cmd->dec_on_dev_needed = 1;
3236         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3237
3238         if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
3239                 /*
3240                  * The original command may already have blocked the device,
3241                  * so the REQUEST SENSE command must always be allowed to pass.
3242                  */
3243                 goto out;
3244         }
3245
3246 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3247         spin_lock_bh(&dev->dev_lock);
3248         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3249                 goto out_unlock;
3250         if (dev->block_count > 0) {
3251                 scst_dec_on_dev_cmd(cmd);
3252                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3253                         "serializing (tag %llu, dev %p)", cmd, (long long unsigned int)cmd->tag, dev);
3254                 list_add_tail(&cmd->blocked_cmd_list_entry,
3255                               &dev->blocked_cmd_list);
3256                 res = 1;
3257         } else {
3258                 __scst_block_dev(dev);
3259                 cmd->inc_blocking = 1;
3260         }
3261         spin_unlock_bh(&dev->dev_lock);
3262         goto out;
3263 #else
3264 repeat:
3265         if (unlikely(dev->block_count > 0)) {
3266                 spin_lock_bh(&dev->dev_lock);
3267                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3268                         goto out_unlock;
3269                 if (dev->block_count > 0) {
3270                         scst_dec_on_dev_cmd(cmd);
3271                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
3272                                 "(tag %llu, dev %p)", cmd,
3273                                 (long long unsigned int)cmd->tag, dev);
3274                         list_add_tail(&cmd->blocked_cmd_list_entry,
3275                                       &dev->blocked_cmd_list);
3276                         res = 1;
3277                         spin_unlock_bh(&dev->dev_lock);
3278                         goto out;
3279                 } else {
3280                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3281                                 "continuing");
3282                 }
3283                 spin_unlock_bh(&dev->dev_lock);
3284         }
3285         if (unlikely(dev->dev_double_ua_possible)) {
3286                 spin_lock_bh(&dev->dev_lock);
3287                 if (dev->block_count == 0) {
3288                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3289                                 "cmds due to possible double reset UA (dev %p)",
3290                                 cmd, (long long unsigned int)cmd->tag, dev);
3291                         __scst_block_dev(dev);
3292                         cmd->inc_blocking = 1;
3293                 } else {
3294                         spin_unlock_bh(&dev->dev_lock);
3295                         TRACE_MGMT_DBG("Somebody blocked the device, "
3296                                 "repeating (count %d)", dev->block_count);
3297                         goto repeat;
3298                 }
3299                 spin_unlock_bh(&dev->dev_lock);
3300         }
3301 #endif
3302
3303 out:
3304         TRACE_EXIT_RES(res);
3305         return res;
3306
3307 out_unlock:
3308         spin_unlock_bh(&dev->dev_lock);
3309         goto out;
3310 }
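
/*
 * Note for callers (summarizing the logic above): a return value of 1
 * means the cmd was parked on dev->blocked_cmd_list and must not be
 * sent to the device now; scst_unblock_cmds() will move it back to the
 * active list once the device is unblocked.
 */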
3311
3312 /* Called under dev_lock */
3313 static void scst_unblock_cmds(struct scst_device *dev)
3314 {
3315 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3316         struct scst_cmd *cmd, *t;
3317         unsigned long flags;
3318
3319         TRACE_ENTRY();
3320
3321         local_irq_save(flags);
3322         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3323                                  blocked_cmd_list_entry) {
3324                 int brk = 0;
3325                 /*
3326                  * Since only one cmd at a time is being executed, expected_sn
3327                  * can't change behind us while the corresponding cmd is on
3328                  * blocked_cmd_list, but we could be called before
3329                  * scst_inc_expected_sn().
3330                  *
3331                  * For HQ commands the SN is not set.
3332                  */
3333                 if (likely(!cmd->internal && cmd->sn_set)) {
3334                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3335                         if (cmd->tgt_dev == NULL)
3336                                 sBUG();
3337                         expected_sn = cmd->tgt_dev->expected_sn;
3338                         if (cmd->sn == expected_sn)
3339                                 brk = 1;
3340                         else if (cmd->sn != (expected_sn+1))
3341                                 continue;
3342                 }
3343
3344                 list_del(&cmd->blocked_cmd_list_entry);
3345                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3346                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3347                 list_add(&cmd->cmd_list_entry,
3348                          &cmd->cmd_lists->active_cmd_list);
3349                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3350                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3351                 if (brk)
3352                         break;
3353         }
3354         local_irq_restore(flags);
3355 #else /* CONFIG_SCST_STRICT_SERIALIZING */
3356         struct scst_cmd *cmd, *tcmd;
3357         unsigned long flags;
3358
3359         TRACE_ENTRY();
3360
3361         local_irq_save(flags);
3362         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3363                                  blocked_cmd_list_entry) {
3364                 list_del(&cmd->blocked_cmd_list_entry);
3365                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3366                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3367                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3368                         list_add(&cmd->cmd_list_entry,
3369                                 &cmd->cmd_lists->active_cmd_list);
3370                 else
3371                         list_add_tail(&cmd->cmd_list_entry,
3372                                 &cmd->cmd_lists->active_cmd_list);
3373                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3374                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3375         }
3376         local_irq_restore(flags);
3377 #endif /* CONFIG_SCST_STRICT_SERIALIZING */
3378
3379         TRACE_EXIT();
3380         return;
3381 }
3382
3383 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3384         struct scst_cmd *out_of_sn_cmd)
3385 {
3386         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3387
3388         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3389                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3390                 scst_make_deferred_commands_active(tgt_dev);
3391         } else {
3392                 out_of_sn_cmd->out_of_sn = 1;
3393                 spin_lock_irq(&tgt_dev->sn_lock);
3394                 tgt_dev->def_cmd_count++;
3395                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3396                               &tgt_dev->skipped_sn_list);
3397                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
3398                         " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3399                         tgt_dev->expected_sn);
3400                 spin_unlock_irq(&tgt_dev->sn_lock);
3401         }
3402
3403         return;
3404 }
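
/*
 * Note (summarizing the interplay above): a cmd parked on
 * skipped_sn_list with out_of_sn set is later retired by the
 * skipped-SN scan in __scst_check_deferred_commands(), which advances
 * expected_sn past it once its SN becomes the expected one.
 */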
3405
3406 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3407         struct scst_cmd *out_of_sn_cmd)
3408 {
3409         TRACE_ENTRY();
3410
3411         if (!out_of_sn_cmd->sn_set) {
3412                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3413                 goto out;
3414         }
3415
3416         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3417
3418 out:
3419         TRACE_EXIT();
3420         return;
3421 }
3422
3423 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3424 {
3425         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3426
3427         TRACE_ENTRY();
3428
3429         if (!cmd->hq_cmd_inced)
3430                 goto out;
3431
3432         spin_lock_irq(&tgt_dev->sn_lock);
3433         tgt_dev->hq_cmd_count--;
3434         spin_unlock_irq(&tgt_dev->sn_lock);
3435
3436         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3437
3438         /*
3439          * It is safe to check hq_cmd_count here without the lock. In
3440          * the worst case we will only trigger an unneeded run of the
3441          * deferred commands.
3442          */
3443         if (tgt_dev->hq_cmd_count == 0)
3444                 scst_make_deferred_commands_active(tgt_dev);
3445
3446 out:
3447         TRACE_EXIT();
3448         return;
3449 }
3450
3451 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3452 {
3453         TRACE_ENTRY();
3454
3455         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3456                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3457                 atomic_read(&scst_cmd_count));
3458
3459         scst_done_cmd_mgmt(cmd);
3460
3461         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3462                 if (cmd->completed) {
3463                         /* It's completed and it's OK to return its result */
3464                         goto out;
3465                 }
3466
3467                 if (cmd->dev->tas) {
3468                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3469                                 "(tag %llu), returning TASK ABORTED", cmd,
3470                                 (long long unsigned int)cmd->tag);
3471                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3472                 } else {
3473                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3474                                 "(tag %llu), aborting without delivery or "
3475                                 "notification",
3476                                 cmd, (long long unsigned int)cmd->tag);
3477                         /*
3478                          * There is no need to check/requeue possible UA,
3479                          * because, if it exists, it will be delivered
3480                          * by the "completed" branch above.
3481                          */
3482                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3483                 }
3484         }
3485
3486 out:
3487         TRACE_EXIT();
3488         return;
3489 }
3490
3491 void __init scst_scsi_op_list_init(void)
3492 {
3493         int i;
3494         uint8_t op = 0xff;
3495
3496         TRACE_ENTRY();
3497
3498         for (i = 0; i < 256; i++)
3499                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3500
3501         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3502                 if (scst_scsi_op_table[i].ops != op) {
3503                         op = scst_scsi_op_table[i].ops;
3504                         scst_scsi_op_list[op] = i;
3505                 }
3506         }
3507
3508         TRACE_EXIT();
3509         return;
3510 }
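
/*
 * After the init above, scst_scsi_op_list[op] holds the index of the
 * first scst_scsi_op_table[] entry whose opcode is op, or
 * SCST_CDB_TBL_SIZE if the opcode is absent from the table. A minimal
 * lookup sketch (the helper name is hypothetical):
 */
#if 0
static int example_first_op_index(uint8_t op)
{
	int i = scst_scsi_op_list[op];

	return (i < SCST_CDB_TBL_SIZE) ? i : -1;	/* -1: unknown opcode */
}
#endif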
3511
3512 #ifdef CONFIG_SCST_DEBUG
3513 /* Originally taken from the XFS code */
3514 unsigned long scst_random(void)
3515 {
3516         static int Inited;
3517         static unsigned long RandomValue;
3518         static DEFINE_SPINLOCK(lock);
3519         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
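        /*
         * This is the Park-Miller "minimal standard" generator evaluated
         * with Schrage's method: rv = 16807 * rv mod (2^31 - 1), using the
         * identity 2147483647 = 16807 * 127773 + 2836 to avoid overflow of
         * the intermediate product.
         */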
3520         register long rv;
3521         register long lo;
3522         register long hi;
3523         unsigned long flags;
3524
3525         spin_lock_irqsave(&lock, flags);
3526         if (!Inited) {
3527                 RandomValue = jiffies;
3528                 Inited = 1;
3529         }
3530         rv = RandomValue;
3531         hi = rv / 127773;
3532         lo = rv % 127773;
3533         rv = 16807 * lo - 2836 * hi;
3534         if (rv <= 0)
3535                 rv += 2147483647;
3536         RandomValue = rv;
3537         spin_unlock_irqrestore(&lock, flags);
3538         return rv;
3539 }
3540 EXPORT_SYMBOL(scst_random);
3541 #endif
3542
3543 #ifdef CONFIG_SCST_DEBUG_TM
3544
3545 #define TM_DBG_STATE_ABORT              0
3546 #define TM_DBG_STATE_RESET              1
3547 #define TM_DBG_STATE_OFFLINE            2
3548
3549 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
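
/*
 * The TM debug state machine cycles ABORT -> RESET -> back to ABORT
 * (or to OFFLINE, where it stays, if CONFIG_SCST_TM_DBG_GO_OFFLINE is
 * set); see tm_dbg_change_state(). tm_dbg_on_state_num_passes[] below
 * gives the number of passes spent in each state.
 */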
3550
3551 static void tm_dbg_timer_fn(unsigned long arg);
3552
3553 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3554 /* All serialized by scst_tm_dbg_lock */
3555 static struct {
3556         unsigned int tm_dbg_release:1;
3557         unsigned int tm_dbg_blocked:1;
3558 } tm_dbg_flags;
3559 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3560 static int tm_dbg_delayed_cmds_count;
3561 static int tm_dbg_passed_cmds_count;
3562 static int tm_dbg_state;
3563 static int tm_dbg_on_state_passes;
3564 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3565 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3566
3567 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3568
3569 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3570         struct scst_acg_dev *acg_dev)
3571 {
3572         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3573                 unsigned long flags;
3574                 /* Do TM debugging only for LUN 0 */
3575                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3576                 tm_dbg_p_cmd_list_waitQ =
3577                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3578                 tm_dbg_state = INIT_TM_DBG_STATE;
3579                 tm_dbg_on_state_passes =
3580                         tm_dbg_on_state_num_passes[tm_dbg_state];
3581                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3582                 PRINT_INFO("LUN 0 connected from initiator %s is under "
3583                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3584                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3585         }
3586 }
3587
3588 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3589 {
3590         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3591                 unsigned long flags;
3592                 del_timer_sync(&tm_dbg_timer);
3593                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3594                 tm_dbg_p_cmd_list_waitQ = NULL;
3595                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3596         }
3597 }
3598
3599 static void tm_dbg_timer_fn(unsigned long arg)
3600 {
3601         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3602         tm_dbg_flags.tm_dbg_release = 1;
3603         /* Make sure that all woken-up threads see the new value */
3604         smp_wmb();
3605         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3606 }
3607
3608 /* Called under scst_tm_dbg_lock and IRQs off */
3609 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3610 {
3611         switch (tm_dbg_state) {
3612         case TM_DBG_STATE_ABORT:
3613                 if (tm_dbg_delayed_cmds_count == 0) {
3614                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3615                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
3616                                 " for %ld.%ld seconds (%ld HZ), "
3617                                 "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3618                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3619                         mod_timer(&tm_dbg_timer, jiffies + d);
3620 #if 0
3621                         tm_dbg_flags.tm_dbg_blocked = 1;
3622 #endif
3623                 } else {
3624                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3625                                 "(tag %llu), delayed_cmds_count=%d, "
3626                                 "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3627                                 tm_dbg_delayed_cmds_count,
3628                                 tm_dbg_on_state_passes);
3629                         if (tm_dbg_delayed_cmds_count == 2)
3630                                 tm_dbg_flags.tm_dbg_blocked = 0;
3631                 }
3632                 break;
3633
3634         case TM_DBG_STATE_RESET:
3635         case TM_DBG_STATE_OFFLINE:
3636                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3637                         "(tag %llu), delayed_cmds_count=%d, "
3638                         "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3639                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3640                 tm_dbg_flags.tm_dbg_blocked = 1;
3641                 break;
3642
3643         default:
3644                 sBUG();
3645         }
3646         /* IRQs already off */
3647         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3648         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3649         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3650         cmd->tm_dbg_delayed = 1;
3651         tm_dbg_delayed_cmds_count++;
3652         return;
3653 }
3654
3655 /* No locks */
3656 void tm_dbg_check_released_cmds(void)
3657 {
3658         if (tm_dbg_flags.tm_dbg_release) {
3659                 struct scst_cmd *cmd, *tc;
3660                 spin_lock_irq(&scst_tm_dbg_lock);
3661                 list_for_each_entry_safe_reverse(cmd, tc,
3662                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3663                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3664                                 "delayed_cmds_count=%d", cmd, (long long unsigned int)cmd->tag,
3665                                 tm_dbg_delayed_cmds_count);
3666                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3667                         list_move(&cmd->cmd_list_entry,
3668                                 &cmd->cmd_lists->active_cmd_list);
3669                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3670                 }
3671                 tm_dbg_flags.tm_dbg_release = 0;
3672                 spin_unlock_irq(&scst_tm_dbg_lock);
3673         }
3674 }
3675
3676 /* Called under scst_tm_dbg_lock */
3677 static void tm_dbg_change_state(void)
3678 {
3679         tm_dbg_flags.tm_dbg_blocked = 0;
3680         if (--tm_dbg_on_state_passes == 0) {
3681                 switch (tm_dbg_state) {
3682                 case TM_DBG_STATE_ABORT:
3683                         TRACE_MGMT_DBG("%s", "Changing "
3684                             "tm_dbg_state to RESET");
3685                         tm_dbg_state =
3686                                 TM_DBG_STATE_RESET;
3687                         tm_dbg_flags.tm_dbg_blocked = 0;
3688                         break;
3689                 case TM_DBG_STATE_RESET:
3690                 case TM_DBG_STATE_OFFLINE:
3691 #ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
3692                         TRACE_MGMT_DBG("%s", "Changing "
3693                                 "tm_dbg_state to OFFLINE");
3694                         tm_dbg_state =
3695                                 TM_DBG_STATE_OFFLINE;
3696 #else
3697                         TRACE_MGMT_DBG("%s", "Changing "
3698                                 "tm_dbg_state to ABORT");
3699                         tm_dbg_state =
3700                                 TM_DBG_STATE_ABORT;
3701 #endif
3702                         break;
3703                 default:
3704                         sBUG();
3705                 }
3706                 tm_dbg_on_state_passes =
3707                     tm_dbg_on_state_num_passes[tm_dbg_state];
3708         }
3709
3710         TRACE_MGMT_DBG("%s", "Deleting timer");
3711         del_timer(&tm_dbg_timer);
3712 }
3713
3714 /* No locks */
3715 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3716 {
3717         int res = 0;
3718         unsigned long flags;
3719
3720         if (cmd->tm_dbg_immut)
3721                 goto out;
3722
3723         if (cmd->tm_dbg_delayed) {
3724                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3725                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3726                         "delayed_cmds_count=%d", cmd, (long long unsigned int)cmd->tag,
3727                         tm_dbg_delayed_cmds_count);
3728
3729                 cmd->tm_dbg_immut = 1;
3730                 tm_dbg_delayed_cmds_count--;
3731                 if ((tm_dbg_delayed_cmds_count == 0) &&
3732                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3733                         tm_dbg_change_state();
3734                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3735         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3736                                         &cmd->tgt_dev->tgt_dev_flags)) {
3737                 /* Delay every 50th command */
3738                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3739                 if (tm_dbg_flags.tm_dbg_blocked ||
3740                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3741                         tm_dbg_delay_cmd(cmd);
3742                         res = 1;
3743                 } else
3744                         cmd->tm_dbg_immut = 1;
3745                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3746         }
3747
3748 out:
3749         return res;
3750 }
3751
3752 /* No locks */
3753 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3754 {
3755         struct scst_cmd *c;
3756         unsigned long flags;
3757
3758         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3759         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3760                                 cmd_list_entry) {
3761                 if (c == cmd) {
3762                         TRACE_MGMT_DBG("Abort request for "
3763                                 "delayed cmd %p (tag=%llu), moving it to "