Fixes for processing internal REQUEST SENSE with scst_user handler
scst/src/scst_lib.c
/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
	uint8_t *sense, int sense_len);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int head);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
	int res = 0;
	gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

	TRACE_ENTRY();

	sBUG_ON(cmd->sense != NULL);

	cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
	if (cmd->sense == NULL) {
		PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
			"The sense data will be lost!!", cmd->cdb[0]);
		res = -ENOMEM;
		goto out;
	}

	memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
	const uint8_t *sense, unsigned int len)
{
	int res;

	TRACE_ENTRY();

	res = scst_alloc_sense(cmd, atomic);
	if (res != 0) {
		PRINT_BUFFER("Lost sense", sense, len);
		goto out;
	}

	memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
	TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
	TRACE_ENTRY();

	cmd->status = status;
	cmd->host_status = DID_OK;

	cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
	cmd->dbl_ua_orig_data_direction = cmd->data_direction;

	cmd->data_direction = SCST_DATA_NONE;
	cmd->resp_data_len = 0;
	cmd->is_send_status = 1;

	cmd->completed = 1;

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
	int rc;

	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

	rc = scst_alloc_sense(cmd, 1);
	if (rc != 0) {
		PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
			key, asc, ascq);
		goto out;
	}

	scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
	TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error);
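
/*
 * Illustrative sketch (not part of this change): a dev handler that wants
 * to fail a command with CHECK CONDITION can combine the helpers above
 * with one of the predefined key/asc/ascq triples, e.g.:
 *
 *	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 *
 * SCST_LOAD_SENSE() expands to the key, asc and ascq arguments, exactly as
 * used with scst_sense_hardw_error in scst_complete_request_sense() below.
 */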

void scst_set_sense(uint8_t *buffer, int len, int key,
	int asc, int ascq)
{
	memset(buffer, 0, len);
	buffer[0] = 0x70;	/* Error Code			*/
	buffer[2] = key;	/* Sense Key			*/
	buffer[7] = 0x0a;	/* Additional Sense Length	*/
	buffer[12] = asc;	/* ASC				*/
	buffer[13] = ascq;	/* ASCQ				*/
	TRACE_BUFFER("Sense set", buffer, len);
	return;
}
EXPORT_SYMBOL(scst_set_sense);
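
/*
 * For reference, with key/asc/ascq = MEDIUM ERROR (0x3)/0x11/0x00 the
 * fixed-format buffer built above would contain:
 *
 *	byte  0: 0x70 (response code: current error, fixed format)
 *	byte  2: 0x03 (sense key)
 *	byte  7: 0x0a (additional sense length: 10 more bytes)
 *	byte 12: 0x11 (ASC)
 *	byte 13: 0x00 (ASCQ)
 *
 * with all other bytes zeroed by the memset() above.
 */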

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
	unsigned int len)
{
	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
	scst_alloc_set_sense(cmd, 1, sense, len);

	TRACE_EXIT();
	return;
}

void scst_set_busy(struct scst_cmd *cmd)
{
	int c = atomic_read(&cmd->sess->sess_cmd_count);

	TRACE_ENTRY();

	if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
		scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
		TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	} else {
		scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
		TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
			"initiator %s (cmds count %d, queue_type %x, "
			"sess->init_phase %d)", cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	}

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_busy);

void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
	int i;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
		asc, ascq);

	/* Protect sess_tgt_dev_list_hash */
	mutex_lock(&scst_mutex);

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		struct scst_tgt_dev *tgt_dev;

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			if (!list_empty(&tgt_dev->UA_list)) {
				struct scst_tgt_dev_UA *ua;
				uint8_t *sense;

				ua = list_entry(tgt_dev->UA_list.next,
					typeof(*ua), UA_list_entry);
				sense = ua->UA_sense_buffer;
				if ((sense[2] == UNIT_ATTENTION) &&
				    (sense[12] == 0x29) &&
				    (sense[13] == 0)) {
					scst_set_sense(sense,
						sizeof(ua->UA_sense_buffer),
						key, asc, ascq);
				} else
					PRINT_ERROR("%s",
						"The first UA isn't RESET UA");
			} else
				PRINT_ERROR("%s", "There's no RESET UA to "
					"replace");
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}

	mutex_unlock(&scst_mutex);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	switch (cmd->state) {
	case SCST_CMD_STATE_INIT_WAIT:
	case SCST_CMD_STATE_INIT:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
		res = SCST_CMD_STATE_PRE_XMIT_RESP;
		break;

	default:
		res = SCST_CMD_STATE_PRE_DEV_DONE;
		break;
	}

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	switch (cmd->state) {
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
	case SCST_CMD_STATE_XMIT_WAIT:
		PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif

	cmd->state = scst_get_cmd_abnormal_done_state(cmd);

	EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
			   (cmd->tgt_dev == NULL));

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
	int i, l;

	TRACE_ENTRY();

	scst_check_restore_sg_buff(cmd);
	cmd->resp_data_len = resp_data_len;

	if (resp_data_len == cmd->bufflen)
		goto out;

	l = 0;
	for (i = 0; i < cmd->sg_cnt; i++) {
		l += cmd->sg[i].length;
		if (l >= resp_data_len) {
			int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
			TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
				"resp_data_len %d, i %d, cmd->sg[i].length %d, "
				"left %d",
				cmd, (long long unsigned int)cmd->tag,
				resp_data_len, i,
				cmd->sg[i].length, left);
#endif
			cmd->orig_sg_cnt = cmd->sg_cnt;
			cmd->orig_sg_entry = i;
			cmd->orig_entry_len = cmd->sg[i].length;
			cmd->sg_cnt = (left > 0) ? i+1 : i;
			cmd->sg[i].length = left;
			cmd->sg_buff_modified = 1;
			break;
		}
	}

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
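
/*
 * Worked example for the SG truncation above: with three 4096-byte entries
 * (bufflen 12288) and resp_data_len 6144, the loop stops at i == 1, where
 * l == 8192 and left == 6144 - 4096 == 2048. sg_cnt becomes 2 and
 * sg[1].length is shortened to 2048, so exactly 6144 bytes get transferred,
 * while the saved orig_* fields let scst_check_restore_sg_buff() undo the
 * change later.
 */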

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
	struct scst_device *dev;
	int res = 0;
	static int dev_num; /* protected by scst_mutex */

	TRACE_ENTRY();

	dev = kzalloc(sizeof(*dev), gfp_mask);
	if (dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_device failed");
		res = -ENOMEM;
		goto out;
	}

	dev->handler = &scst_null_devtype;
	dev->p_cmd_lists = &scst_main_cmd_lists;
	atomic_set(&dev->dev_cmd_count, 0);
	atomic_set(&dev->write_cmd_count, 0);
	scst_init_mem_lim(&dev->dev_mem_lim);
	spin_lock_init(&dev->dev_lock);
	atomic_set(&dev->on_dev_count, 0);
	INIT_LIST_HEAD(&dev->blocked_cmd_list);
	INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
	INIT_LIST_HEAD(&dev->dev_acg_dev_list);
	INIT_LIST_HEAD(&dev->threads_list);
	init_waitqueue_head(&dev->on_dev_waitQ);
	dev->dev_double_ua_possible = 1;
	dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
	dev->dev_num = dev_num++;

	*out_dev = dev;

out:
	TRACE_EXIT_RES(res);
	return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	if (!list_empty(&dev->dev_tgt_dev_list) ||
	    !list_empty(&dev->dev_acg_dev_list)) {
		PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
			"is not empty!", __func__);
		sBUG();
	}
#endif

	kfree(dev);

	TRACE_EXIT();
	return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
	atomic_set(&mem_lim->alloced_pages, 0);
	mem_lim->max_allowed_pages =
		((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
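
/*
 * The shift pair above converts scst_max_dev_cmd_mem, which is in MB, to
 * pages: "<< 10" yields KB and ">> (PAGE_SHIFT - 10)" divides by the page
 * size in KB. For example, with scst_max_dev_cmd_mem == 20 and 4 KB pages
 * (PAGE_SHIFT == 12): (20 << 10) >> 2 == 5120 pages == 20 MB.
 */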

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
					struct scst_device *dev, uint64_t lun)
{
	struct scst_acg_dev *res;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
	res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
	if (res == NULL) {
		TRACE(TRACE_OUT_OF_MEM,
		      "%s", "Allocation of scst_acg_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(res, 0, sizeof(*res));
#endif

	res->dev = dev;
	res->acg = acg;
	res->lun = lun;

out:
	TRACE_EXIT_HRES(res);
	return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
	TRACE_ENTRY();

	TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_del(&acg_dev->acg_dev_list_entry);
	list_del(&acg_dev->dev_acg_dev_list_entry);

	kmem_cache_free(scst_acgd_cachep, acg_dev);

	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
	struct scst_acg *acg;

	TRACE_ENTRY();

	acg = kzalloc(sizeof(*acg), GFP_KERNEL);
	if (acg == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
		goto out;
	}

	INIT_LIST_HEAD(&acg->acg_dev_list);
	INIT_LIST_HEAD(&acg->acg_sess_list);
	INIT_LIST_HEAD(&acg->acn_list);
	acg->acg_name = acg_name;

	TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
	list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
	TRACE_EXIT_HRES(acg);
	return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
	struct scst_acn *n, *nn;
	struct scst_acg_dev *acg_dev, *acg_dev_tmp;
	int res = 0;

	TRACE_ENTRY();

	if (!list_empty(&acg->acg_sess_list)) {
		PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
		res = -EBUSY;
		goto out;
	}

	TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
	list_del(&acg->scst_acg_list_entry);

	/* Freeing acg_devs */
	list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
			acg_dev_list_entry) {
		struct scst_tgt_dev *tgt_dev, *tt;
		list_for_each_entry_safe(tgt_dev, tt,
				 &acg_dev->dev->dev_tgt_dev_list,
				 dev_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev == acg_dev)
				scst_free_tgt_dev(tgt_dev);
		}
		scst_free_acg_dev(acg_dev);
	}

	/* Freeing names */
	list_for_each_entry_safe(n, nn, &acg->acn_list,
			acn_list_entry) {
		list_del(&n->acn_list_entry);
		kfree(n->name);
		kfree(n);
	}
	INIT_LIST_HEAD(&acg->acn_list);

	kfree(acg);
out:
	TRACE_EXIT_RES(res);
	return res;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev)
{
	int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
	struct scst_tgt_dev *tgt_dev;
	struct scst_device *dev = acg_dev->dev;
	struct list_head *sess_tgt_dev_list_head;
	struct scst_tgt_template *vtt = sess->tgt->tgtt;
	int rc, i;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
	tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
	if (tgt_dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_tgt_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

	tgt_dev->dev = dev;
	tgt_dev->lun = acg_dev->lun;
	tgt_dev->acg_dev = acg_dev;
	tgt_dev->sess = sess;
	atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

	scst_sgv_pool_use_norm(tgt_dev);

	if (dev->scsi_dev != NULL) {
		ini_sg = dev->scsi_dev->host->sg_tablesize;
		ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
		ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
				ENABLE_CLUSTERING);
	} else {
		ini_sg = (1 << 15) /* infinite */;
		ini_unchecked_isa_dma = 0;
		ini_use_clustering = 0;
	}
	tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

	if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
	    !sess->tgt->tgtt->no_clustering)
		scst_sgv_pool_use_norm_clust(tgt_dev);

	if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
		scst_sgv_pool_use_dma(tgt_dev);

	if (dev->scsi_dev != NULL) {
		TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
		      "SCST lun=%lld", dev->scsi_dev->host->host_no,
		      dev->scsi_dev->channel, dev->scsi_dev->id,
		      dev->scsi_dev->lun,
		      (long long unsigned int)tgt_dev->lun);
	} else {
		TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
			       dev->virt_name,
			       (long long unsigned int)tgt_dev->lun);
	}

	spin_lock_init(&tgt_dev->tgt_dev_lock);
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	spin_lock_init(&tgt_dev->thr_data_lock);
	INIT_LIST_HEAD(&tgt_dev->thr_data_list);
	spin_lock_init(&tgt_dev->sn_lock);
	INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
	INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
	tgt_dev->expected_sn = 1;
	tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
	tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
	for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
		atomic_set(&tgt_dev->sn_slots[i], 0);

	if (dev->handler->parse_atomic &&
	    (sess->tgt->tgtt->preprocessing_done == NULL)) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		if (dev->handler->exec_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->exec_atomic) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->dev_done_atomic &&
	    sess->tgt->tgtt->xmit_response_atomic) {
		__set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}

	spin_lock_bh(&scst_temp_UA_lock);
	scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
		SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
	spin_unlock_bh(&scst_temp_UA_lock);

	tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

	if (vtt->threads_num > 0) {
		rc = 0;
		if (dev->handler->threads_num > 0)
			rc = scst_add_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			rc = scst_add_cmd_threads(vtt->threads_num);
		if (rc != 0)
			goto out_free;
	}

	if (dev->handler && dev->handler->attach_tgt) {
		TRACE_DBG("Calling dev handler's attach_tgt(%p)",
		      tgt_dev);
		rc = dev->handler->attach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
		if (rc != 0) {
			PRINT_ERROR("Device handler's %s attach_tgt() "
			    "failed: %d", dev->handler->name, rc);
			goto out_thr_free;
		}
	}

	spin_lock_bh(&dev->dev_lock);
	list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
	if (dev->dev_reserved)
		__set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
	spin_unlock_bh(&dev->dev_lock);

	sess_tgt_dev_list_head =
		&sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
	list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
		      sess_tgt_dev_list_head);

out:
	TRACE_EXIT();
	return tgt_dev;

out_thr_free:
	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_cmd_threads(vtt->threads_num);
	}

out_free:
	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
	tgt_dev = NULL;
	goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No other locks are supposed to be held; scst_mutex must be held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
{
	TRACE_ENTRY();

	scst_clear_reservation(tgt_dev);

	/* With activity suspended the lock isn't needed, but let's be safe */
	spin_lock_bh(&tgt_dev->tgt_dev_lock);
	scst_free_all_UA(tgt_dev);
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

	if (queue_UA) {
		spin_lock_bh(&scst_temp_UA_lock);
		scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
			SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
		scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
		spin_unlock_bh(&scst_temp_UA_lock);
	}

	TRACE_EXIT();
	return;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

	TRACE_ENTRY();

	tm_dbg_deinit_tgt_dev(tgt_dev);

	spin_lock_bh(&dev->dev_lock);
	list_del(&tgt_dev->dev_tgt_dev_list_entry);
	spin_unlock_bh(&dev->dev_lock);

	list_del(&tgt_dev->sess_tgt_dev_list_entry);

	scst_clear_reservation(tgt_dev);
	scst_free_all_UA(tgt_dev);

	if (dev->handler && dev->handler->detach_tgt) {
		TRACE_DBG("Calling dev handler's detach_tgt(%p)",
		      tgt_dev);
		dev->handler->detach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
	}

	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_cmd_threads(vtt->threads_num);
	}

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);

	TRACE_EXIT();
	return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
			acg_dev_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
	}

out:
	TRACE_EXIT();
	return res;

out_free:
	scst_sess_free_tgt_devs(sess);
	goto out;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_sess_free_tgt_devs(struct scst_session *sess)
{
	int i;
	struct scst_tgt_dev *tgt_dev, *t;

	TRACE_ENTRY();

	/* The session is going down, no users, so no locks */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			scst_free_tgt_dev(tgt_dev);
		}
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}

	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
		     uint64_t lun, int read_only)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;
	struct scst_session *sess;
	LIST_HEAD(tmp_tgt_dev_list);

	TRACE_ENTRY();

	INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
	list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
		if (acg_dev->dev == dev) {
			PRINT_ERROR("Device is already in group %s",
				acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}
#endif

	acg_dev = scst_alloc_acg_dev(acg, dev, lun);
	if (acg_dev == NULL) {
		res = -ENOMEM;
		goto out;
	}
	acg_dev->rd_only_flag = read_only;

	TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
	list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
			      &tmp_tgt_dev_list);
	}

out:
	if (res == 0) {
		if (dev->virt_name != NULL) {
			PRINT_INFO("Added device %s to group %s (LUN %lld, "
				"rd_only %d)", dev->virt_name, acg->acg_name,
				(long long unsigned int)lun,
				read_only);
		} else {
			PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
				"%lld, rd_only %d)",
				dev->scsi_dev->host->host_no,
				dev->scsi_dev->channel, dev->scsi_dev->id,
				dev->scsi_dev->lun, acg->acg_name,
				(long long unsigned int)lun,
				read_only);
		}
	}

	TRACE_EXIT_RES(res);
	return res;

out_free:
	list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
			 extra_tgt_dev_list_entry) {
		scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);
	goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
	int res = 0;
	struct scst_acg_dev *acg_dev = NULL, *a;
	struct scst_tgt_dev *tgt_dev, *tt;

	TRACE_ENTRY();

	list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
		if (a->dev == dev) {
			acg_dev = a;
			break;
		}
	}

	if (acg_dev == NULL) {
		PRINT_ERROR("Device is not found in group %s", acg->acg_name);
		res = -EINVAL;
		goto out;
	}

	list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
			 dev_tgt_dev_list_entry) {
		if (tgt_dev->acg_dev == acg_dev)
			scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);

out:
	if (res == 0) {
		if (dev->virt_name != NULL) {
			PRINT_INFO("Removed device %s from group %s",
				dev->virt_name, acg->acg_name);
		} else {
			PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
				dev->scsi_dev->host->host_no,
				dev->scsi_dev->channel, dev->scsi_dev->id,
				dev->scsi_dev->lun, acg->acg_name);
		}
	}

	TRACE_EXIT_RES(res);
	return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
	int res = 0;
	struct scst_acn *n;
	int len;
	char *nm;

	TRACE_ENTRY();

	list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
		if (strcmp(n->name, name) == 0) {
			PRINT_ERROR("Name %s already exists in group %s",
				name, acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (n == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn");
		res = -ENOMEM;
		goto out;
	}

	len = strlen(name);
	nm = kmalloc(len + 1, GFP_KERNEL);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
		res = -ENOMEM;
		goto out_free;
	}

	strcpy(nm, name);
	n->name = nm;

	list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
	if (res == 0)
		PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

	TRACE_EXIT_RES(res);
	return res;

out_free:
	kfree(n);
	goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
	int res = -EINVAL;
	struct scst_acn *n;

	TRACE_ENTRY();

	list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
		if (strcmp(n->name, name) == 0) {
			list_del(&n->acn_list_entry);
			kfree(n->name);
			kfree(n);
			res = 0;
			break;
		}
	}

	if (res == 0) {
		PRINT_INFO("Removed name %s from group %s", name,
			acg->acg_name);
	} else {
		PRINT_ERROR("Unable to find name %s in group %s", name,
			acg->acg_name);
	}

	TRACE_EXIT_RES(res);
	return res;
}

static struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)
{
	struct scst_cmd *res;
	gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	TRACE_ENTRY();

	res = scst_alloc_cmd(gfp_mask);
	if (res == NULL)
		goto out;

	res->cmd_lists = orig_cmd->cmd_lists;
	res->sess = orig_cmd->sess;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->internal = 1;
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;
	res->bufflen = bufsize;

	scst_sess_get(res->sess);
	if (res->tgt_dev != NULL)
		__scst_get(0);

	res->state = SCST_CMD_STATE_PRE_PARSE;

out:
	TRACE_EXIT_HRES((unsigned long)res);
	return res;
}

int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
	int res = 0;
#define sbuf_size 252
	static const uint8_t request_sense[6] =
	    { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
	struct scst_cmd *rs_cmd;

	TRACE_ENTRY();

	if (orig_cmd->sense != NULL) {
		TRACE_MEM("Releasing sense %p (orig_cmd %p)",
			orig_cmd->sense, orig_cmd);
		mempool_free(orig_cmd->sense, scst_sense_mempool);
		orig_cmd->sense = NULL;
	}

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
	if (rs_cmd == NULL)
		goto out_error;

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;
	rs_cmd->expected_data_direction = rs_cmd->data_direction;
	rs_cmd->expected_transfer_len = sbuf_size;
	rs_cmd->expected_values_set = 1;

	TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
		"cmd list", rs_cmd);
	spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
	wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
	spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_error:
	res = -1;
	goto out;
#undef sbuf_size
}
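
/*
 * The CDB built above is a plain 6-byte REQUEST SENSE (opcode 0x03) with
 * the allocation length (252, the maximum sense data length SPC allows) in
 * byte 4 and zeroes elsewhere. Queueing it as SCST_CMD_QUEUE_HEAD_OF_QUEUE
 * via scst_create_prepare_internal_cmd() lets it overtake other queued
 * commands, so the sense is fetched as soon as possible.
 */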

static void scst_complete_request_sense(struct scst_cmd *req_cmd)
{
	struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
	uint8_t *buf;
	int len;

	TRACE_ENTRY();

	sBUG_ON(orig_cmd == NULL);

	len = scst_get_buf_first(req_cmd, &buf);

	if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
	    SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
			buf, len);
		scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
			len);
	} else {
		PRINT_ERROR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(orig_cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
	}

	if (len > 0)
		scst_put_buf(req_cmd, buf);

	TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
		"cmd list", orig_cmd);
	spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
	list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
	wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
	spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);

	TRACE_EXIT();
	return;
}
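
/*
 * Summary of the internal REQUEST SENSE round trip (the case this fix is
 * about, e.g. for devices driven by the scst_user handler): when a command
 * finishes with CHECK CONDITION but no sense was delivered,
 * scst_prepare_request_sense() queues the internal command; once that
 * completes, scst_finish_internal_cmd() below calls the function above,
 * which copies the returned sense into the original command (or sets
 * HARDWARE ERROR on failure) and requeues the original command for further
 * processing.
 */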

int scst_finish_internal_cmd(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	sBUG_ON(!cmd->internal);

	if (cmd->cdb[0] == REQUEST_SENSE)
		scst_complete_request_sense(cmd);

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req;

	TRACE_ENTRY();

	if (scsi_cmd && (req = scsi_cmd->sc_request)) {
		if (req->sr_bufflen)
			kfree(req->sr_buffer);
		scsi_release_request(req);
	}

	TRACE_EXIT();
	return;
}

static void scst_send_release(struct scst_device *dev)
{
	struct scsi_request *req;
	struct scsi_device *scsi_dev;
	uint8_t cdb[6];

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL)
		goto out;

	scsi_dev = dev->scsi_dev;

	req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
	if (req == NULL) {
		PRINT_ERROR("Allocation of scsi_request failed: unable "
			    "to RELEASE device %d:%d:%d:%d",
			    scsi_dev->host->host_no, scsi_dev->channel,
			    scsi_dev->id, scsi_dev->lun);
		goto out;
	}

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = RELEASE;
	cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
	    ((scsi_dev->lun << 5) & 0xe0) : 0;
	memcpy(req->sr_cmnd, cdb, sizeof(cdb));
	req->sr_cmd_len = sizeof(cdb);
	req->sr_data_direction = SCST_DATA_NONE;
	req->sr_use_sg = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_request->rq_disk = dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
		"mid-level", req);
	scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
		    scst_req_done, 15, 3);

out:
	TRACE_EXIT();
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
	struct scsi_device *scsi_dev;
	unsigned char cdb[6];
	unsigned char *sense;
	int rc, i;

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL)
		goto out;

	/* We can't afford missing RELEASE due to memory shortage */
	sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

	scsi_dev = dev->scsi_dev;

	for (i = 0; i < 5; i++) {
		memset(cdb, 0, sizeof(cdb));
		cdb[0] = RELEASE;
		cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
		    ((scsi_dev->lun << 5) & 0xe0) : 0;

		memset(sense, 0, SCST_SENSE_BUFFERSIZE);

		TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
			"SCSI mid-level");
		rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
				sense, 15, 0, 0);
		TRACE_DBG("RELEASE done: %x", rc);

		if (scsi_status_is_good(rc)) {
			break;
		} else {
			PRINT_ERROR("RELEASE failed: %d", rc);
			PRINT_BUFFER("RELEASE sense", sense,
				SCST_SENSE_BUFFERSIZE);
			scst_check_internal_sense(dev, rc,
					sense, SCST_SENSE_BUFFERSIZE);
		}
	}

	kfree(sense);

out:
	TRACE_EXIT();
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
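
/*
 * Note on byte 1 in both scst_send_release() variants above: for
 * scsi_level <= SCSI_2 the LUN is carried in bits 7-5 of CDB byte 1, per
 * the SCSI-2 convention, so e.g. lun == 3 gives cdb[1] == (3 << 5) & 0xe0
 * == 0x60; for newer devices the field stays zero.
 */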

/* scst_mutex supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	int release = 0;

	TRACE_ENTRY();

	spin_lock_bh(&dev->dev_lock);
	if (dev->dev_reserved &&
	    !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
		/* This is the one that holds the reservation */
1286                 struct scst_tgt_dev *tgt_dev_tmp;
1287                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1288                                     dev_tgt_dev_list_entry) {
1289                         clear_bit(SCST_TGT_DEV_RESERVED,
1290                                     &tgt_dev_tmp->tgt_dev_flags);
1291                 }
1292                 dev->dev_reserved = 0;
1293                 release = 1;
1294         }
1295         spin_unlock_bh(&dev->dev_lock);
1296
1297         if (release)
1298                 scst_send_release(dev);
1299
1300         TRACE_EXIT();
1301         return;
1302 }
1303
1304 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
1305         const char *initiator_name)
1306 {
1307         struct scst_session *sess;
1308         int i;
1309         int len;
1310         char *nm;
1311
1312         TRACE_ENTRY();
1313
1314 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1315         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
1316 #else
1317         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
1318 #endif
1319         if (sess == NULL) {
1320                 TRACE(TRACE_OUT_OF_MEM, "%s",
1321                       "Allocation of scst_session failed");
1322                 goto out;
1323         }
1324 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1325         memset(sess, 0, sizeof(*sess));
1326 #endif
1327
1328         sess->init_phase = SCST_SESS_IPH_INITING;
1329         sess->shut_phase = SCST_SESS_SPH_READY;
1330         atomic_set(&sess->refcnt, 0);
1331         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1332                 struct list_head *sess_tgt_dev_list_head =
1333                          &sess->sess_tgt_dev_list_hash[i];
1334                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1335         }
1336         spin_lock_init(&sess->sess_list_lock);
1337         INIT_LIST_HEAD(&sess->search_cmd_list);
1338         sess->tgt = tgt;
1339         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
1340         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
1341
1342 #ifdef CONFIG_SCST_MEASURE_LATENCY
1343         spin_lock_init(&sess->meas_lock);
1344 #endif
1345
1346         len = strlen(initiator_name);
1347         nm = kmalloc(len + 1, gfp_mask);
1348         if (nm == NULL) {
1349                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
1350                 goto out_free;
1351         }
1352
1353         strcpy(nm, initiator_name);
1354         sess->initiator_name = nm;
1355
1356 out:
1357         TRACE_EXIT();
1358         return sess;
1359
1360 out_free:
1361         kmem_cache_free(scst_sess_cachep, sess);
1362         sess = NULL;
1363         goto out;
1364 }
1365
1366 void scst_free_session(struct scst_session *sess)
1367 {
1368         TRACE_ENTRY();
1369
1370         mutex_lock(&scst_mutex);
1371
1372         TRACE_DBG("Removing sess %p from the list", sess);
1373         list_del(&sess->sess_list_entry);
1374         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
1375         list_del(&sess->acg_sess_list_entry);
1376
1377         scst_sess_free_tgt_devs(sess);
1378
1379         wake_up_all(&sess->tgt->unreg_waitQ);
1380
1381         mutex_unlock(&scst_mutex);
1382
1383         kfree(sess->initiator_name);
1384         kmem_cache_free(scst_sess_cachep, sess);
1385
1386         TRACE_EXIT();
1387         return;
1388 }
1389
1390 void scst_free_session_callback(struct scst_session *sess)
1391 {
1392         struct completion *c;
1393
1394         TRACE_ENTRY();
1395
1396         TRACE_DBG("Freeing session %p", sess);
1397
1398         c = sess->shutdown_compl;
1399
1400         if (sess->unreg_done_fn) {
1401                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
1402                 sess->unreg_done_fn(sess);
1403                 TRACE_DBG("%s", "unreg_done_fn() returned");
1404         }
1405         scst_free_session(sess);
1406
1407         if (c)
1408                 complete_all(c);
1409
1410         TRACE_EXIT();
1411         return;
1412 }
1413
1414 void scst_sched_session_free(struct scst_session *sess)
1415 {
1416         unsigned long flags;
1417
1418         TRACE_ENTRY();
1419
1420         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
1421                 PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
1422                         "shut phase %lx", sess, sess->shut_phase);
1423                 sBUG();
1424         }
1425
1426         spin_lock_irqsave(&scst_mgmt_lock, flags);
1427         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
1428         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
1429         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
1430
1431         wake_up(&scst_mgmt_waitQ);
1432
1433         TRACE_EXIT();
1434         return;
1435 }
1436
1437 void scst_cmd_get(struct scst_cmd *cmd)
1438 {
1439         __scst_cmd_get(cmd);
1440 }
1441 EXPORT_SYMBOL(scst_cmd_get);
1442
1443 void scst_cmd_put(struct scst_cmd *cmd)
1444 {
1445         __scst_cmd_put(cmd);
1446 }
1447 EXPORT_SYMBOL(scst_cmd_put);
1448
1449 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
1450 {
1451         struct scst_cmd *cmd;
1452
1453         TRACE_ENTRY();
1454
1455 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1456         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
1457 #else
1458         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
1459 #endif
1460         if (cmd == NULL) {
1461                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
1462                 goto out;
1463         }
1464 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1465         memset(cmd, 0, sizeof(*cmd));
1466 #endif
1467
1468         cmd->state = SCST_CMD_STATE_INIT_WAIT;
1469         cmd->start_time = jiffies;
1470         atomic_set(&cmd->cmd_ref, 1);
1471         cmd->cmd_lists = &scst_main_cmd_lists;
1472         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
1473         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1474         cmd->timeout = SCST_DEFAULT_TIMEOUT;
1475         cmd->retries = 0;
1476         cmd->data_len = -1;
1477         cmd->is_send_status = 1;
1478         cmd->resp_data_len = -1;
1479
1480         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
1481         cmd->dbl_ua_orig_resp_data_len = -1;
1482
1483 out:
1484         TRACE_EXIT();
1485         return cmd;
1486 }
1487
1488 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
1489 {
1490         scst_sess_put(cmd->sess);
1491
1492         /*
1493          * At this point tgt_dev can be dead, but the pointer remains non-NULL
1494          */
1495         if (likely(cmd->tgt_dev != NULL))
1496                 __scst_put();
1497
1498         scst_destroy_cmd(cmd);
1499         return;
1500 }
1501
1502 /* No locks supposed to be held */
1503 void scst_free_cmd(struct scst_cmd *cmd)
1504 {
1505         int destroy = 1;
1506
1507         TRACE_ENTRY();
1508
1509         TRACE_DBG("Freeing cmd %p (tag %llu)",
1510                   cmd, (long long unsigned int)cmd->tag);
1511
1512         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1513                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
1514                         cmd, atomic_read(&scst_cmd_count));
1515         }
1516
1517         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
1518                 cmd->dec_on_dev_needed);
1519
1520 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1521 #if defined(CONFIG_SCST_EXTRACHECKS)
1522         if (cmd->scsi_req) {
1523                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
1524                         "scsi_req!");
1525                 scst_release_request(cmd);
1526         }
1527 #endif
1528 #endif
1529
1530         /*
1531          * Target driver can already free sg buffer before calling
1532          * scst_tgt_cmd_done(). E.g., scst_local has to do that.
1533          */
1534         if (!cmd->tgt_data_buf_alloced)
1535                 scst_check_restore_sg_buff(cmd);
1536
1537         if (cmd->tgtt->on_free_cmd != NULL) {
1538                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
1539                 cmd->tgtt->on_free_cmd(cmd);
1540                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
1541         }
1542
1543         if (likely(cmd->dev != NULL)) {
1544                 struct scst_dev_type *handler = cmd->dev->handler;
1545                 if (handler->on_free_cmd != NULL) {
1546                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1547                               handler->name, cmd);
1548                         handler->on_free_cmd(cmd);
1549                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
1550                                 handler->name);
1551                 }
1552         }
1553
1554         scst_release_space(cmd);
1555
1556         if (unlikely(cmd->sense != NULL)) {
1557                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
1558                 mempool_free(cmd->sense, scst_sense_mempool);
1559                 cmd->sense = NULL;
1560         }
1561
1562         if (likely(cmd->tgt_dev != NULL)) {
1563 #ifdef CONFIG_SCST_EXTRACHECKS
1564                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
1565                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
1566                             "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
1567                             cmd, cmd->cdb[0], cmd->tgtt->name,
1568                             (long long unsigned int)cmd->lun,
1569                             cmd->sn, cmd->tgt_dev->expected_sn);
1570                         scst_unblock_deferred(cmd->tgt_dev, cmd);
1571                 }
1572 #endif
1573
1574                 if (unlikely(cmd->out_of_sn)) {
1575                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
1576                                 "destroy=%d", cmd,
1577                                 (long long unsigned int)cmd->tag,
1578                                 cmd->sn, destroy);
1579                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
1580                                         &cmd->cmd_flags);
1581                 }
1582         }
1583
1584         if (likely(destroy))
1585                 scst_destroy_put_cmd(cmd);
1586
1587         TRACE_EXIT();
1588         return;
1589 }
1590
1591 /* No locks supposed to be held. */
1592 void scst_check_retries(struct scst_tgt *tgt)
1593 {
1594         int need_wake_up = 0;
1595
1596         TRACE_ENTRY();
1597
1598         /*
1599          * We don't worry about finished_cmds overflowing, because we only
1600          * check whether it has changed.
1601          */
1602         atomic_inc(&tgt->finished_cmds);
1603         /* See comment in scst_queue_retry_cmd() */
1604         smp_mb__after_atomic_inc();
1605         if (unlikely(tgt->retry_cmds > 0)) {
1606                 struct scst_cmd *c, *tc;
1607                 unsigned long flags;
1608
1609                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
1610                       tgt->retry_cmds);
1611
1612                 spin_lock_irqsave(&tgt->tgt_lock, flags);
1613                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
1614                                 cmd_list_entry) {
1615                         tgt->retry_cmds--;
1616
1617                         TRACE_RETRY("Moving retry cmd %p to head of active "
1618                                 "cmd list (retry_cmds left %d)",
1619                                 c, tgt->retry_cmds);
1620                         spin_lock(&c->cmd_lists->cmd_list_lock);
1621                         list_move(&c->cmd_list_entry,
1622                                   &c->cmd_lists->active_cmd_list);
1623                         wake_up(&c->cmd_lists->cmd_list_waitQ);
1624                         spin_unlock(&c->cmd_lists->cmd_list_lock);
1625
1626                         need_wake_up++;
1627                         if (need_wake_up >= 2) /* "slow start" */
1628                                 break;
1629                 }
1630                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1631         }
1632
1633         TRACE_EXIT();
1634         return;
1635 }
1636
1637 void scst_tgt_retry_timer_fn(unsigned long arg)
1638 {
1639         struct scst_tgt *tgt = (struct scst_tgt *)arg;
1640         unsigned long flags;
1641
1642         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
1643
1644         spin_lock_irqsave(&tgt->tgt_lock, flags);
1645         tgt->retry_timer_active = 0;
1646         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1647
1648         scst_check_retries(tgt);
1649
1650         TRACE_EXIT();
1651         return;
1652 }
1653
1654 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
1655 {
1656         struct scst_mgmt_cmd *mcmd;
1657
1658         TRACE_ENTRY();
1659
1660         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1661         if (mcmd == NULL) {
1662                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1663                         "failed, some commands and their data could leak");
1664                 goto out;
1665         }
1666         memset(mcmd, 0, sizeof(*mcmd));
1667
1668 out:
1669         TRACE_EXIT();
1670         return mcmd;
1671 }
1672
1673 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1674 {
1675         unsigned long flags;
1676
1677         TRACE_ENTRY();
1678
1679         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1680         atomic_dec(&mcmd->sess->sess_cmd_count);
1681         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1682
1683         scst_sess_put(mcmd->sess);
1684
1685         if (mcmd->mcmd_tgt_dev != NULL)
1686                 __scst_put();
1687
1688         mempool_free(mcmd, scst_mgmt_mempool);
1689
1690         TRACE_EXIT();
1691         return;
1692 }
1693
1694 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1695 int scst_alloc_request(struct scst_cmd *cmd)
1696 {
1697         int res = 0;
1698         struct scsi_request *req;
1699         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1700
1701         TRACE_ENTRY();
1702
1703         /* cmd->dev->scsi_dev must be non-NULL here */
1704         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1705         if (req == NULL) {
1706                 TRACE(TRACE_OUT_OF_MEM, "%s",
1707                       "Allocation of scsi_request failed");
1708                 res = -ENOMEM;
1709                 goto out;
1710         }
1711
1712         cmd->scsi_req = req;
1713
1714         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1715         req->sr_cmd_len = cmd->cdb_len;
1716         req->sr_data_direction = cmd->data_direction;
1717         req->sr_use_sg = cmd->sg_cnt;
1718         req->sr_bufflen = cmd->bufflen;
1719         req->sr_buffer = cmd->sg;
1720         req->sr_request->rq_disk = cmd->dev->rq_disk;
1721         req->sr_sense_buffer[0] = 0;
1722
1723         cmd->scsi_req->upper_private_data = cmd;
1724
1725 out:
1726         TRACE_EXIT();
1727         return res;
1728 }
1729
1730 void scst_release_request(struct scst_cmd *cmd)
1731 {
1732         scsi_release_request(cmd->scsi_req);
1733         cmd->scsi_req = NULL;
1734 }
1735 #endif
1736
1737 int scst_alloc_space(struct scst_cmd *cmd)
1738 {
1739         gfp_t gfp_mask;
1740         int res = -ENOMEM;
1741         int atomic = scst_cmd_atomic(cmd);
1742         int flags;
1743         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1744
1745         TRACE_ENTRY();
1746
1747         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1748
1749         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1750         if (cmd->no_sgv)
1751                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1752
1753         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1754                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1755         if (cmd->sg == NULL)
1756                 goto out;
1757
1758         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1759                 static int ll;
1760                 if (ll < 10) {
1761                         PRINT_INFO("Unable to complete command due to "
1762                                 "SG IO count limitation (requested %d, "
1763                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1764                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1765                         ll++;
1766                 }
1767                 goto out_sg_free;
1768         }
1769
1770         res = 0;
1771
1772 out:
1773         TRACE_EXIT();
1774         return res;
1775
1776 out_sg_free:
1777         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1778         cmd->sgv = NULL;
1779         cmd->sg = NULL;
1780         cmd->sg_cnt = 0;
1781         goto out;
1782 }
1783
1784 static void scst_release_space(struct scst_cmd *cmd)
1785 {
1786         TRACE_ENTRY();
1787
1788         if (cmd->sgv == NULL)
1789                 goto out;
1790
1791         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
1792                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
1793                 goto out;
1794         }
1795
1796         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1797
1798         cmd->sgv = NULL;
1799         cmd->sg_cnt = 0;
1800         cmd->sg = NULL;
1801         cmd->bufflen = 0;
1802         cmd->data_len = 0;
1803
1804 out:
1805         TRACE_EXIT();
1806         return;
1807 }
1808
1809 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
1810 {
1811         struct scatterlist *src_sg, *dst_sg;
1812         unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
1813         struct page *src, *dst;
1814         unsigned int s, d, to_copy;
1815
1816         TRACE_ENTRY();
1817
1818         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
1819                 src_sg = cmd->tgt_sg;
1820                 src_sg_cnt = cmd->tgt_sg_cnt;
1821                 dst_sg = cmd->sg;
1822                 to_copy = cmd->bufflen;
1823         } else {
1824                 src_sg = cmd->sg;
1825                 src_sg_cnt = cmd->sg_cnt;
1826                 dst_sg = cmd->tgt_sg;
1827                 to_copy = cmd->resp_data_len;
1828         }
1829
1830         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
1831                 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
1832                 to_copy);
1833
1834         dst = sg_page(dst_sg);
1835         dst_len = dst_sg->length;
1836         dst_offs = dst_sg->offset;
1837
1838         s = 0;
1839         d = 0;
1840         src_offs = 0;
1841         while (s < src_sg_cnt) {
1842                 src = sg_page(&src_sg[s]);
1843                 src_len = src_sg[s].length;
1844                 src_offs += src_sg[s].offset;
1845
1846                 do {
1847                         unsigned int n;
1848
1849                         /*
1850                          * Highmem pages are not allowed here; see the
1851                          * corresponding #warning in scst_main.c. Fix your
1852                          * target driver or dev handler so that it does not
1853                          * allocate such pages!
1854                          */
1855                         EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
1856                                            PageHighMem(src));
1857
1858                         TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
1859                                 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
1860                                 cmd, to_copy, src, src_len, src_offs, dst,
1861                                 dst_len, dst_offs);
1862
1863                         if ((src_offs == 0) && (dst_offs == 0) &&
1864                             (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
1865                                 copy_page(page_address(dst), page_address(src));
1866                                 n = PAGE_SIZE;
1867                         } else {
1868                                 n = min(PAGE_SIZE - dst_offs,
1869                                         PAGE_SIZE - src_offs);
1870                                 n = min(n, src_len);
1871                                 n = min(n, dst_len);
1872                                 memcpy(page_address(dst) + dst_offs,
1873                                        page_address(src) + src_offs, n);
1874                                 dst_offs -= min(n, dst_offs);
1875                                 src_offs -= min(n, src_offs);
1876                         }
1877
1878                         TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
1879
1880                         to_copy -= n;
1881                         if (to_copy <= 0)
1882                                 goto out;
1883
1884                         src_len -= n;
1885                         dst_len -= n;
1886                         if (dst_len == 0) {
1887                                 d++;
1888                                 dst = sg_page(&dst_sg[d]);
1889                                 dst_len = dst_sg[d].length;
1890                                 dst_offs += dst_sg[d].offset;
1891                         }
1892                 } while (src_len > 0);
1893
1894                 s++;
1895         }
1896
1897 out:
1898         TRACE_EXIT();
1899         return;
1900 }
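
/*
 * Usage sketch (illustrative only, hence the #if 0): a target driver that
 * received WRITE data into its own tgt_sg buffer would copy that data into
 * the command's data buffer like this, assuming tgt_sg/tgt_sg_cnt and
 * sg/sg_cnt have already been set up for the cmd.
 */
#if 0
static void example_copy_write_data(struct scst_cmd *cmd)
{
        /* Copies up to cmd->bufflen bytes from cmd->tgt_sg into cmd->sg */
        scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);
}
#endif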
1901
1902 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1903
1904 #define SCST_CDB_GROUP(opcode)   (((opcode) >> 5) & 0x7)
1905 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1906
1907 int scst_get_cdb_len(const uint8_t *cdb)
1908 {
1909         return SCST_GET_CDB_LEN(cdb[0]);
1910 }
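
/*
 * Worked example (illustrative): the group code is the top 3 bits of the
 * opcode, so READ(10) (0x28) falls into group 1 -> a 10-byte CDB, READ(12)
 * (0xa8) into group 5 -> 12 bytes and READ(16) (0x88) into group 4 -> 16
 * bytes. Groups 3, 6 and 7 are reserved/vendor-specific, hence the -1
 * entries in SCST_CDB_LENGTH.
 */
#if 0
        int len = scst_get_cdb_len((const uint8_t []){ 0x28 }); /* == 10 */
#endif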
1911
1912 /* get_trans_len_x extracts x bytes from the CDB as the length, starting at offset off */
1913
1914 /* for special commands */
1915 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1916 {
1917         cmd->bufflen = 6;
1918         return 0;
1919 }
1920
1921 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1922 {
1923         cmd->bufflen = READ_CAP_LEN;
1924         return 0;
1925 }
1926
1927 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1928 {
1929         cmd->bufflen = 1;
1930         return 0;
1931 }
1932
1933 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1934 {
1935         const uint8_t *p = cmd->cdb + off;
1936         int res = 0;
1937
1938         cmd->bufflen = 0;
1939         cmd->bufflen |= ((u32)p[0]) << 8;
1940         cmd->bufflen |= ((u32)p[1]);
1941
1942         switch (cmd->cdb[1] & 0x1f) {
1943         case 0:
1944         case 1:
1945         case 6:
1946                 if (cmd->bufflen != 0) {
1947                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1948                                 "allocation length for service action %x",
1949                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1950                         goto out_inval;
1951                 }
1952                 break;
1953         }
1954
1955         switch (cmd->cdb[1] & 0x1f) {
1956         case 0:
1957         case 1:
1958                 cmd->bufflen = 20;
1959                 break;
1960         case 6:
1961                 cmd->bufflen = 32;
1962                 break;
1963         case 8:
1964                 cmd->bufflen = max(28, cmd->bufflen);
1965                 break;
1966         default:
1967                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1968                         cmd->cdb[1] & 0x1f);
1969                 goto out_inval;
1970         }
1971
1972 out:
1973         return res;
1974
1975 out_inval:
1976         scst_set_cmd_error(cmd,
1977                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1978         res = 1;
1979         goto out;
1980 }
1981
1982 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1983 {
1984         cmd->bufflen = (u32)cmd->cdb[off];
1985         return 0;
1986 }
1987
1988 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1989 {
1990         cmd->bufflen = (u32)cmd->cdb[off];
1991         if (cmd->bufflen == 0)
1992                 cmd->bufflen = 256;
1993         return 0;
1994 }
1995
1996 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1997 {
1998         const uint8_t *p = cmd->cdb + off;
1999
2000         cmd->bufflen = 0;
2001         cmd->bufflen |= ((u32)p[0]) << 8;
2002         cmd->bufflen |= ((u32)p[1]);
2003
2004         return 0;
2005 }
2006
2007 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
2008 {
2009         const uint8_t *p = cmd->cdb + off;
2010
2011         cmd->bufflen = 0;
2012         cmd->bufflen |= ((u32)p[0]) << 16;
2013         cmd->bufflen |= ((u32)p[1]) << 8;
2014         cmd->bufflen |= ((u32)p[2]);
2015
2016         return 0;
2017 }
2018
2019 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
2020 {
2021         const uint8_t *p = cmd->cdb + off;
2022
2023         cmd->bufflen = 0;
2024         cmd->bufflen |= ((u32)p[0]) << 24;
2025         cmd->bufflen |= ((u32)p[1]) << 16;
2026         cmd->bufflen |= ((u32)p[2]) << 8;
2027         cmd->bufflen |= ((u32)p[3]);
2028
2029         return 0;
2030 }
2031
2032 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
2033 {
2034         cmd->bufflen = 0;
2035         return 0;
2036 }
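
/*
 * Worked example (illustrative): these helpers read the transfer length
 * big endian straight out of the CDB. For WRITE(10) the two length bytes
 * sit at offset 7, so get_trans_len_2() on a CDB with cdb[7] = 0x00 and
 * cdb[8] = 0x80 yields bufflen = 128 blocks, before the block-size scaling
 * done later in the *_generic_parse() routines.
 */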
2037
2038 int scst_get_cdb_info(struct scst_cmd *cmd)
2039 {
2040         int dev_type = cmd->dev->handler->type;
2041         int i, res = 0;
2042         uint8_t op;
2043         const struct scst_sdbops *ptr = NULL;
2044
2045         TRACE_ENTRY();
2046
2047         op = cmd->cdb[0];       /* get the opcode byte */
2048
2049         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
2050                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
2051                 dev_type);
2052
2053         i = scst_scsi_op_list[op];
2054         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
2055                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
2056                         ptr = &scst_scsi_op_table[i];
2057                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
2058                               ptr->ops, ptr->devkey[0], /* disk     */
2059                               ptr->devkey[1],   /* tape     */
2060                               ptr->devkey[2],   /* printer */
2061                               ptr->devkey[3],   /* cpu      */
2062                               ptr->devkey[4],   /* cdr      */
2063                               ptr->devkey[5],   /* cdrom    */
2064                               ptr->devkey[6],   /* scanner */
2065                               ptr->devkey[7],   /* worm     */
2066                               ptr->devkey[8],   /* changer */
2067                               ptr->devkey[9],   /* commdev */
2068                               ptr->op_name);
2069                         TRACE_DBG("direction=%d flags=%d off=%d",
2070                               ptr->direction,
2071                               ptr->flags,
2072                               ptr->off);
2073                         break;
2074                 }
2075                 i++;
2076         }
2077
2078         if (ptr == NULL) {
2079                 /* opcode not found or not currently used */
2080                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
2081                       dev_type);
2082                 res = -1;
2083                 cmd->op_flags = SCST_INFO_INVALID;
2084                 goto out;
2085         }
2086
2087         cmd->cdb_len = SCST_GET_CDB_LEN(op);
2088         cmd->op_name = ptr->op_name;
2089         cmd->data_direction = ptr->direction;
2090         cmd->op_flags = ptr->flags;
2091         res = (*ptr->get_trans_len)(cmd, ptr->off);
2092
2093 out:
2094         TRACE_EXIT();
2095         return res;
2096 }
2097 EXPORT_SYMBOL(scst_get_cdb_info);
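
/*
 * On success the cmd now carries everything needed for further processing:
 * cdb_len, op_name, data_direction, op_flags and bufflen (the latter via
 * the per-opcode get_trans_len callback). A -1 return with op_flags set to
 * SCST_INFO_INVALID means the opcode is unknown for this device type;
 * sorting such a command out is then typically left to the dev handler.
 */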
2098
2099 /*
2100  * Routine to extract a LUN number from an 8-byte LUN structure in
2101  * network byte order (big endian); see SAM-2, Section 4.12.3, page 40.
2102  * Supports three LUN addressing methods: peripheral, flat space
2103  * and logical unit.
2104  */
2105 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2106 {
2107         uint64_t res = NO_SUCH_LUN;
2108         int address_method;
2109
2110         TRACE_ENTRY();
2111
2112         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2113
2114         if (unlikely(len < 2)) {
2115                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2116                         "more", len);
2117                 goto out;
2118         }
2119
2120         if (len > 2) {
2121                 switch (len) {
2122                 case 8:
2123                         if ((*((uint64_t *)lun) &
2124                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2125                                 goto out_err;
2126                         break;
2127                 case 4:
2128                         if (*((uint16_t *)&lun[2]) != 0)
2129                                 goto out_err;
2130                         break;
2131                 case 6:
2132                         if (*((uint32_t *)&lun[2]) != 0)
2133                                 goto out_err;
2134                         break;
2135                 default:
2136                         goto out_err;
2137                 }
2138         }
2139
2140         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
2141         switch (address_method) {
2142         case 0: /* peripheral device addressing method */
2143 #if 0
2144                 if (*lun) {
2145                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
2146                              "peripheral device addressing method 0x%02x, "
2147                              "expected 0", *lun);
2148                         break;
2149                 }
2150                 res = *(lun + 1);
2151                 break;
2152 #else
2153                 /*
2154                  * It appears to be legal to treat this as the flat space
2155                  * addressing method as well.
2156                  */
2157
2158                 /* fall through */
2159 #endif
2160
2161         case 1: /* flat space addressing method */
2162                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2163                 break;
2164
2165         case 2: /* logical unit addressing method */
2166                 if (*lun & 0x3f) {
2167                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2168                                     "addressing method 0x%02x, expected 0",
2169                                     *lun & 0x3f);
2170                         break;
2171                 }
2172                 if (*(lun + 1) & 0xe0) {
2173                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2174                                     "addressing method 0x%02x, expected 0",
2175                                     (*(lun + 1) & 0xe0) >> 5);
2176                         break;
2177                 }
2178                 res = *(lun + 1) & 0x1f;
2179                 break;
2180
2181         case 3: /* extended logical unit addressing method */
2182         default:
2183                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2184                             address_method);
2185                 break;
2186         }
2187
2188 out:
2189         TRACE_EXIT_RES((int)res);
2190         return res;
2191
2192 out_err:
2193         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2194         goto out;
2195 }
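
/*
 * Worked example (illustrative): with the flat space method the LUN is the
 * low 6 bits of byte 0 plus all of byte 1, so an 8-byte field starting
 * 0x40 0x05 (remaining bytes zero) unpacks to LUN 5 and one starting
 * 0x41 0x05 to LUN 0x105. A peripheral-style field starting 0x00 0x05
 * also yields 5, since method 0 falls through to the flat space decoding
 * above.
 */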
2196
2197 int scst_calc_block_shift(int sector_size)
2198 {
2199         int block_shift = 0;
2200         int t;
2201
2202         if (sector_size == 0)
2203                 sector_size = 512;
2204
2205         t = sector_size;
2206         while (1) {
2207                 if ((t & 1) != 0)
2208                         break;
2209                 t >>= 1;
2210                 block_shift++;
2211         }
2212         if (block_shift < 9) {
2213                 PRINT_ERROR("Wrong sector size %d", sector_size);
2214                 block_shift = -1;
2215         }
2216
2217         TRACE_EXIT_RES(block_shift);
2218         return block_shift;
2219 }
2220 EXPORT_SYMBOL(scst_calc_block_shift);
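
/*
 * Worked examples (illustrative): 512 -> 9 and 4096 -> 12. The loop finds
 * the position of the lowest set bit, so a sector size such as 520, whose
 * lowest set bit is 2^3, produces a shift below 9 and is rejected with -1.
 */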
2221
2222 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2223         int (*get_block_shift)(struct scst_cmd *cmd))
2224 {
2225         int res = 0;
2226
2227         TRACE_ENTRY();
2228
2229         /*
2230          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2231          * therefore change them only if necessary
2232          */
2233
2234         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2235               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2236
2237         switch (cmd->cdb[0]) {
2238         case SERVICE_ACTION_IN:
2239                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2240                         cmd->bufflen = READ_CAP16_LEN;
2241                         cmd->data_direction = SCST_DATA_READ;
2242                 }
2243                 break;
2244         case VERIFY_6:
2245         case VERIFY:
2246         case VERIFY_12:
2247         case VERIFY_16:
2248                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2249                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2250                         cmd->bufflen = 0;
2251                         goto set_timeout;
2252                 } else
2253                         cmd->data_len = 0;
2254                 break;
2255         default:
2256                 /* It's all good */
2257                 break;
2258         }
2259
2260         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2261                 /*
2262                  * No need for locks here, since *_detach() cannot be
2263                  * called while there are outstanding commands.
2264                  */
2265                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2266         }
2267
2268 set_timeout:
2269         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2270                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2271         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2272                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2273         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2274                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2275
2276         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2277               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2278
2279         TRACE_EXIT_RES(res);
2280         return res;
2281 }
2282 EXPORT_SYMBOL(scst_sbc_generic_parse);
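
/*
 * Typical use (illustrative sketch, hence the #if 0): a block-device dev
 * handler supplies a callback returning its current block shift and
 * delegates the rest of its parse() to this helper. The dev_priv layout
 * below is hypothetical; the sketch only assumes the handler keeps its
 * private data behind dev->dh_priv.
 */
#if 0
struct example_dev_priv {
        int block_shift;
};

static int example_get_block_shift(struct scst_cmd *cmd)
{
        struct example_dev_priv *p = cmd->dev->dh_priv;

        return p->block_shift;
}

static int example_parse(struct scst_cmd *cmd)
{
        return scst_sbc_generic_parse(cmd, example_get_block_shift);
}
#endif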
2283
2284 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2285         int (*get_block_shift)(struct scst_cmd *cmd))
2286 {
2287         int res = 0;
2288
2289         TRACE_ENTRY();
2290
2291         /*
2292          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2293          * therefore change them only if necessary
2294          */
2295
2296         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2297               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2298
2299         cmd->cdb[1] &= 0x1f;
2300
2301         switch (cmd->cdb[0]) {
2302         case VERIFY_6:
2303         case VERIFY:
2304         case VERIFY_12:
2305         case VERIFY_16:
2306                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2307                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2308                         cmd->bufflen = 0;
2309                         goto set_timeout;
2310                 }
2311                 break;
2312         default:
2313                 /* It's all good */
2314                 break;
2315         }
2316
2317         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2318                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2319
2320 set_timeout:
2321         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2322                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2323         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2324                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2325         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2326                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2327
2328         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2329                 cmd->data_direction);
2330
2331         TRACE_EXIT_RES(res);
2332         return res;
2333 }
2334 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2335
2336 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2337         int (*get_block_shift)(struct scst_cmd *cmd))
2338 {
2339         int res = 0;
2340
2341         TRACE_ENTRY();
2342
2343         /*
2344          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2345          * therefore change them only if necessary
2346          */
2347
2348         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2349               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2350
2351         cmd->cdb[1] &= 0x1f;
2352
2353         switch (cmd->cdb[0]) {
2354         case VERIFY_6:
2355         case VERIFY:
2356         case VERIFY_12:
2357         case VERIFY_16:
2358                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2359                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2360                         cmd->bufflen = 0;
2361                         goto set_timeout;
2362                 }
2363                 break;
2364         default:
2365                 /* It's all good */
2366                 break;
2367         }
2368
2369         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2370                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2371
2372 set_timeout:
2373         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2374                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2375         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2376                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2377         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2378                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2379
2380         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2381                 cmd->data_direction);
2382
2383         TRACE_EXIT_RES(res);
2384         return res;
2385 }
2386 EXPORT_SYMBOL(scst_modisk_generic_parse);
2387
2388 int scst_tape_generic_parse(struct scst_cmd *cmd,
2389         int (*get_block_size)(struct scst_cmd *cmd))
2390 {
2391         int res = 0;
2392
2393         TRACE_ENTRY();
2394
2395         /*
2396          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2397          * therefore change them only if necessary
2398          */
2399
2400         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2401               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2402
2403         if (cmd->cdb[0] == READ_POSITION) {
2404                 int tclp = cmd->cdb[1] & 4;
2405                 int long_bit = cmd->cdb[1] & 2;
2406                 int bt = cmd->cdb[1] & 1;
2407
2408                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2409                         cmd->bufflen =
2410                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2411                         cmd->data_direction = SCST_DATA_READ;
2412                 } else {
2413                         cmd->bufflen = 0;
2414                         cmd->data_direction = SCST_DATA_NONE;
2415                 }
2416         }
2417
2418         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2419                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2420
2421         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2422                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2423         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2424                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2425         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2426                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2427
2428         TRACE_EXIT_RES(res);
2429         return res;
2430 }
2431 EXPORT_SYMBOL(scst_tape_generic_parse);
2432
2433 static int scst_null_parse(struct scst_cmd *cmd)
2434 {
2435         int res = 0;
2436
2437         TRACE_ENTRY();
2438
2439         /*
2440          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2441          * therefore change them only if necessary
2442          */
2443
2444         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2445               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2446 #if 0
2447         switch (cmd->cdb[0]) {
2448         default:
2449                 /* It's all good */
2450                 break;
2451         }
2452 #endif
2453         TRACE_DBG("res %d bufflen %d direct %d",
2454               res, cmd->bufflen, cmd->data_direction);
2455
2456         TRACE_EXIT_RES(res);
2457         return res;
2458 }
2459
2460 int scst_changer_generic_parse(struct scst_cmd *cmd,
2461         int (*nothing)(struct scst_cmd *cmd))
2462 {
2463         int res = scst_null_parse(cmd);
2464
2465         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2466                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2467         else
2468                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2469
2470         return res;
2471 }
2472 EXPORT_SYMBOL(scst_changer_generic_parse);
2473
2474 int scst_processor_generic_parse(struct scst_cmd *cmd,
2475         int (*nothing)(struct scst_cmd *cmd))
2476 {
2477         int res = scst_null_parse(cmd);
2478
2479         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2480                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2481         else
2482                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2483
2484         return res;
2485 }
2486 EXPORT_SYMBOL(scst_processor_generic_parse);
2487
2488 int scst_raid_generic_parse(struct scst_cmd *cmd,
2489         int (*nothing)(struct scst_cmd *cmd))
2490 {
2491         int res = scst_null_parse(cmd);
2492
2493         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2494                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2495         else
2496                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2497
2498         return res;
2499 }
2500 EXPORT_SYMBOL(scst_raid_generic_parse);
2501
2502 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2503         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2504 {
2505         int opcode = cmd->cdb[0];
2506         int status = cmd->status;
2507         int res = SCST_CMD_STATE_DEFAULT;
2508
2509         TRACE_ENTRY();
2510
2511         /*
2512          * SCST sets good defaults for cmd->is_send_status and
2513          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2514          * therefore change them only if necessary
2515          */
2516
2517         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2518                 switch (opcode) {
2519                 case READ_CAPACITY:
2520                 {
2521                         /* Always keep track of disk capacity */
2522                         int buffer_size, sector_size, sh;
2523                         uint8_t *buffer;
2524
2525                         buffer_size = scst_get_buf_first(cmd, &buffer);
2526                         if (unlikely(buffer_size <= 0)) {
2527                                 if (buffer_size < 0) {
2528                                         PRINT_ERROR("%s: Unable to get the"
2529                                         " buffer (%d)", __func__, buffer_size);
2530                                 }
2531                                 goto out;
2532                         }
2533
2534                         sector_size =
2535                             ((buffer[4] << 24) | (buffer[5] << 16) |
2536                              (buffer[6] << 8) | (buffer[7] << 0));
2537                         scst_put_buf(cmd, buffer);
2538                         if (sector_size != 0)
2539                                 sh = scst_calc_block_shift(sector_size);
2540                         else
2541                                 sh = 0;
2542                         set_block_shift(cmd, sh);
2543                         TRACE_DBG("block_shift %d", sh);
2544                         break;
2545                 }
2546                 default:
2547                         /* It's all good */
2548                         break;
2549                 }
2550         }
2551
2552         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2553               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2554
2555 out:
2556         TRACE_EXIT_RES(res);
2557         return res;
2558 }
2559 EXPORT_SYMBOL(scst_block_generic_dev_done);
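
/*
 * Example (illustrative): READ CAPACITY returns the block length in bytes
 * 4..7 of its payload, big endian, so 0x00 0x00 0x02 0x00 there decodes to
 * sector_size 512 and hence block_shift 9 in the code above.
 */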
2560
2561 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2562         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2563 {
2564         int opcode = cmd->cdb[0];
2565         int res = SCST_CMD_STATE_DEFAULT;
2566         int buffer_size, bs;
2567         uint8_t *buffer = NULL;
2568
2569         TRACE_ENTRY();
2570
2571         /*
2572          * SCST sets good defaults for cmd->is_send_status and
2573          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2574          * therefore change them only if necessary
2575          */
2576
2577         switch (opcode) {
2578         case MODE_SENSE:
2579         case MODE_SELECT:
2580                 buffer_size = scst_get_buf_first(cmd, &buffer);
2581                 if (unlikely(buffer_size <= 0)) {
2582                         if (buffer_size < 0) {
2583                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2584                                         __func__, buffer_size);
2585                         }
2586                         goto out;
2587                 }
2588                 break;
2589         }
2590
2591         switch (opcode) {
2592         case MODE_SENSE:
2593                 TRACE_DBG("%s", "MODE_SENSE");
2594                 if ((cmd->cdb[2] & 0xC0) == 0) {
2595                         if (buffer[3] == 8) {
2596                                 bs = (buffer[9] << 16) |
2597                                     (buffer[10] << 8) | buffer[11];
2598                                 set_block_size(cmd, bs);
2599                         }
2600                 }
2601                 break;
2602         case MODE_SELECT:
2603                 TRACE_DBG("%s", "MODE_SELECT");
2604                 if (buffer[3] == 8) {
2605                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2606                             (buffer[11]);
2607                         set_block_size(cmd, bs);
2608                 }
2609                 break;
2610         default:
2611                 /* It's all good */
2612                 break;
2613         }
2614
2615         switch (opcode) {
2616         case MODE_SENSE:
2617         case MODE_SELECT:
2618                 scst_put_buf(cmd, buffer);
2619                 break;
2620         }
2621
2622 out:
2623         TRACE_EXIT_RES(res);
2624         return res;
2625 }
2626 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2627
2628 static void scst_check_internal_sense(struct scst_device *dev, int result,
2629         uint8_t *sense, int sense_len)
2630 {
2631         TRACE_ENTRY();
2632
2633         if (host_byte(result) == DID_RESET) {
2634                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2635                         "reset UA");
2636                 scst_set_sense(sense, sense_len,
2637                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2638                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2639         } else if ((status_byte(result) == CHECK_CONDITION) &&
2640                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2641                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2642
2643         TRACE_EXIT();
2644         return;
2645 }
2646
2647 int scst_obtain_device_parameters(struct scst_device *dev)
2648 {
2649         int res = 0, i;
2650         uint8_t cmd[16];
2651         uint8_t buffer[4+0x0A];
2652         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2653
2654         TRACE_ENTRY();
2655
2656         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2657
2658         for (i = 0; i < 5; i++) {
2659                 /* Get control mode page */
2660                 memset(cmd, 0, sizeof(cmd));
2661                 cmd[0] = MODE_SENSE;
2662                 cmd[1] = 8; /* DBD */
2663                 cmd[2] = 0x0A;
2664                 cmd[4] = sizeof(buffer);
2665
2666                 memset(buffer, 0, sizeof(buffer));
2667                 memset(sense_buffer, 0, sizeof(sense_buffer));
2668
2669                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2670                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2671                                 sizeof(buffer), sense_buffer, 15, 0, 0);
2672
2673                 TRACE_DBG("MODE_SENSE done: %x", res);
2674
2675                 if (scsi_status_is_good(res)) {
2676                         int q;
2677
2678                         PRINT_BUFF_FLAG(TRACE_SCSI,
2679                                 "Returned control mode page data",
2680                                 buffer, sizeof(buffer));
2681
2682                         dev->tst = buffer[4+2] >> 5;
2683                         q = buffer[4+3] >> 4;
2684                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2685                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2686                                         "%d:%d:%d:%d", q,
2687                                         dev->scsi_dev->host->host_no,
2688                                         dev->scsi_dev->channel,
2689                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2690                         }
2691                         dev->queue_alg = q;
2692                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2693                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2694
2695                         /*
2696                          * Unfortunately, the SCSI midlayer doesn't provide a
2697                          * way to specify a command's task attribute, so we can
2698                          * only rely on the device's restricted reordering.
2699                          */
2700                         dev->has_own_order_mgmt = !dev->queue_alg;
2701
2702                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2703                                 "Device %d:%d:%d:%d: TST %x, "
2704                                 "QUEUE ALG %x, SWP %x, TAS %x, "
2705                                 "has_own_order_mgmt %d",
2706                                 dev->scsi_dev->host->host_no,
2707                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2708                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2709                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2710
2711                         goto out;
2712                 } else {
2713 #if 0
2714                         if ((status_byte(res) == CHECK_CONDITION) &&
2715 #else
2716                         /*
2717                          * 3ware controller is buggy and returns CONDITION_GOOD
2718                          * instead of CHECK_CONDITION
2719                          */
2720                         if (
2721 #endif
2722                             SCST_SENSE_VALID(sense_buffer)) {
2723                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2724                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2725                                                 "Device %d:%d:%d:%d doesn't"
2726                                                 " support control mode page,"
2727                                                 " using defaults: TST %x,"
2728                                                 " QUEUE ALG %x, SWP %x, TAS %x,"
2729                                                 " has_own_order_mgmt %d",
2730                                                 dev->scsi_dev->host->host_no,
2731                                                 dev->scsi_dev->channel,
2732                                                 dev->scsi_dev->id,
2733                                                 dev->scsi_dev->lun,
2734                                                 dev->tst,
2735                                                 dev->queue_alg,
2736                                                 dev->swp,
2737                                                 dev->tas,
2738                                                 dev->has_own_order_mgmt);
2739                                         res = 0;
2740                                         goto out;
2741                                 } else if (sense_buffer[2] == NOT_READY) {
2742                                         TRACE(TRACE_SCSI,
2743                                                 "Device %d:%d:%d:%d not ready",
2744                                                 dev->scsi_dev->host->host_no,
2745                                                 dev->scsi_dev->channel,
2746                                                 dev->scsi_dev->id,
2747                                                 dev->scsi_dev->lun);
2748                                         res = 0;
2749                                         goto out;
2750                                 }
2751                         } else {
2752                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2753                                         "Internal MODE SENSE to "
2754                                         "device %d:%d:%d:%d failed: %x",
2755                                         dev->scsi_dev->host->host_no,
2756                                         dev->scsi_dev->channel,
2757                                         dev->scsi_dev->id,
2758                                         dev->scsi_dev->lun, res);
2759                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2760                                         "MODE SENSE sense",
2761                                         sense_buffer, sizeof(sense_buffer));
2762                         }
2763                         scst_check_internal_sense(dev, res, sense_buffer,
2764                                         sizeof(sense_buffer));
2765                 }
2766         }
2767         res = -ENODEV;
2768
2769 out:
2770         TRACE_EXIT_RES(res);
2771         return res;
2772 }
2773 EXPORT_SYMBOL(scst_obtain_device_parameters);
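
/*
 * For reference, the fields decoded above live in the control mode page
 * (page code 0x0A) at these offsets past the 4-byte mode parameter header:
 *
 *   byte 2, bits 7..5: TST (task set type)
 *   byte 3, bits 7..4: QUEUE ALGORITHM MODIFIER
 *   byte 4, bit 3:     SWP (software write protect)
 *   byte 5, bit 6:     TAS (task aborted status)
 *
 * See SPC-2/SPC-3 for the authoritative definitions.
 */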
2774
2775 /* Called under dev_lock and BH off */
2776 void scst_process_reset(struct scst_device *dev,
2777         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2778         struct scst_mgmt_cmd *mcmd, bool setUA)
2779 {
2780         struct scst_tgt_dev *tgt_dev;
2781         struct scst_cmd *cmd, *tcmd;
2782
2783         TRACE_ENTRY();
2784
2785         /* Clear RESERVE'ation, if necessary */
2786         if (dev->dev_reserved) {
2787                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2788                                     dev_tgt_dev_list_entry) {
2789                         TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
2790                                 "tgt_dev lun %lld",
2791                                 (long long unsigned int)tgt_dev->lun);
2792                         clear_bit(SCST_TGT_DEV_RESERVED,
2793                                   &tgt_dev->tgt_dev_flags);
2794                 }
2795                 dev->dev_reserved = 0;
2796                 /*
2797                  * There is no need to send RELEASE, since the device is
2798                  * going to be reset. Moreover, since we could be inside a
2799                  * RESET TM function, sending it might be dangerous.
2800                  */
2801         }
2802
2803         dev->dev_double_ua_possible = 1;
2804
2805         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2806                 dev_tgt_dev_list_entry) {
2807                 struct scst_session *sess = tgt_dev->sess;
2808
2809                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2810                 scst_free_all_UA(tgt_dev);
2811                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2812
2813                 spin_lock_irq(&sess->sess_list_lock);
2814
2815                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2816                 list_for_each_entry(cmd, &sess->search_cmd_list,
2817                                 search_cmd_list_entry) {
2818                         if (cmd == exclude_cmd)
2819                                 continue;
2820                         if ((cmd->tgt_dev == tgt_dev) ||
2821                             ((cmd->tgt_dev == NULL) &&
2822                              (cmd->lun == tgt_dev->lun))) {
2823                                 scst_abort_cmd(cmd, mcmd,
2824                                         (tgt_dev->sess != originator), 0);
2825                         }
2826                 }
2827                 spin_unlock_irq(&sess->sess_list_lock);
2828         }
2829
2830         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2831                                 blocked_cmd_list_entry) {
2832                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2833                         list_del(&cmd->blocked_cmd_list_entry);
2834                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2835                                 "to active cmd list", cmd);
2836                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2837                         list_add_tail(&cmd->cmd_list_entry,
2838                                 &cmd->cmd_lists->active_cmd_list);
2839                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2840                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2841                 }
2842         }
2843
2844         if (setUA) {
2845                 /* BH already off */
2846                 spin_lock(&scst_temp_UA_lock);
2847                 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2848                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2849                 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2850                         sizeof(scst_temp_UA));
2851                 spin_unlock(&scst_temp_UA_lock);
2852         }
2853
2854         TRACE_EXIT();
2855         return;
2856 }
2857
2858 int scst_set_pending_UA(struct scst_cmd *cmd)
2859 {
2860         int res = 0;
2861         struct scst_tgt_dev_UA *UA_entry;
2862
2863         TRACE_ENTRY();
2864
2865         TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);
2866
2867         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2868
2869         /* UA list could be cleared behind us, so retest */
2870         if (list_empty(&cmd->tgt_dev->UA_list)) {
2871                 TRACE_DBG("%s",
2872                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2873                 res = -1;
2874                 goto out_unlock;
2875         }
2876
2877         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2878                               UA_list_entry);
2879
2880         TRACE_DBG("next %p UA_entry %p",
2881               cmd->tgt_dev->UA_list.next, UA_entry);
2882
2883         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2884                 sizeof(UA_entry->UA_sense_buffer));
2885
2886         cmd->ua_ignore = 1;
2887
2888         list_del(&UA_entry->UA_list_entry);
2889
2890         mempool_free(UA_entry, scst_ua_mempool);
2891
2892         if (list_empty(&cmd->tgt_dev->UA_list)) {
2893                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2894                           &cmd->tgt_dev->tgt_dev_flags);
2895         }
2896
2897         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2898
2899 out:
2900         TRACE_EXIT_RES(res);
2901         return res;
2902
2903 out_unlock:
2904         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2905         goto out;
2906 }
2907
2908 /* Called under tgt_dev_lock and BH off */
2909 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2910         const uint8_t *sense, int sense_len, int head)
2911 {
2912         struct scst_tgt_dev_UA *UA_entry = NULL;
2913
2914         TRACE_ENTRY();
2915
2916         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2917         if (UA_entry == NULL) {
2918                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2919                      "allocation failed. The UNIT ATTENTION "
2920                      "on some sessions will be missed");
2921                 PRINT_BUFFER("Lost UA", sense, sense_len);
2922                 goto out;
2923         }
2924         memset(UA_entry, 0, sizeof(*UA_entry));
2925
2926         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2927                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2928         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2929
2930         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2931
2932         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2933
2934         if (head)
2935                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2936         else
2937                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2938
2939 out:
2940         TRACE_EXIT();
2941         return;
2942 }
2943
2944 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2945         const uint8_t *sense, int sense_len, int head)
2946 {
2947         int skip_UA = 0;
2948         struct scst_tgt_dev_UA *UA_entry_tmp;
2949
2950         TRACE_ENTRY();
2951
2952         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2953
2954         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2955                             UA_list_entry) {
2956                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2957                            sense_len) == 0) {
2958                         TRACE_MGMT_DBG("%s", "UA already exists");
2959                         skip_UA = 1;
2960                         break;
2961                 }
2962         }
2963
2964         if (skip_UA == 0)
2965                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2966
2967         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2968
2969         TRACE_EXIT();
2970         return;
2971 }
2972
2973 /* Called under dev_lock and BH off */
2974 void scst_dev_check_set_local_UA(struct scst_device *dev,
2975         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2976 {
2977         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2978
2979         TRACE_ENTRY();
2980
2981         if (exclude != NULL)
2982                 exclude_tgt_dev = exclude->tgt_dev;
2983
2984         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2985                         dev_tgt_dev_list_entry) {
2986                 if (tgt_dev != exclude_tgt_dev)
2987                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2988         }
2989
2990         TRACE_EXIT();
2991         return;
2992 }
2993
2994 /* Called under dev_lock and BH off */
2995 void __scst_dev_check_set_UA(struct scst_device *dev,
2996         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2997 {
2998         TRACE_ENTRY();
2999
3000         TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);
3001
3002         /* Check for reset UA */
3003         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
3004                 scst_process_reset(dev,
3005                                    (exclude != NULL) ? exclude->sess : NULL,
3006                                    exclude, NULL, false);
3007
3008         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
3009
3010         TRACE_EXIT();
3011         return;
3012 }
3013
3014 /* Called under tgt_dev_lock or when tgt_dev is unused */
3015 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
3016 {
3017         struct scst_tgt_dev_UA *UA_entry, *t;
3018
3019         TRACE_ENTRY();
3020
3021         list_for_each_entry_safe(UA_entry, t,
3022                                  &tgt_dev->UA_list, UA_list_entry) {
3023                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
3024                                (long long unsigned int)tgt_dev->lun);
3025                 list_del(&UA_entry->UA_list_entry);
3026                 mempool_free(UA_entry, scst_ua_mempool);
3027         }
3028         INIT_LIST_HEAD(&tgt_dev->UA_list);
3029         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
3030
3031         TRACE_EXIT();
3032         return;
3033 }
3034
3035 /* No locks */
3036 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
3037 {
3038         struct scst_cmd *res = NULL, *cmd, *t;
3039         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
3040
3041         spin_lock_irq(&tgt_dev->sn_lock);
3042
3043         if (unlikely(tgt_dev->hq_cmd_count != 0))
3044                 goto out_unlock;
3045
3046 restart:
3047         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
3048                                 sn_cmd_list_entry) {
3049                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3050                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3051                 if (cmd->sn == expected_sn) {
3052                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
3053                                 cmd, cmd->sn, cmd->sn_set);
3054                         tgt_dev->def_cmd_count--;
3055                         list_del(&cmd->sn_cmd_list_entry);
3056                         if (res == NULL)
3057                                 res = cmd;
3058                         else {
3059                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3060                                 TRACE_SN("Adding cmd %p to active cmd list",
3061                                         cmd);
3062                                 list_add_tail(&cmd->cmd_list_entry,
3063                                         &cmd->cmd_lists->active_cmd_list);
3064                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3065                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3066                         }
3067                 }
3068         }
3069         if (res != NULL)
3070                 goto out_unlock;
3071
3072         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3073                                 sn_cmd_list_entry) {
3074                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3075                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3076                 if (cmd->sn == expected_sn) {
3077                         atomic_t *slot = cmd->sn_slot;
3078                         /*
3079                          * !! At this point any pointer in cmd, except !!
3080                          * !! sn_slot and sn_cmd_list_entry, could     !!
3081                          * !! already have been destroyed              !!
3082                          */
3083                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3084                                  cmd,
3085                                  (long long unsigned int)cmd->tag,
3086                                  cmd->sn);
3087                         tgt_dev->def_cmd_count--;
3088                         list_del(&cmd->sn_cmd_list_entry);
3089                         spin_unlock_irq(&tgt_dev->sn_lock);
3090                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3091                                              &cmd->cmd_flags))
3092                                 scst_destroy_put_cmd(cmd);
3093                         scst_inc_expected_sn(tgt_dev, slot);
3094                         expected_sn = tgt_dev->expected_sn;
3095                         spin_lock_irq(&tgt_dev->sn_lock);
3096                         goto restart;
3097                 }
3098         }
3099
3100 out_unlock:
3101         spin_unlock_irq(&tgt_dev->sn_lock);
3102         return res;
3103 }
3104
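/*
 * Per-thread private data registration for dev handlers. Usage sketch
 * (the my_* names are illustrative only):
 *
 *	struct my_thr_data {
 *		struct scst_thr_data_hdr hdr;
 *		int my_state;
 *	};
 *
 *	struct my_thr_data *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *	scst_add_thr_data(tgt_dev, &p->hdr, my_free_fn);
 *	...
 *	struct scst_thr_data_hdr *d = __scst_find_thr_data(tgt_dev, current);
 *	if (d != NULL) {
 *		struct my_thr_data *td = container_of(d, struct my_thr_data, hdr);
 *		...
 *		scst_thr_data_put(d);	pairs with the get in the find above
 *	}
 */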
3105 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3106         struct scst_thr_data_hdr *data,
3107         void (*free_fn) (struct scst_thr_data_hdr *data))
3108 {
3109         data->owner_thr = current;
3110         atomic_set(&data->ref, 1);
3111         EXTRACHECKS_BUG_ON(free_fn == NULL);
3112         data->free_fn = free_fn;
3113         spin_lock(&tgt_dev->thr_data_lock);
3114         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3115         spin_unlock(&tgt_dev->thr_data_lock);
3116 }
3117 EXPORT_SYMBOL(scst_add_thr_data);
3118
3119 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3120 {
3121         spin_lock(&tgt_dev->thr_data_lock);
3122         while (!list_empty(&tgt_dev->thr_data_list)) {
3123                 struct scst_thr_data_hdr *d = list_entry(
3124                                 tgt_dev->thr_data_list.next, typeof(*d),
3125                                 thr_data_list_entry);
3126                 list_del(&d->thr_data_list_entry);
3127                 spin_unlock(&tgt_dev->thr_data_lock);
3128                 scst_thr_data_put(d);
3129                 spin_lock(&tgt_dev->thr_data_lock);
3130         }
3131         spin_unlock(&tgt_dev->thr_data_lock);
3132         return;
3133 }
3134 EXPORT_SYMBOL(scst_del_all_thr_data);
3135
3136 void scst_dev_del_all_thr_data(struct scst_device *dev)
3137 {
3138         struct scst_tgt_dev *tgt_dev;
3139
3140         TRACE_ENTRY();
3141
3142         mutex_lock(&scst_mutex);
3143
3144         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3145                                 dev_tgt_dev_list_entry) {
3146                 scst_del_all_thr_data(tgt_dev);
3147         }
3148
3149         mutex_unlock(&scst_mutex);
3150
3151         TRACE_EXIT();
3152         return;
3153 }
3154 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
3155
3156 struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
3157         struct task_struct *tsk)
3158 {
3159         struct scst_thr_data_hdr *res = NULL, *d;
3160
3161         spin_lock(&tgt_dev->thr_data_lock);
3162         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
3163                 if (d->owner_thr == tsk) {
3164                         res = d;
3165                         scst_thr_data_get(res);
3166                         break;
3167                 }
3168         }
3169         spin_unlock(&tgt_dev->thr_data_lock);
3170         return res;
3171 }
3172 EXPORT_SYMBOL(__scst_find_thr_data);
3173
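/*
 * Device blocking: while dev->block_count > 0, new commands are not sent
 * to the device but parked on dev->blocked_cmd_list; scst_unblock_cmds()
 * reactivates them when the count drops back to 0. Block and unblock
 * calls must be paired, e.g. (sketch):
 *
 *	scst_block_dev_cmd(cmd, 0);	waits until no commands are on the device
 *	... perform the management action ...
 *	scst_unblock_dev_cmd(cmd);
 */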
3174 /* dev_lock supposed to be held and BH disabled */
3175 void __scst_block_dev(struct scst_device *dev)
3176 {
3177         dev->block_count++;
3178         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3179 }
3180
3181 /* No locks */
3182 static void scst_block_dev(struct scst_device *dev, int outstanding)
3183 {
3184         spin_lock_bh(&dev->dev_lock);
3185         __scst_block_dev(dev);
3186         spin_unlock_bh(&dev->dev_lock);
3187
3188         /*
3189          * A memory barrier is necessary here, because the read of
3190          * on_dev_count in wait_event() below must happen after we
3191          * increased block_count. Otherwise, we could miss the wake up
3192          * from scst_dec_on_dev_cmd(). An explicit barrier is used,
3193          * because spin_unlock_bh() doesn't provide full barrier semantics.
3194          */
3195         smp_mb();
3196
3197         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
3198                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3199         wait_event(dev->on_dev_waitQ,
3200                 atomic_read(&dev->on_dev_count) <= outstanding);
3201         TRACE_MGMT_DBG("%s", "wait_event() returned");
3202 }
3203
3204 /* No locks */
3205 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3206 {
3207         sBUG_ON(cmd->needs_unblocking);
3208
3209         cmd->needs_unblocking = 1;
3210         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3211                        cmd, (long long unsigned int)cmd->tag);
3212
3213         scst_block_dev(cmd->dev, outstanding);
3214 }
3215
3216 /* No locks */
3217 void scst_unblock_dev(struct scst_device *dev)
3218 {
3219         spin_lock_bh(&dev->dev_lock);
3220         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3221                 dev->block_count-1, dev);
3222         if (--dev->block_count == 0)
3223                 scst_unblock_cmds(dev);
3224         spin_unlock_bh(&dev->dev_lock);
3225         sBUG_ON(dev->block_count < 0);
3226 }
3227
3228 /* No locks */
3229 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3230 {
3231         scst_unblock_dev(cmd->dev);
3232         cmd->needs_unblocking = 0;
3233 }
3234
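/*
 * Accounts cmd as being on the device and checks the blocking state.
 * Returns 0 if the caller may proceed with the command now, or 1 if the
 * command was parked on dev->blocked_cmd_list for later reactivation; in
 * the latter case the on_dev accounting has already been undone via
 * scst_dec_on_dev_cmd().
 */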
3235 /* No locks */
3236 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3237 {
3238         int res = 0;
3239         struct scst_device *dev = cmd->dev;
3240
3241         TRACE_ENTRY();
3242
3243         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3244
3245         atomic_inc(&dev->on_dev_count);
3246         cmd->dec_on_dev_needed = 1;
3247         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3248
3249         if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
3250                 /*
3251                  * The original command may already have blocked the device,
3252                  * so the internal REQUEST SENSE command must always pass.
3253                  */
3254                 goto out;
3255         }
3256
3257 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3258         spin_lock_bh(&dev->dev_lock);
3259         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3260                 goto out_unlock;
3261         if (dev->block_count > 0) {
3262                 scst_dec_on_dev_cmd(cmd);
3263                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3264                         "serializing (tag %llu, dev %p)", cmd, (long long unsigned int)cmd->tag, dev);
3265                 list_add_tail(&cmd->blocked_cmd_list_entry,
3266                               &dev->blocked_cmd_list);
3267                 res = 1;
3268         } else {
3269                 __scst_block_dev(dev);
3270                 cmd->inc_blocking = 1;
3271         }
3272         spin_unlock_bh(&dev->dev_lock);
3273         goto out;
3274 #else
3275 repeat:
3276         if (unlikely(dev->block_count > 0)) {
3277                 spin_lock_bh(&dev->dev_lock);
3278                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3279                         goto out_unlock;
3280                 if (dev->block_count > 0) {
3281                         scst_dec_on_dev_cmd(cmd);
3282                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
3283                                 "(tag %llu, dev %p)", cmd,
3284                                 (long long unsigned int)cmd->tag, dev);
3285                         list_add_tail(&cmd->blocked_cmd_list_entry,
3286                                       &dev->blocked_cmd_list);
3287                         res = 1;
3288                         spin_unlock_bh(&dev->dev_lock);
3289                         goto out;
3290                 } else {
3291                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3292                                 "continuing");
3293                 }
3294                 spin_unlock_bh(&dev->dev_lock);
3295         }
3296         if (unlikely(dev->dev_double_ua_possible)) {
3297                 spin_lock_bh(&dev->dev_lock);
3298                 if (dev->block_count == 0) {
3299                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3300                                 "cmds due to possible double reset UA (dev %p)",
3301                                 cmd, (long long unsigned int)cmd->tag, dev);
3302                         __scst_block_dev(dev);
3303                         cmd->inc_blocking = 1;
3304                 } else {
3305                         spin_unlock_bh(&dev->dev_lock);
3306                         TRACE_MGMT_DBG("Somebody blocked the device, "
3307                                 "repeating (count %d)", dev->block_count);
3308                         goto repeat;
3309                 }
3310                 spin_unlock_bh(&dev->dev_lock);
3311         }
3312 #endif
3313
3314 out:
3315         TRACE_EXIT_RES(res);
3316         return res;
3317
3318 out_unlock:
3319         spin_unlock_bh(&dev->dev_lock);
3320         goto out;
3321 }
3322
3323 /* Called under dev_lock */
3324 static void scst_unblock_cmds(struct scst_device *dev)
3325 {
3326 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3327         struct scst_cmd *cmd, *t;
3328         unsigned long flags;
3329
3330         TRACE_ENTRY();
3331
3332         local_irq_save(flags);
3333         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3334                                  blocked_cmd_list_entry) {
3335                 int brk = 0;
3336                 /*
3337                  * Since only one cmd at a time is being executed, expected_sn
3338                  * can't change behind us while the corresponding cmd is on
3339                  * blocked_cmd_list, but we could be called before
3340                  * scst_inc_expected_sn().
3341                  *
3342                  * For HQ commands the SN is not set.
3343                  */
3344                 if (likely(!cmd->internal && cmd->sn_set)) {
3345                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3346                         if (cmd->tgt_dev == NULL)
3347                                 sBUG();
3348                         expected_sn = cmd->tgt_dev->expected_sn;
3349                         if (cmd->sn == expected_sn)
3350                                 brk = 1;
3351                         else if (cmd->sn != (expected_sn+1))
3352                                 continue;
3353                 }
3354
3355                 list_del(&cmd->blocked_cmd_list_entry);
3356                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3357                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3358                 list_add(&cmd->cmd_list_entry,
3359                          &cmd->cmd_lists->active_cmd_list);
3360                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3361                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3362                 if (brk)
3363                         break;
3364         }
3365         local_irq_restore(flags);
3366 #else /* CONFIG_SCST_STRICT_SERIALIZING */
3367         struct scst_cmd *cmd, *tcmd;
3368         unsigned long flags;
3369
3370         TRACE_ENTRY();
3371
3372         local_irq_save(flags);
3373         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3374                                  blocked_cmd_list_entry) {
3375                 list_del(&cmd->blocked_cmd_list_entry);
3376                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3377                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3378                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3379                         list_add(&cmd->cmd_list_entry,
3380                                 &cmd->cmd_lists->active_cmd_list);
3381                 else
3382                         list_add_tail(&cmd->cmd_list_entry,
3383                                 &cmd->cmd_lists->active_cmd_list);
3384                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3385                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3386         }
3387         local_irq_restore(flags);
3388 #endif /* CONFIG_SCST_STRICT_SERIALIZING */
3389
3390         TRACE_EXIT();
3391         return;
3392 }
3393
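/*
 * Handles a command that finished out of SN order: if it holds the
 * currently expected SN, advance expected_sn and reactivate any deferred
 * commands that became eligible; otherwise park it on skipped_sn_list so
 * __scst_check_deferred_commands() can later skip over its SN.
 */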
3394 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3395         struct scst_cmd *out_of_sn_cmd)
3396 {
3397         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3398
3399         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3400                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3401                 scst_make_deferred_commands_active(tgt_dev);
3402         } else {
3403                 out_of_sn_cmd->out_of_sn = 1;
3404                 spin_lock_irq(&tgt_dev->sn_lock);
3405                 tgt_dev->def_cmd_count++;
3406                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3407                               &tgt_dev->skipped_sn_list);
3408                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
3409                         " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3410                         tgt_dev->expected_sn);
3411                 spin_unlock_irq(&tgt_dev->sn_lock);
3412         }
3413
3414         return;
3415 }
3416
3417 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3418         struct scst_cmd *out_of_sn_cmd)
3419 {
3420         TRACE_ENTRY();
3421
3422         if (!out_of_sn_cmd->sn_set) {
3423                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3424                 goto out;
3425         }
3426
3427         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3428
3429 out:
3430         TRACE_EXIT();
3431         return;
3432 }
3433
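/*
 * Called when a HEAD OF QUEUE command finishes. HQ commands bypass SN
 * ordering, but while any of them is in flight SN-ordered commands stay
 * deferred; the last HQ command to finish releases them.
 */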
3434 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3435 {
3436         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3437
3438         TRACE_ENTRY();
3439
3440         if (!cmd->hq_cmd_inced)
3441                 goto out;
3442
3443         spin_lock_irq(&tgt_dev->sn_lock);
3444         tgt_dev->hq_cmd_count--;
3445         spin_unlock_irq(&tgt_dev->sn_lock);
3446
3447         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3448
3449         /*
3450          * There is no problem with checking hq_cmd_count without the
3451          * lock held. In the worst case we will only get an unneeded
3452          * run of the deferred commands.
3453          */
3454         if (tgt_dev->hq_cmd_count == 0)
3455                 scst_make_deferred_commands_active(tgt_dev);
3456
3457 out:
3458         TRACE_EXIT();
3459         return;
3460 }
3461
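/*
 * Status delivery for a command aborted on behalf of another initiator
 * depends on the TAS bit of the device's Control mode page: with TAS set,
 * TASK ABORTED status is returned; otherwise the command is dropped
 * without any notification, as SAM requires.
 */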
3462 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3463 {
3464         TRACE_ENTRY();
3465
3466         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3467                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3468                 atomic_read(&scst_cmd_count));
3469
3470         scst_done_cmd_mgmt(cmd);
3471
3472         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3473                 if (cmd->completed) {
3474                         /* It's completed and it's OK to return its result */
3475                         goto out;
3476                 }
3477
3478                 if (cmd->dev->tas) {
3479                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3480                                 "(tag %llu), returning TASK ABORTED ", cmd,
3481                                 (long long unsigned int)cmd->tag);
3482                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3483                 } else {
3484                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3485                                 "(tag %llu), aborting without delivery or "
3486                                 "notification",
3487                                 cmd, (long long unsigned int)cmd->tag);
3488                         /*
3489                          * There is no need to check/requeue possible UA,
3490                          * because, if it exists, it will be delivered
3491                          * by the "completed" branch above.
3492                          */
3493                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3494                 }
3495         }
3496
3497 out:
3498         TRACE_EXIT();
3499         return;
3500 }
3501
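/*
 * scst_scsi_op_table[] is sorted by opcode; this builds the opcode ->
 * first-matching-index table scst_scsi_op_list[], with SCST_CDB_TBL_SIZE
 * marking opcodes that have no entry. Lookup sketch (illustrative of how
 * CDB decoding is expected to walk the table):
 *
 *	i = scst_scsi_op_list[cdb[0]];
 *	while ((i < SCST_CDB_TBL_SIZE) &&
 *	       (scst_scsi_op_table[i].ops == cdb[0])) {
 *		... check the device type, then use scst_scsi_op_table[i] ...
 *		i++;
 *	}
 */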
3502 void __init scst_scsi_op_list_init(void)
3503 {
3504         int i;
3505         uint8_t op = 0xff;
3506
3507         TRACE_ENTRY();
3508
3509         for (i = 0; i < 256; i++)
3510                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3511
3512         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3513                 if (scst_scsi_op_table[i].ops != op) {
3514                         op = scst_scsi_op_table[i].ops;
3515                         scst_scsi_op_list[op] = i;
3516                 }
3517         }
3518
3519         TRACE_EXIT();
3520         return;
3521 }
3522
3523 #ifdef CONFIG_SCST_DEBUG
3524 /* Original taken from the XFS code */
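/*
 * This is the Park-Miller "minimal standard" generator (multiplier 16807,
 * modulus M = 2^31 - 1) implemented with Schrage's method to avoid
 * overflow: 127773 = M / 16807 and 2836 = M % 16807.
 */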
3525 unsigned long scst_random(void)
3526 {
3527         static int Inited;
3528         static unsigned long RandomValue;
3529         static DEFINE_SPINLOCK(lock);
3530         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3531         register long rv;
3532         register long lo;
3533         register long hi;
3534         unsigned long flags;
3535
3536         spin_lock_irqsave(&lock, flags);
3537         if (!Inited) {
3538                 RandomValue = jiffies;
3539                 Inited = 1;
3540         }
3541         rv = RandomValue;
3542         hi = rv / 127773;
3543         lo = rv % 127773;
3544         rv = 16807 * lo - 2836 * hi;
3545         if (rv <= 0)
3546                 rv += 2147483647;
3547         RandomValue = rv;
3548         spin_unlock_irqrestore(&lock, flags);
3549         return rv;
3550 }
3551 EXPORT_SYMBOL(scst_random);
3552 #endif
3553
3554 #ifdef CONFIG_SCST_DEBUG_TM
3555
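/*
 * TM debugging artificially delays commands on LUN 0 of the default ACG
 * to provoke the initiator's error recovery: first aborts
 * (TM_DBG_STATE_ABORT), then resets (TM_DBG_STATE_RESET) and, optionally,
 * going offline (TM_DBG_STATE_OFFLINE, only with
 * CONFIG_SCST_TM_DBG_GO_OFFLINE). tm_dbg_change_state() advances the
 * state machine after tm_dbg_on_state_num_passes[] passes of each state.
 */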
3556 #define TM_DBG_STATE_ABORT              0
3557 #define TM_DBG_STATE_RESET              1
3558 #define TM_DBG_STATE_OFFLINE            2
3559
3560 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
3561
3562 static void tm_dbg_timer_fn(unsigned long arg);
3563
3564 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3565 /* All serialized by scst_tm_dbg_lock */
3566 static struct {
3567         unsigned int tm_dbg_release:1;
3568         unsigned int tm_dbg_blocked:1;
3569 } tm_dbg_flags;
3570 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3571 static int tm_dbg_delayed_cmds_count;
3572 static int tm_dbg_passed_cmds_count;
3573 static int tm_dbg_state;
3574 static int tm_dbg_on_state_passes;
3575 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3576 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3577
3578 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3579
3580 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3581         struct scst_acg_dev *acg_dev)
3582 {
3583         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3584                 unsigned long flags;
3585                 /* Do TM debugging only for LUN 0 */
3586                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3587                 tm_dbg_p_cmd_list_waitQ =
3588                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3589                 tm_dbg_state = INIT_TM_DBG_STATE;
3590                 tm_dbg_on_state_passes =
3591                         tm_dbg_on_state_num_passes[tm_dbg_state];
3592                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3593                 PRINT_INFO("LUN 0 of target driver %s is under "
3594                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3595                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3596         }
3597 }
3598
3599 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3600 {
3601         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3602                 unsigned long flags;
3603                 del_timer_sync(&tm_dbg_timer);
3604                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3605                 tm_dbg_p_cmd_list_waitQ = NULL;
3606                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3607         }
3608 }
3609
3610 static void tm_dbg_timer_fn(unsigned long arg)
3611 {
3612         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3613         tm_dbg_flags.tm_dbg_release = 1;
3614         /* Used to make sure that all woken up threads see the new value */
3615         smp_wmb();
3616         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3617 }
3618
3619 /* Called under scst_tm_dbg_lock and IRQs off */
3620 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3621 {
3622         switch (tm_dbg_state) {
3623         case TM_DBG_STATE_ABORT:
3624                 if (tm_dbg_delayed_cmds_count == 0) {
3625                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3626                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
3627                                 " for %ld.%ld seconds (%ld HZ), "
3628                                 "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3629                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3630                         mod_timer(&tm_dbg_timer, jiffies + d);
3631 #if 0
3632                         tm_dbg_flags.tm_dbg_blocked = 1;
3633 #endif
3634                 } else {
3635                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3636                                 "(tag %llu), delayed_cmds_count=%d, "
3637                                 "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3638                                 tm_dbg_delayed_cmds_count,
3639                                 tm_dbg_on_state_passes);
3640                         if (tm_dbg_delayed_cmds_count == 2)
3641                                 tm_dbg_flags.tm_dbg_blocked = 0;
3642                 }
3643                 break;
3644
3645         case TM_DBG_STATE_RESET:
3646         case TM_DBG_STATE_OFFLINE:
3647                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3648                         "(tag %llu), delayed_cmds_count=%d, "
3649                         "tm_dbg_on_state_passes=%d", cmd, (long long unsigned int)cmd->tag,
3650                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3651                 tm_dbg_flags.tm_dbg_blocked = 1;
3652                 break;
3653
3654         default:
3655                 sBUG();
3656         }
3657         /* IRQs already off */
3658         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3659         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3660         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3661         cmd->tm_dbg_delayed = 1;
3662         tm_dbg_delayed_cmds_count++;
3663         return;
3664 }
3665
3666 /* No locks */
3667 void tm_dbg_check_released_cmds(void)
3668 {
3669         if (tm_dbg_flags.tm_dbg_release) {
3670                 struct scst_cmd *cmd, *tc;
3671                 spin_lock_irq(&scst_tm_dbg_lock);
3672                 list_for_each_entry_safe_reverse(cmd, tc,
3673                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3674                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3675                                 "delayed_cmds_count=%d", cmd, (long long unsigned int)cmd->tag,
3676                                 tm_dbg_delayed_cmds_count);
3677                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3678                         list_move(&cmd->cmd_list_entry,
3679                                 &cmd->cmd_lists->active_cmd_list);
3680                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3681                 }
3682                 tm_dbg_flags.tm_dbg_release = 0;
3683                 spin_unlock_irq(&scst_tm_dbg_lock);
3684         }
3685 }
3686
3687 /* Called under scst_tm_dbg_lock */
3688 static void tm_dbg_change_state(void)
3689 {
3690         tm_dbg_flags.tm_dbg_blocked = 0;
3691         if (--tm_dbg_on_state_passes == 0) {
3692                 switch (tm_dbg_state) {
3693                 case TM_DBG_STATE_ABORT:
3694                         TRACE_MGMT_DBG("%s", "Changing "
3695                             "tm_dbg_state to RESET");
3696                         tm_dbg_state =
3697                                 TM_DBG_STATE_RESET;
3698                         tm_dbg_flags.tm_dbg_blocked = 0;
3699                         break;
3700                 case TM_DBG_STATE_RESET:
3701                 case TM_DBG_STATE_OFFLINE:
3702 #ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
3703                         TRACE_MGMT_DBG("%s", "Changing "
3704                                 "tm_dbg_state to OFFLINE");
3705                         tm_dbg_state =
3706                                 TM_DBG_STATE_OFFLINE;
3707 #else
3708                         TRACE_MGMT_DBG("%s", "Changing "
3709                                 "tm_dbg_state to ABORT");
3710                         tm_dbg_state =
3711                                 TM_DBG_STATE_ABORT;
3712 #endif
3713                         break;
3714                 default:
3715                         sBUG();
3716                 }
3717                 tm_dbg_on_state_passes =
3718                     tm_dbg_on_state_num_passes[tm_dbg_state];
3719         }
3720
3721         TRACE_MGMT_DBG("%s", "Deleting timer");
3722         del_timer(&tm_dbg_timer);
3723 }
3724
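/*
 * Returns 1 if cmd was delayed for TM debugging (the caller must not
 * process it further now), 0 otherwise. Roughly every 50th command on a
 * TM-debugged tgt_dev is delayed, as are all commands while
 * tm_dbg_blocked is set.
 */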
3725 /* No locks */
3726 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3727 {
3728         int res = 0;
3729         unsigned long flags;
3730
3731         if (cmd->tm_dbg_immut)
3732                 goto out;
3733
3734         if (cmd->tm_dbg_delayed) {
3735                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3736                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3737                         "delayed_cmds_count=%d", cmd, (long long unsigned int)cmd->tag,
3738                         tm_dbg_delayed_cmds_count);
3739
3740                 cmd->tm_dbg_immut = 1;
3741                 tm_dbg_delayed_cmds_count--;
3742                 if ((tm_dbg_delayed_cmds_count == 0) &&
3743                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3744                         tm_dbg_change_state();
3745                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3746         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3747                                         &cmd->tgt_dev->tgt_dev_flags)) {
3748                 /* Delay every 50th command */
3749                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3750                 if (tm_dbg_flags.tm_dbg_blocked ||
3751                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3752                         tm_dbg_delay_cmd(cmd);
3753                         res = 1;
3754                 } else
3755                         cmd->tm_dbg_immut = 1;
3756                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3757         }
3758
3759 out:
3760         return res;
3761 }
3762
3763 /* No locks */
3764 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3765 {
3766         struct scst_cmd *c;
3767         unsigned long flags;
3768
3769         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3770         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3771                                 cmd_list_entry) {
3772                 if (c == cmd) {
3773                         TRACE_MGMT_DBG("Abort request for "
3774