/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *                 and Leonid Stoljar
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#ifdef SCST_HIGHMEM
#include <linux/highmem.h>
#endif

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->is_send_status = 1;
        cmd->resp_data_len = 0;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);
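/*
 * Usage sketch (illustrative, not taken from the original source): a dev
 * handler that detects a bad CDB field would typically fail the command
 * with CHECK CONDITION and ILLEGAL REQUEST sense, assuming the usual
 * scst_sense_invalid_field_in_cdb descriptor from scst.h:
 *
 *        scst_set_cmd_error(cmd,
 *                SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
 *
 * scst_set_cmd_error() allocates the sense buffer and formats it via
 * scst_set_sense() above, so afterwards cmd->sense holds a fixed-format
 * block: 0x70 in byte 0, the sense key in byte 2 and ASC/ASCQ in bytes
 * 12/13 (0x24/0x00 here, i.e. INVALID FIELD IN CDB).
 */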

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_sense);

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);
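/*
 * Note on the choice above (explanatory comment, not in the original
 * source): a session's very first command, or one that arrives before
 * session init has finished, gets SAM_STAT_BUSY, since TASK SET FULL is
 * only meaningful when the initiator already has other commands queued.
 * Otherwise QUEUE FULL (SAM_STAT_TASK_SET_FULL) is returned, letting
 * the initiator throttle its queue depth instead of blindly retrying.
 */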

void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
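/*
 * Usage sketch (illustrative, not taken from the original source): a dev
 * handler that produced fewer valid bytes than the allocated buffer
 * trims the response so only those bytes get transferred, e.g. with a
 * hypothetical valid_len computed while building the data:
 *
 *        scst_set_resp_data_len(cmd, valid_len);
 *
 * The SG vector is shortened in place; the original entry length and
 * count are saved in cmd->orig_* and later restored by
 * scst_check_restore_sg_buff().
 */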

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
        struct scst_device *dev, lun_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity supposed to be suspended and scst_mutex held */
void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex supposed to be held, there must not be parallel activity in this sess */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
                scst_sgv_pool_use_dma(tgt_dev);
        } else {
#ifdef SCST_HIGHMEM
                scst_sgv_pool_use_highmem(tgt_dev);
#endif
        }

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%Ld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
                    (sess->tgt->tgtt->rdy_to_xfer == NULL))
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic || (dev->handler->exec == NULL))
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic || (dev->handler->exec == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
                    (sess->tgt->tgtt->rdy_to_xfer == NULL))
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if ((dev->handler->dev_done_atomic ||
             (dev->handler->dev_done == NULL)) &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, scst_mutex - held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/* scst_mutex supposed to be held, there must not be parallel activity in this sess */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/* scst_mutex supposed to be held, there must not be parallel activity in this sess */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}
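
/*
 * Lookup sketch (illustrative, not taken from the original source): the
 * sess_tgt_dev_list_hash[] table maintained above is keyed by
 * HASH_VAL(lun), so resolving the tgt_dev for an incoming command's LUN
 * is a walk of a single hash bucket:
 *
 *        struct list_head *head =
 *                &sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
 *        struct scst_tgt_dev *tgt_dev = NULL, *t;
 *
 *        list_for_each_entry(t, head, sess_tgt_dev_list_entry) {
 *                if (t->lun == lun) {
 *                        tgt_dev = t;
 *                        break;
 *                }
 *        }
 */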

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
        int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry)
        {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %Ld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%Ld, rd_only %d)", dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry)
        {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry)
        {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->state = SCST_CMD_STATE_DEV_PARSE;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;

        res->bufflen = bufsize;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = SCST_CMD_STATE_RES_CONT_NEXT;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list ", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}
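
/*
 * Flow note (explanatory comment, not in the original source): when a
 * command needs its sense fetched explicitly, the core queues the
 * internal REQUEST SENSE built above at the head of the active list;
 * once it completes, scst_complete_request_sense() below copies the
 * returned data into the original command's sense buffer and frees the
 * internal command.
 */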

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        if (req_cmd->dev->handler->dev_done != NULL) {
                int rc;
                TRACE_DBG("Calling dev handler %s dev_done(%p)",
                      req_cmd->dev->handler->name, req_cmd);
                rc = req_cmd->dev->handler->dev_done(req_cmd);
                TRACE_DBG("Dev handler %s dev_done() returned %d",
                      req_cmd->dev->handler->name, rc);
        }

        sBUG_ON(orig_cmd == NULL); /* orig_cmd is dereferenced below */

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request)) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, SCST_DEFAULT_TIMEOUT, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, SCST_DEFAULT_TIMEOUT, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* scst_mutex supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one that holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);
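
/*
 * Reference counting sketch (illustrative, not taken from the original
 * source): a target driver that must keep a command alive across an
 * asynchronous step pairs the two helpers above, e.g. with a
 * hypothetical driver-side deferral function:
 *
 *        scst_cmd_get(cmd);      (take a reference before going async)
 *        my_defer_work(cmd);     (hypothetical; its completion handler
 *                                 calls scst_cmd_put(cmd) when done)
 *
 * The command is freed when the last reference is dropped; cmd->cmd_ref
 * starts at 1 in scst_alloc_cmd() below.
 */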

struct scst_cmd *scst_alloc_cmd(int gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /* At this point tgt_dev can be dead, but the pointer remains not-NULL */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %Lu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef EXTRACHECKS
                if (unlikely(!cmd->sent_to_midlev)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                             "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
                             cmd, cmd->cdb[0], cmd->tgtt->name,
                             (long long unsigned int)cmd->lun,
                             cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry)
                {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_CRIT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
        int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        /* cmd->dev->scsi_dev must be non-NULL here */
        req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
        if (req == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scsi_request failed");
                res = -ENOMEM;
                goto out;
        }

        cmd->scsi_req = req;

        memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
        req->sr_cmd_len = cmd->cdb_len;
        req->sr_data_direction = cmd->data_direction;
        req->sr_use_sg = cmd->sg_cnt;
        req->sr_bufflen = cmd->bufflen;
        req->sr_buffer = cmd->sg;
        req->sr_request->rq_disk = cmd->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        cmd->scsi_req->upper_private_data = cmd;

out:
        TRACE_EXIT();
        return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
        scsi_release_request(cmd->scsi_req);
        cmd->scsi_req = NULL;
}
#endif

int scst_alloc_space(struct scst_cmd *cmd)
{
        int gfp_mask;
        int res = -ENOMEM;
        int atomic = scst_cmd_atomic(cmd);
        int flags;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
        int bufflen = cmd->bufflen;

        TRACE_ENTRY();

        gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);

        flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
        if (cmd->no_sgv)
                flags |= SCST_POOL_ALLOC_NO_CACHED;

        if (unlikely(cmd->bufflen == 0)) {
                TRACE(TRACE_MGMT_MINOR, "Warning: data direction %d and/or "
                        "zero buffer length. Opcode 0x%x, handler %s, target "
                        "%s", cmd->data_direction, cmd->cdb[0],
                        cmd->dev->handler->name, cmd->tgtt->name);
1599                 /*
1600                  * Be on the safe side and alloc stub buffer. Neither target
1601                  * drivers, nor user space will touch it, since bufflen
1602                  * remains 0.
1603                  */
1604                 bufflen = PAGE_SIZE;
1605         }
1606
1607         cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
1608                         &cmd->sg_cnt, &cmd->sgv, NULL);
1609         if (cmd->sg == NULL)
1610                 goto out;
1611
1612         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1613                 static int ll;
1614                 if (ll < 10) {
1615                         PRINT_INFO("Unable to complete command due to "
1616                                 "SG IO count limitation (requested %d, "
1617                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1618                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1619                         ll++;
1620                 }
1621                 goto out_sg_free;
1622         }
1623
1624         res = 0;
1625
1626 out:
1627         TRACE_EXIT();
1628         return res;
1629
1630 out_sg_free:
1631         sgv_pool_free(cmd->sgv);
1632         cmd->sgv = NULL;
1633         cmd->sg = NULL;
1634         cmd->sg_cnt = 0;
1635         goto out;
1636 }
1637
1638 void scst_release_space(struct scst_cmd *cmd)
1639 {
1640         TRACE_ENTRY();
1641
1642         if (cmd->sgv == NULL)
1643                 goto out;
1644
1645         if (cmd->data_buf_alloced) {
1646                 TRACE_MEM("%s", "data_buf_alloced set, returning");
1647                 goto out;
1648         }
1649
1650         sgv_pool_free(cmd->sgv);
1651
1652         cmd->sgv = NULL;
1653         cmd->sg_cnt = 0;
1654         cmd->sg = NULL;
1655         cmd->bufflen = 0;
1656         cmd->data_len = 0;
1657
1658 out:
1659         TRACE_EXIT();
1660         return;
1661 }
1662
1663 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 }; /* indexed by CDB group code; -1 = reserved/vendor specific */
1664
1665 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1666 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1667
1668 int scst_get_cdb_len(const uint8_t *cdb)
1669 {
1670         return SCST_GET_CDB_LEN(cdb[0]);
1671 }
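
/*
 * Example (illustrative only): the CDB length is encoded in the group
 * code, the top three bits of the opcode. READ(10) has opcode 0x28,
 * group (0x28 >> 5) & 0x7 == 1, so scst_get_cdb_len() returns 10;
 * reserved and vendor-specific groups return -1. A minimal sketch:
 */
#if 0
static int example_cdb_len(const uint8_t *cdb)
{
	int len = scst_get_cdb_len(cdb);
	if (len < 0)
		PRINT_ERROR("Reserved/vendor CDB group for opcode 0x%x",
			cdb[0]);
	return len;
}
#endif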
1672
1673 /* get_trans_len_x() extracts x bytes from the CDB as the transfer length, starting at offset off */
1674
1675 /* for special commands */
1676 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1677 {
1678         cmd->bufflen = 6;
1679         return 0;
1680 }
1681
1682 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1683 {
1684         cmd->bufflen = READ_CAP_LEN;
1685         return 0;
1686 }
1687
1688 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1689 {
1690         cmd->bufflen = 1;
1691         return 0;
1692 }
1693
1694 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1695 {
1696         uint8_t *p = (uint8_t *)cmd->cdb + off;
1697         int res = 0;
1698
1699         cmd->bufflen = 0;
1700         cmd->bufflen |= ((u32)p[0]) << 8;
1701         cmd->bufflen |= ((u32)p[1]);
1702
1703         switch (cmd->cdb[1] & 0x1f) {
1704         case 0:
1705         case 1:
1706         case 6:
1707                 if (cmd->bufflen != 0) {
1708                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1709                                 "allocation length for service action %x",
1710                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1711                         goto out_inval;
1712                 }
1713                 break;
1714         }
1715
1716         switch (cmd->cdb[1] & 0x1f) {
1717         case 0:
1718         case 1:
1719                 cmd->bufflen = 20;
1720                 break;
1721         case 6:
1722                 cmd->bufflen = 32;
1723                 break;
1724         case 8:
1725                 cmd->bufflen = max(28, cmd->bufflen);
1726                 break;
1727         default:
1728                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1729                         cmd->cdb[1] & 0x1f);
1730                 goto out_inval;
1731         }
1732
1733 out:
1734         return res;
1735
1736 out_inval:
1737         scst_set_cmd_error(cmd,
1738                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1739         res = 1;
1740         goto out;
1741 }
1742
1743 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1744 {
1745         cmd->bufflen = (u32)cmd->cdb[off];
1746         return 0;
1747 }
1748
1749 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1750 {
1751         const uint8_t *p = cmd->cdb + off;
1752
1753         cmd->bufflen = 0;
1754         cmd->bufflen |= ((u32)p[0]) << 8;
1755         cmd->bufflen |= ((u32)p[1]);
1756
1757         return 0;
1758 }
1759
1760 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1761 {
1762         const uint8_t *p = cmd->cdb + off;
1763
1764         cmd->bufflen = 0;
1765         cmd->bufflen |= ((u32)p[0]) << 16;
1766         cmd->bufflen |= ((u32)p[1]) << 8;
1767         cmd->bufflen |= ((u32)p[2]);
1768
1769         return 0;
1770 }
1771
1772 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1773 {
1774         const uint8_t *p = cmd->cdb + off;
1775
1776         cmd->bufflen = 0;
1777         cmd->bufflen |= ((u32)p[0]) << 24;
1778         cmd->bufflen |= ((u32)p[1]) << 16;
1779         cmd->bufflen |= ((u32)p[2]) << 8;
1780         cmd->bufflen |= ((u32)p[3]);
1781
1782         return 0;
1783 }
1784
1785 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1786 {
1787         cmd->bufflen = 0;
1788         return 0;
1789 }
1790
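/*
 * Worked example (illustrative only): READ(10) carries its transfer
 * length in a 2-byte big-endian field at CDB bytes 7-8, so its op table
 * entry uses get_trans_len_2 with off == 7. Extracting it by hand:
 */
#if 0
static uint32_t example_read10_len(const uint8_t *cdb)
{
	return (((uint32_t)cdb[7]) << 8) | cdb[8];	/* bytes 7-8, BE */
}
#endif
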
1791 int scst_get_cdb_info(struct scst_cmd *cmd)
1792 {
1793         int dev_type = cmd->dev->handler->type;
1794         int i, res = 0;
1795         uint8_t op;
1796         const struct scst_sdbops *ptr = NULL;
1797
1798         TRACE_ENTRY();
1799
1800         op = cmd->cdb[0];       /* get the unmodified opcode */
1801
1802         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1803                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1804                 dev_type);
1805
1806         i = scst_scsi_op_list[op];
1807         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1808                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1809                         ptr = &scst_scsi_op_table[i];
1810                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1811                               ptr->ops, ptr->devkey[0], /* disk     */
1812                               ptr->devkey[1],   /* tape     */
1813                               ptr->devkey[2],   /* printer */
1814                               ptr->devkey[3],   /* cpu      */
1815                               ptr->devkey[4],   /* cdr      */
1816                               ptr->devkey[5],   /* cdrom    */
1817                               ptr->devkey[6],   /* scanner */
1818                               ptr->devkey[7],   /* worm     */
1819                               ptr->devkey[8],   /* changer */
1820                               ptr->devkey[9],   /* commdev */
1821                               ptr->op_name);
1822                         TRACE_DBG("direction=%d flags=%d off=%d",
1823                               ptr->direction,
1824                               ptr->flags,
1825                               ptr->off);
1826                         break;
1827                 }
1828                 i++;
1829         }
1830
1831         if (ptr == NULL) {
1832                 /* opcode not found or not supported for this device type */
1833                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1834                       dev_type);
1835                 res = -1;
1836                 cmd->op_flags = SCST_INFO_INVALID;
1837                 goto out;
1838         }
1839
1840         cmd->cdb_len = SCST_GET_CDB_LEN(op);
1841         cmd->op_name = ptr->op_name;
1842         cmd->data_direction = ptr->direction;
1843         cmd->op_flags = ptr->flags;
1844         res = (*ptr->get_trans_len)(cmd, ptr->off);
1845
1846 out:
1847         TRACE_EXIT();
1848         return res;
1849 }
1850 EXPORT_SYMBOL(scst_get_cdb_info);
1851
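/*
 * A minimal usage sketch (illustrative only; example_parse is a
 * hypothetical name): scst_get_cdb_info() is called once per command to
 * fill in cdb_len, op_name, data_direction, op_flags and bufflen from
 * the op table. A non-zero result means an unknown opcode or an invalid
 * CDB field:
 */
#if 0
static int example_parse(struct scst_cmd *cmd)
{
	if (scst_get_cdb_info(cmd) != 0) {
		TRACE_DBG("Unknown/invalid opcode 0x%x", cmd->cdb[0]);
		return -1;
	}
	TRACE_DBG("dir %d, bufflen %d", cmd->data_direction, cmd->bufflen);
	return 0;
}
#endif
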
1852 /*
1853  * Routine to extract a LUN number from an 8-byte LUN structure
1854  * in network byte order (BE)
1855  * (see SAM-2, Section 4.12.3, page 40).
1856  * Supports the peripheral, flat space and logical unit addressing methods.
1857  */
1858 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1859 {
1860         lun_t res = (lun_t)-1;
1861         int address_method;
1862
1863         TRACE_ENTRY();
1864
1865         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1866
1867         if (unlikely(len < 2)) {
1868                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1869                         "more", len);
1870                 goto out;
1871         }
1872
1873         if (len > 2) {
1874                 switch (len) {
1875                 case 8:
1876                         if ((*((uint64_t *)lun) &
1877                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1878                                 goto out_err;
1879                         break;
1880                 case 4:
1881                         if (*((uint16_t *)&lun[2]) != 0)
1882                                 goto out_err;
1883                         break;
1884                 case 6:
1885                         if (*((uint32_t *)&lun[2]) != 0)
1886                                 goto out_err;
1887                         break;
1888                 default:
1889                         goto out_err;
1890                 }
1891         }
1892
1893         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1894         switch (address_method) {
1895         case 0: /* peripheral device addressing method */
1896 #if 0 /* Looks like it's legal to use it as flat space addressing method as well */
1897                 if (*lun) {
1898                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
1899                              "peripheral device addressing method 0x%02x, "
1900                              "expected 0", *lun);
1901                         break;
1902                 }
1903                 res = *(lun + 1);
1904                 break;
1905 #else
1906                 /* fall through */
1907 #endif
1908
1909         case 1: /* flat space addressing method */
1910                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1911                 break;
1912
1913         case 2: /* logical unit addressing method */
1914                 if (*lun & 0x3f) {
1915                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1916                                     "addressing method 0x%02x, expected 0",
1917                                     *lun & 0x3f);
1918                         break;
1919                 }
1920                 if (*(lun + 1) & 0xe0) {
1921                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
1922                                     "addressing method 0x%02x, expected 0",
1923                                     (*(lun + 1) & 0xe0) >> 5);
1924                         break;
1925                 }
1926                 res = *(lun + 1) & 0x1f;
1927                 break;
1928
1929         case 3: /* extended logical unit addressing method */
1930         default:
1931                 PRINT_ERROR("Unimplemented LUN addressing method %u",
1932                             address_method);
1933                 break;
1934         }
1935
1936 out:
1937         TRACE_EXIT_RES((int)res);
1938         return res;
1939
1940 out_err:
1941         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
1942         goto out;
1943 }
1944
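/*
 * Worked example (illustrative only): in the flat space addressing
 * method the low 6 bits of byte 0 and all of byte 1 form the LUN, so
 * the field 0x40 0x05 0x00 ... unpacks to LUN 5 and 0x41 0x00 0x00 ...
 * to LUN 256:
 */
#if 0
{
	static const uint8_t raw_lun[8] = { 0x40, 0x05, 0, 0, 0, 0, 0, 0 };
	sBUG_ON(scst_unpack_lun(raw_lun, sizeof(raw_lun)) != 5);
}
#endif
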
1945 int scst_calc_block_shift(int sector_size)
1946 {
1947         int block_shift = 0;
1948         int t;
1949
1950         if (sector_size == 0)
1951                 sector_size = 512;
1952
1953         t = sector_size;
1954         while (1) {
1955                 if ((t & 1) != 0)
1956                         break;
1957                 t >>= 1;
1958                 block_shift++;
1959         }
1960         if (block_shift < 9) {
1961                 PRINT_ERROR("Wrong sector size %d", sector_size);
1962                 block_shift = -1;
1963         }
1964
1965         TRACE_EXIT_RES(block_shift);
1966         return block_shift;
1967 }
1968 EXPORT_SYMBOL(scst_calc_block_shift);
1969
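/*
 * Examples (illustrative only): 512 -> 9, 4096 -> 12. The shift is the
 * position of the lowest set bit, so an odd-grained size such as 520
 * (0x208, lowest bit 3) yields a shift below 9 and is rejected with -1:
 */
#if 0
sBUG_ON(scst_calc_block_shift(512) != 9);
sBUG_ON(scst_calc_block_shift(4096) != 12);
sBUG_ON(scst_calc_block_shift(520) != -1);	/* 520 = 0x208 */
#endif
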
1970 int scst_sbc_generic_parse(struct scst_cmd *cmd,
1971         int (*get_block_shift)(struct scst_cmd *cmd))
1972 {
1973         int res = 0;
1974
1975         TRACE_ENTRY();
1976
1977         /*
1978          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1979          * therefore change them only if necessary
1980          */
1981
1982         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1983               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1984
1985         switch (cmd->cdb[0]) {
1986         case SERVICE_ACTION_IN:
1987                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
1988                         cmd->bufflen = READ_CAP16_LEN;
1989                         cmd->data_direction = SCST_DATA_READ;
1990                 }
1991                 break;
1992         case VERIFY_6:
1993         case VERIFY:
1994         case VERIFY_12:
1995         case VERIFY_16:
1996                 if ((cmd->cdb[1] & BYTCHK) == 0) {
1997                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
1998                         cmd->bufflen = 0;
1999                         goto out;
2000                 } else
2001                         cmd->data_len = 0;
2002                 break;
2003         default:
2004                 /* It's all good */
2005                 break;
2006         }
2007
2008         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2009                 /*
2010                  * No need for locks here, since *_detach() cannot be
2011                  * called while there are outstanding commands.
2012                  */
2013                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2014         }
2015
2016 out:
2017         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2018               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2019
2020         TRACE_EXIT_RES(res);
2021         return res;
2022 }
2023 EXPORT_SYMBOL(scst_sbc_generic_parse);
2024
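/*
 * A minimal sketch (illustrative only; the example_* names and the
 * struct behind dh_priv are hypothetical) of how a disk-type dev
 * handler plugs its cached block shift into the generic SBC parser:
 */
#if 0
struct example_dev_params {
	int block_shift;		/* e.g. 9 for 512-byte sectors */
};

static int example_get_block_shift(struct scst_cmd *cmd)
{
	struct example_dev_params *p =
		(struct example_dev_params *)cmd->dev->dh_priv;
	return p->block_shift;
}

static int example_parse(struct scst_cmd *cmd)
{
	return scst_sbc_generic_parse(cmd, example_get_block_shift);
}
#endif
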
2025 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2026         int (*get_block_shift)(struct scst_cmd *cmd))
2027 {
2028         int res = 0;
2029
2030         TRACE_ENTRY();
2031
2032         /*
2033          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2034          * therefore change them only if necessary
2035          */
2036
2037         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2038               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2039
2040         cmd->cdb[1] &= 0x1f; /* mask out the obsolete LUN field */
2041
2042         switch (cmd->cdb[0]) {
2043         case VERIFY_6:
2044         case VERIFY:
2045         case VERIFY_12:
2046         case VERIFY_16:
2047                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2048                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2049                         cmd->bufflen = 0;
2050                         goto out;
2051                 }
2052                 break;
2053         default:
2054                 /* It's all good */
2055                 break;
2056         }
2057
2058         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2059                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2060
2061 out:
2062         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2063                 cmd->data_direction);
2064
2065         TRACE_EXIT();
2066         return res;
2067 }
2068 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2069
2070 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2071         int (*get_block_shift)(struct scst_cmd *cmd))
2072 {
2073         int res = 0;
2074
2075         TRACE_ENTRY();
2076
2077         /*
2078          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2079          * therefore change them only if necessary
2080          */
2081
2082         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2083               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2084
2085         cmd->cdb[1] &= 0x1f; /* mask out the obsolete LUN field */
2086
2087         switch (cmd->cdb[0]) {
2088         case VERIFY_6:
2089         case VERIFY:
2090         case VERIFY_12:
2091         case VERIFY_16:
2092                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2093                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2094                         cmd->bufflen = 0;
2095                         goto out;
2096                 }
2097                 break;
2098         default:
2099                 /* It's all good */
2100                 break;
2101         }
2102
2103         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2104                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2105
2106 out:
2107         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2108                 cmd->data_direction);
2109
2110         TRACE_EXIT_RES(res);
2111         return res;
2112 }
2113 EXPORT_SYMBOL(scst_modisk_generic_parse);
2114
2115 int scst_tape_generic_parse(struct scst_cmd *cmd,
2116         int (*get_block_size)(struct scst_cmd *cmd))
2117 {
2118         int res = 0;
2119
2120         TRACE_ENTRY();
2121
2122         /*
2123          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2124          * therefore change them only if necessary
2125          */
2126
2127         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2128               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2129
2130         if (cmd->cdb[0] == READ_POSITION) {
2131                 int tclp = cmd->cdb[1] & TCLP_BIT;
2132                 int long_bit = cmd->cdb[1] & LONG_BIT;
2133                 int bt = cmd->cdb[1] & BT_BIT;
2134
2135                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2136                         cmd->bufflen =
2137                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2138                         cmd->data_direction = SCST_DATA_READ;
2139                 } else {
2140                         cmd->bufflen = 0;
2141                         cmd->data_direction = SCST_DATA_NONE;
2142                 }
2143         }
2144
2145         if ((cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) && (cmd->cdb[1] & 1))
2146                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2147
2148         TRACE_EXIT_RES(res);
2149         return res;
2150 }
2151 EXPORT_SYMBOL(scst_tape_generic_parse);
2152
2153 static int scst_null_parse(struct scst_cmd *cmd)
2154 {
2155         int res = 0;
2156
2157         TRACE_ENTRY();
2158
2159         /*
2160          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2161          * therefore change them only if necessary
2162          */
2163
2164         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2165               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2166 #if 0
2167         switch (cmd->cdb[0]) {
2168         default:
2169                 /* It's all good */
2170                 break;
2171         }
2172 #endif
2173         TRACE_DBG("res %d bufflen %d direct %d",
2174               res, cmd->bufflen, cmd->data_direction);
2175
2176         TRACE_EXIT();
2177         return res;
2178 }
2179
2180 int scst_changer_generic_parse(struct scst_cmd *cmd,
2181         int (*nothing)(struct scst_cmd *cmd))
2182 {
2183         return scst_null_parse(cmd);
2184 }
2185 EXPORT_SYMBOL(scst_changer_generic_parse);
2186
2187 int scst_processor_generic_parse(struct scst_cmd *cmd,
2188         int (*nothing)(struct scst_cmd *cmd))
2189 {
2190         return scst_null_parse(cmd);
2191 }
2192 EXPORT_SYMBOL(scst_processor_generic_parse);
2193
2194 int scst_raid_generic_parse(struct scst_cmd *cmd,
2195         int (*nothing)(struct scst_cmd *cmd))
2196 {
2197         return scst_null_parse(cmd);
2198 }
2199 EXPORT_SYMBOL(scst_raid_generic_parse);
2200
2201 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2202         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2203 {
2204         int opcode = cmd->cdb[0];
2205         int status = cmd->status;
2206         int res = SCST_CMD_STATE_DEFAULT;
2207
2208         TRACE_ENTRY();
2209
2210         /*
2211          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2212          * based on cmd->status and cmd->data_direction, therefore change
2213          * them only if necessary
2214          */
2215
2216         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2217                 switch (opcode) {
2218                 case READ_CAPACITY:
2219                 {
2220                         /* Always keep track of disk capacity */
2221                         int buffer_size, sector_size, sh;
2222                         uint8_t *buffer;
2223
2224                         buffer_size = scst_get_buf_first(cmd, &buffer);
2225                         if (unlikely(buffer_size <= 0)) {
2226                                 PRINT_ERROR("%s: Unable to get the buffer "
2227                                         "(%d)", __func__, buffer_size);
2228                                 goto out;
2229                         }
2230
2231                         sector_size =
2232                             ((buffer[4] << 24) | (buffer[5] << 16) |
2233                              (buffer[6] << 8) | (buffer[7] << 0));
2234                         scst_put_buf(cmd, buffer);
2235                         if (sector_size != 0)
2236                                 sh = scst_calc_block_shift(sector_size);
2237                         else
2238                                 sh = 0;
2239                         set_block_shift(cmd, sh);
2240                         TRACE_DBG("block_shift %d", sh);
2241                         break;
2242                 }
2243                 default:
2244                         /* It's all good */
2245                         break;
2246                 }
2247         }
2248
2249         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2250               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2251
2252 out:
2253         TRACE_EXIT_RES(res);
2254         return res;
2255 }
2256 EXPORT_SYMBOL(scst_block_generic_dev_done);
2257
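/*
 * A matching sketch (illustrative only; the example_* names are
 * hypothetical) of the dev_done side: the callback caches the block
 * shift that scst_block_generic_dev_done() recomputes from a successful
 * READ CAPACITY:
 */
#if 0
static void example_set_block_shift(struct scst_cmd *cmd, int block_shift)
{
	struct example_dev_params *p =
		(struct example_dev_params *)cmd->dev->dh_priv;
	if (block_shift != 0)
		p->block_shift = block_shift;
}

static int example_dev_done(struct scst_cmd *cmd)
{
	return scst_block_generic_dev_done(cmd, example_set_block_shift);
}
#endif
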
2258 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2259         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2260         void (*set_block_size)(struct scst_cmd *cmd, int block_size))
2261         int opcode = cmd->cdb[0];
2262         int res = SCST_CMD_STATE_DEFAULT;
2263         int buffer_size, bs;
2264         uint8_t *buffer = NULL;
2265
2266         TRACE_ENTRY();
2267
2268         /*
2269          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2270          * based on cmd->status and cmd->data_direction, therefore change
2271          * them only if necessary
2272          */
2273
2274         switch (opcode) {
2275         case MODE_SENSE:
2276         case MODE_SELECT:
2277                 buffer_size = scst_get_buf_first(cmd, &buffer);
2278                 if (unlikely(buffer_size <= 0)) {
2279                         PRINT_ERROR("%s: Unable to get the buffer (%d)",
2280                                 __func__, buffer_size);
2281                         goto out;
2282                 }
2283                 break;
2284         }
2285
2286         switch (opcode) {
2287         case MODE_SENSE:
2288                 TRACE_DBG("%s", "MODE_SENSE");
2289                 if ((cmd->cdb[2] & 0xC0) == 0) {
2290                         if (buffer[3] == 8) {
2291                                 bs = (buffer[9] << 16) |
2292                                     (buffer[10] << 8) | buffer[11];
2293                                 set_block_size(cmd, bs);
2294                         }
2295                 }
2296                 break;
2297         case MODE_SELECT:
2298                 TRACE_DBG("%s", "MODE_SELECT");
2299                 if (buffer[3] == 8) {
2300                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2301                             (buffer[11]);
2302                         set_block_size(cmd, bs);
2303                 }
2304                 break;
2305         default:
2306                 /* It's all good */
2307                 break;
2308         }
2309
2310         switch (opcode) {
2311         case MODE_SENSE:
2312         case MODE_SELECT:
2313                 scst_put_buf(cmd, buffer);
2314                 break;
2315         }
2316
2317 out:
2318         TRACE_EXIT_RES(res);
2319         return res;
2320 }
2321 EXPORT_SYMBOL(scst_tape_generic_dev_done);
2322
2323 static void scst_check_internal_sense(struct scst_device *dev, int result,
2324         uint8_t *sense, int sense_len)
2325 {
2326         TRACE_ENTRY();
2327
2328         if (host_byte(result) == DID_RESET) {
2329                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2330                         "reset UA");
2331                 scst_set_sense(sense, sense_len,
2332                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2333                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2334         } else if ((status_byte(result) == CHECK_CONDITION) &&
2335                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2336                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2337
2338         TRACE_EXIT();
2339         return;
2340 }
2341
2342 int scst_obtain_device_parameters(struct scst_device *dev)
2343 {
2344         int res = 0, i;
2345         uint8_t cmd[16];
2346         uint8_t buffer[4+0x0A];
2347         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2348
2349         TRACE_ENTRY();
2350
2351         sBUG_ON(in_interrupt() || in_atomic());
2352         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2353
2354         for (i = 0; i < 5; i++) {
2355                 /* Get control mode page */
2356                 memset(cmd, 0, sizeof(cmd));
2357                 cmd[0] = MODE_SENSE;
2358                 cmd[1] = 8; /* DBD */
2359                 cmd[2] = 0x0A;
2360                 cmd[4] = sizeof(buffer);
2361
2362                 memset(buffer, 0, sizeof(buffer));
2363                 memset(sense_buffer, 0, sizeof(sense_buffer));
2364
2365                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2366                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2367                            sizeof(buffer), sense_buffer, SCST_DEFAULT_TIMEOUT,
2368                             0, 0);
2369
2370                 TRACE_DBG("MODE_SENSE done: %x", res);
2371
2372                 if (scsi_status_is_good(res)) {
2373                         int q;
2374
2375                         PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2376                                 buffer, sizeof(buffer));
2377
2378                         dev->tst = buffer[4+2] >> 5;
2379                         q = buffer[4+3] >> 4;
2380                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2381                                 PRINT_ERROR("Invalid (too large) QUEUE ALG %x, dev "
2382                                         "%d:%d:%d:%d", q,
2383                                         dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2384                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2385                         }
2386                         dev->queue_alg = q;
2387                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2388                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2389
2390                         /*
2391                          * Unfortunately, SCSI ML doesn't provide a way to
2392                          * specify a command's task attribute, so we can only
2393                          * rely on the device's restricted reordering.
2394                          */
2395                         dev->has_own_order_mgmt = !dev->queue_alg;
2396
2397                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2398                                 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2399                                 "%d", dev->scsi_dev->host->host_no,
2400                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2401                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2402                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2403
2404                         goto out;
2405                 } else {
2406 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2407                         if ((status_byte(res) == CHECK_CONDITION) &&
2408 #else
2409                         if (
2410 #endif
2411                             SCST_SENSE_VALID(sense_buffer)) {
2412                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2413                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2414                                                 "%d:%d:%d:%d doesn't support control "
2415                                                 "mode page, using defaults: TST "
2416                                                 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2417                                                 "has_own_order_mgmt %d",
2418                                                 dev->scsi_dev->host->host_no,
2419                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2420                                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2421                                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2422                                         res = 0;
2423                                         goto out;
2424                                 } else if (sense_buffer[2] == NOT_READY) {
2425                                         TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2426                                                 dev->scsi_dev->host->host_no,
2427                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2428                                                 dev->scsi_dev->lun);
2429                                         res = 0;
2430                                         goto out;
2431                                 }
2432                         } else {
2433                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2434                                         "device %d:%d:%d:%d failed: %x",
2435                                         dev->scsi_dev->host->host_no,
2436                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2437                                         dev->scsi_dev->lun, res);
2438                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2439                                         "sense", sense_buffer, sizeof(sense_buffer));
2440                         }
2441                         scst_check_internal_sense(dev, res, sense_buffer,
2442                                         sizeof(sense_buffer));
2443                 }
2444         }
2445         res = -ENODEV;
2446
2447 out:
2448         TRACE_EXIT_RES(res);
2449         return res;
2450 }
2451 EXPORT_SYMBOL(scst_obtain_device_parameters);
2452
2453 /* Called under dev_lock and BH off */
2454 void scst_process_reset(struct scst_device *dev,
2455         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2456         struct scst_mgmt_cmd *mcmd)
2457 {
2458         struct scst_tgt_dev *tgt_dev;
2459         struct scst_cmd *cmd, *tcmd;
2460
2461         TRACE_ENTRY();
2462
2463         /* Clear RESERVE'ation, if necessary */
2464         if (dev->dev_reserved) {
2465                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2466                                     dev_tgt_dev_list_entry) {
2467                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2468                                 "lun %llu",
2469                                 (long long unsigned int)tgt_dev->lun);
2470                         clear_bit(SCST_TGT_DEV_RESERVED,
2471                                   &tgt_dev->tgt_dev_flags);
2472                 }
2473                 dev->dev_reserved = 0;
2474                 /*
2475                  * There is no need to send RELEASE, since the device is going
2476                  * to be reset. Moreover, since we can be called from a RESET
2477                  * TM function, sending it might be dangerous.
2478                  */
2479         }
2480
2481         dev->dev_double_ua_possible = 1;
2482         dev->dev_serialized = 1;
2483
2484         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2485                 dev_tgt_dev_list_entry) {
2486                 struct scst_session *sess = tgt_dev->sess;
2487
2488                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2489                 scst_free_all_UA(tgt_dev);
2490                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2491
2492                 spin_lock_irq(&sess->sess_list_lock);
2493
2494                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2495                 list_for_each_entry(cmd, &sess->search_cmd_list,
2496                                 search_cmd_list_entry) {
2497                         if (cmd == exclude_cmd)
2498                                 continue;
2499                         if ((cmd->tgt_dev == tgt_dev) ||
2500                             ((cmd->tgt_dev == NULL) &&
2501                              (cmd->lun == tgt_dev->lun))) {
2502                                 scst_abort_cmd(cmd, mcmd,
2503                                         (tgt_dev->sess != originator), 0);
2504                         }
2505                 }
2506                 spin_unlock_irq(&sess->sess_list_lock);
2507         }
2508
2509         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2510                                 blocked_cmd_list_entry) {
2511                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2512                         list_del(&cmd->blocked_cmd_list_entry);
2513                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2514                                 "to active cmd list", cmd);
2515                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2516                         list_add_tail(&cmd->cmd_list_entry,
2517                                 &cmd->cmd_lists->active_cmd_list);
2518                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2519                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2520                 }
2521         }
2522
2523         /* BH already off */
2524         spin_lock(&scst_temp_UA_lock);
2525         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2526                 SCST_LOAD_SENSE(scst_sense_reset_UA));
2527         scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2528                 sizeof(scst_temp_UA));
2529         spin_unlock(&scst_temp_UA_lock);
2530
2531         TRACE_EXIT();
2532         return;
2533 }
2534
2535 int scst_set_pending_UA(struct scst_cmd *cmd)
2536 {
2537         int res = 0;
2538         struct scst_tgt_dev_UA *UA_entry;
2539
2540         TRACE_ENTRY();
2541
2542         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2543
2544         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2545
2546         /* UA list could be cleared behind us, so retest */
2547         if (list_empty(&cmd->tgt_dev->UA_list)) {
2548                 TRACE_DBG("%s",
2549                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2550                 res = -1;
2551                 goto out_unlock;
2552         }
2553
2554         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2555                               UA_list_entry);
2556
2557         TRACE_DBG("next %p UA_entry %p",
2558               cmd->tgt_dev->UA_list.next, UA_entry);
2559
2560         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2561                 sizeof(UA_entry->UA_sense_buffer));
2562
2563         cmd->ua_ignore = 1;
2564
2565         list_del(&UA_entry->UA_list_entry);
2566
2567         mempool_free(UA_entry, scst_ua_mempool);
2568
2569         if (list_empty(&cmd->tgt_dev->UA_list)) {
2570                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2571                           &cmd->tgt_dev->tgt_dev_flags);
2572         }
2573
2574         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2575
2576 out:
2577         TRACE_EXIT_RES(res);
2578         return res;
2579
2580 out_unlock:
2581         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2582         goto out;
2583 }
2584
2585 /* Called under tgt_dev_lock and BH off */
2586 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2587         const uint8_t *sense, int sense_len, int head)
2588 {
2589         struct scst_tgt_dev_UA *UA_entry = NULL;
2590
2591         TRACE_ENTRY();
2592
2593         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2594         if (UA_entry == NULL) {
2595                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2596                      "allocation failed. The UNIT ATTENTION "
2597                      "on some sessions will be missed");
2598                 PRINT_BUFFER("Lost UA", sense, sense_len);
2599                 goto out;
2600         }
2601         memset(UA_entry, 0, sizeof(*UA_entry));
2602
2603         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2604                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2605         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2606
2607         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2608
2609         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2610
2611         if (head)
2612                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2613         else
2614                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2615
2616 out:
2617         TRACE_EXIT();
2618         return;
2619 }
2620
2621 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2622         const uint8_t *sense, int sense_len, int head)
2623 {
2624         int skip_UA = 0;
2625         struct scst_tgt_dev_UA *UA_entry_tmp;
2626
2627         TRACE_ENTRY();
2628
2629         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2630
2631         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2632                             UA_list_entry) {
2633                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2634                         TRACE_MGMT_DBG("%s", "UA already exists");
2635                         skip_UA = 1;
2636                         break;
2637                 }
2638         }
2639
2640         if (skip_UA == 0)
2641                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2642
2643         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2644
2645         TRACE_EXIT();
2646         return;
2647 }
2648
2649 /* Called under dev_lock and BH off */
2650 void scst_dev_check_set_local_UA(struct scst_device *dev,
2651         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2652 {
2653         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2654
2655         TRACE_ENTRY();
2656
2657         if (exclude != NULL)
2658                 exclude_tgt_dev = exclude->tgt_dev;
2659
2660         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2661                         dev_tgt_dev_list_entry) {
2662                 if (tgt_dev != exclude_tgt_dev)
2663                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2664         }
2665
2666         TRACE_EXIT();
2667         return;
2668 }
2669
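/*
 * A minimal sketch (illustrative only): queueing a unit attention on
 * every tgt_dev of a device except the one whose command caused it.
 * Like the function above, it must run under dev_lock with BHs off;
 * the reset sense code is used here only as an example:
 */
#if 0
{
	uint8_t sense[SCST_SENSE_BUFFERSIZE];

	spin_lock_bh(&dev->dev_lock);
	scst_set_sense(sense, sizeof(sense),
		SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_dev_check_set_local_UA(dev, cmd, sense, sizeof(sense));
	spin_unlock_bh(&dev->dev_lock);
}
#endif
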
2670 /* Called under dev_lock and BH off */
2671 void __scst_dev_check_set_UA(struct scst_device *dev,
2672         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2673 {
2674         TRACE_ENTRY();
2675
2676         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2677
2678         /* Check for reset UA */
2679         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2680                 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2681                         exclude, NULL);
2682
2683         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2684
2685         TRACE_EXIT();
2686         return;
2687 }
2688
2689 /* Called under tgt_dev_lock or when tgt_dev is unused */
2690 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2691 {
2692         struct scst_tgt_dev_UA *UA_entry, *t;
2693
2694         TRACE_ENTRY();
2695
2696         list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2697                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %llu",
2698                                (long long unsigned int)tgt_dev->lun);
2699                 list_del(&UA_entry->UA_list_entry);
2700                 kfree(UA_entry);
2701         }
2702         INIT_LIST_HEAD(&tgt_dev->UA_list);
2703         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2704
2705         TRACE_EXIT();
2706         return;
2707 }
2708
2709 /* No locks */
2710 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2711 {
2712         struct scst_cmd *res = NULL, *cmd, *t;
2713         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2714
2715         spin_lock_irq(&tgt_dev->sn_lock);
2716
2717         if (unlikely(tgt_dev->hq_cmd_count != 0))
2718                 goto out_unlock;
2719
2720 restart:
2721         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2722                                 sn_cmd_list_entry) {
2723                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2724                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2725                 if (cmd->sn == expected_sn) {
2726                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2727                                 cmd, cmd->sn, cmd->sn_set);
2728                         tgt_dev->def_cmd_count--;
2729                         list_del(&cmd->sn_cmd_list_entry);
2730                         if (res == NULL)
2731                                 res = cmd;
2732                         else {
2733                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2734                                 TRACE_SN("Adding cmd %p to active cmd list",
2735                                         cmd);
2736                                 list_add_tail(&cmd->cmd_list_entry,
2737                                         &cmd->cmd_lists->active_cmd_list);
2738                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2739                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2740                         }
2741                 }
2742         }
2743         if (res != NULL)
2744                 goto out_unlock;
2745
2746         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2747                                 sn_cmd_list_entry) {
2748                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2749                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2750                 if (cmd->sn == expected_sn) {
2751                         atomic_t *slot = cmd->sn_slot;
2752                         /*
2753                          * !! At this point any pointer in cmd, except !!
2754                          * !! sn_slot and sn_cmd_list_entry, could be   !!
2755                          * !! already destroyed                         !!
2756                          */
2757                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2758                                  cmd,
2759                                  (long long unsigned int)cmd->tag,
2760                                  cmd->sn);
2761                         tgt_dev->def_cmd_count--;
2762                         list_del(&cmd->sn_cmd_list_entry);
2763                         spin_unlock_irq(&tgt_dev->sn_lock);
2764                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2765                                              &cmd->cmd_flags))
2766                                 scst_destroy_put_cmd(cmd);
2767                         scst_inc_expected_sn(tgt_dev, slot);
2768                         expected_sn = tgt_dev->expected_sn;
2769                         spin_lock_irq(&tgt_dev->sn_lock);
2770                         goto restart;
2771                 }
2772         }
2773
2774 out_unlock:
2775         spin_unlock_irq(&tgt_dev->sn_lock);
2776         return res;
2777 }
2778
2779 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2780         struct scst_thr_data_hdr *data,
2781         void (*free_fn) (struct scst_thr_data_hdr *data))
2782 {
2783         data->pid = current->pid;
2784         atomic_set(&data->ref, 1);
2785         EXTRACHECKS_BUG_ON(free_fn == NULL);
2786         data->free_fn = free_fn;
2787         spin_lock(&tgt_dev->thr_data_lock);
2788         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2789         spin_unlock(&tgt_dev->thr_data_lock);
2790 }
2791 EXPORT_SYMBOL(scst_add_thr_data);
2792
2793 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2794 {
2795         spin_lock(&tgt_dev->thr_data_lock);
2796         while (!list_empty(&tgt_dev->thr_data_list)) {
2797                 struct scst_thr_data_hdr *d = list_entry(
2798                                 tgt_dev->thr_data_list.next, typeof(*d),
2799                                 thr_data_list_entry);
2800                 list_del(&d->thr_data_list_entry);
2801                 spin_unlock(&tgt_dev->thr_data_lock);
2802                 scst_thr_data_put(d);
2803                 spin_lock(&tgt_dev->thr_data_lock);
2804         }
2805         spin_unlock(&tgt_dev->thr_data_lock);
2806         return;
2807 }
2808 EXPORT_SYMBOL(scst_del_all_thr_data);
2809
2810 void scst_dev_del_all_thr_data(struct scst_device *dev)
2811 {
2812         struct scst_tgt_dev *tgt_dev;
2813
2814         TRACE_ENTRY();
2815
2816         mutex_lock(&scst_mutex);
2817
2818         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2819                                 dev_tgt_dev_list_entry) {
2820                 scst_del_all_thr_data(tgt_dev);
2821         }
2822
2823         mutex_unlock(&scst_mutex);
2824
2825         TRACE_EXIT();
2826         return;
2827 }
2828 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
2829
2830 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2831 {
2832         struct scst_thr_data_hdr *res = NULL, *d;
2833
2834         spin_lock(&tgt_dev->thr_data_lock);
2835         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2836                 if (d->pid == current->pid) {
2837                         res = d;
2838                         scst_thr_data_get(res);
2839                         break;
2840                 }
2841         }
2842         spin_unlock(&tgt_dev->thr_data_lock);
2843         return res;
2844 }
2845 EXPORT_SYMBOL(scst_find_thr_data);
2846
2847 /* dev_lock supposed to be held and BH disabled */
2848 void __scst_block_dev(struct scst_device *dev)
2849 {
2850         dev->block_count++;
2851         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2852 }
2853
2854 /* No locks */
2855 void scst_block_dev(struct scst_device *dev, int outstanding)
2856 {
2857         spin_lock_bh(&dev->dev_lock);
2858         __scst_block_dev(dev);
2859         spin_unlock_bh(&dev->dev_lock);
2860
2861         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2862         smp_mb();
2863
2864         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2865                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2866         wait_event(dev->on_dev_waitQ,
2867                 atomic_read(&dev->on_dev_count) <= outstanding);
2868         TRACE_MGMT_DBG("%s", "wait_event() returned");
2869 }
2870
2871 /* No locks */
2872 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
2873 {
2874         sBUG_ON(cmd->needs_unblocking);
2875
2876         cmd->needs_unblocking = 1;
2877         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
2878                        cmd, (long long unsigned int)cmd->tag);
2879
2880         scst_block_dev(cmd->dev, outstanding);
2881 }
2882
2883 /* No locks */
2884 void scst_unblock_dev(struct scst_device *dev)
2885 {
2886         spin_lock_bh(&dev->dev_lock);
2887         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
2888                 dev->block_count-1, dev);
2889         if (--dev->block_count == 0)
2890                 scst_unblock_cmds(dev);
2891         spin_unlock_bh(&dev->dev_lock);
2892         sBUG_ON(dev->block_count < 0);
2893 }
2894
2895 /* No locks */
2896 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
2897 {
2898         scst_unblock_dev(cmd->dev);
2899         cmd->needs_unblocking = 0;
2900 }
2901
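/*
 * Typical pairing (illustrative only): a caller that must temporarily
 * quiesce a device blocks it, waits until no commands remain on the
 * device, does its work, then unblocks:
 */
#if 0
{
	scst_block_dev(dev, 0);	/* wait until on_dev_count drops to 0 */
	/* ... perform the management operation ... */
	scst_unblock_dev(dev);
}
#endif
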
2902 /* No locks */
2903 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
2904 {
2905         int res = 0;
2906         struct scst_device *dev = cmd->dev;
2907
2908         TRACE_ENTRY();
2909
2910         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
2911
2912         atomic_inc(&dev->on_dev_count);
2913         cmd->dec_on_dev_needed = 1;
2914         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
2915
2916 #ifdef STRICT_SERIALIZING
2917         spin_lock_bh(&dev->dev_lock);
2918         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2919                 goto out_unlock;
2920         if (dev->block_count > 0) {
2921                 scst_dec_on_dev_cmd(cmd);
2922                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
2923                         "serializing (tag %llu, dev %p)", cmd, (long long unsigned int)cmd->tag, dev);
2924                 list_add_tail(&cmd->blocked_cmd_list_entry,
2925                               &dev->blocked_cmd_list);
2926                 res = 1;
2927         } else {
2928                 __scst_block_dev(dev);
2929                 cmd->inc_blocking = 1;
2930         }
2931         spin_unlock_bh(&dev->dev_lock);
2932         goto out;
2933 #else
2934 repeat:
2935         if (unlikely(dev->block_count > 0)) {
2936                 spin_lock_bh(&dev->dev_lock);
2937                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2938                         goto out_unlock;
2939                 barrier(); /* to reread block_count */
2940                 if (dev->block_count > 0) {
2941                         scst_dec_on_dev_cmd(cmd);
2942                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
2943                                 "serializing (tag %llu, dev %p)", cmd,
2944                                 (long long unsigned int)cmd->tag, dev);
2945                         list_add_tail(&cmd->blocked_cmd_list_entry,
2946                                       &dev->blocked_cmd_list);
2947                         res = 1;
2948                         spin_unlock_bh(&dev->dev_lock);
2949                         goto out;
2950                 } else {
2951                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
2952                                 "continuing");
2953                 }
2954                 spin_unlock_bh(&dev->dev_lock);
2955         }
2956         if (unlikely(dev->dev_serialized)) {
2957                 spin_lock_bh(&dev->dev_lock);
2958                 barrier(); /* to reread block_count */
2959                 if (dev->block_count == 0) {
2960                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
2961                                 "cmds due to serializing (dev %p)", cmd,
2962                                 (long long unsigned int)cmd->tag, dev);
2963                         __scst_block_dev(dev);
2964                         cmd->inc_blocking = 1;
2965                 } else {
2966                         spin_unlock_bh(&dev->dev_lock);
2967                         TRACE_MGMT_DBG("Somebody blocked the device, "
2968                                 "repeating (count %d)", dev->block_count);
2969                         goto repeat;
2970                 }
2971                 spin_unlock_bh(&dev->dev_lock);
2972         }
2973 #endif
2974
2975 out:
2976         TRACE_EXIT_RES(res);
2977         return res;
2978
2979 out_unlock:
2980         spin_unlock_bh(&dev->dev_lock);
2981         goto out;
2982 }
2983
2984 /* Called under dev_lock */
2985 void scst_unblock_cmds(struct scst_device *dev)
2986 {
2987 #ifdef STRICT_SERIALIZING
2988         struct scst_cmd *cmd, *t;
2989         unsigned long flags;
2990
2991         TRACE_ENTRY();
2992
2993         local_irq_save(flags);
2994         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
2995                                  blocked_cmd_list_entry) {
2996                 int brk = 0;
2997                 /*
2998                  * Since only one cmd at a time is being executed, expected_sn
2999                  * can't change behind us if the corresponding cmd is in
3000                  * blocked_cmd_list, but we could be called before
3001                  * scst_inc_expected_sn().
3002                  */
3003                 if (likely(!cmd->internal && !cmd->retry)) {
3004                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
3005                         if (cmd->tgt_dev == NULL)
3006                                 sBUG();
3007                         expected_sn = cmd->tgt_dev->expected_sn;
3008                         if (cmd->sn == expected_sn)
3009                                 brk = 1;
3010                         else if (cmd->sn != (expected_sn+1))
3011                                 continue;
3012                 }
3013
3014                 list_del(&cmd->blocked_cmd_list_entry);
3015                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3016                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3017                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
3018                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3019                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3020                 if (brk)
3021                         break;
3022         }
3023         local_irq_restore(flags);
3024 #else /* STRICT_SERIALIZING */
3025         struct scst_cmd *cmd, *tcmd;
3026         unsigned long flags;
3027
3028         TRACE_ENTRY();
3029
3030         local_irq_save(flags);
3031         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3032                                  blocked_cmd_list_entry) {
3033                 list_del(&cmd->blocked_cmd_list_entry);
3034                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3035                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3036                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3037                         list_add(&cmd->cmd_list_entry,
3038                                 &cmd->cmd_lists->active_cmd_list);
3039                 else
3040                         list_add_tail(&cmd->cmd_list_entry,
3041                                 &cmd->cmd_lists->active_cmd_list);
3042                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3043                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3044         }
3045         local_irq_restore(flags);
3046 #endif /* STRICT_SERIALIZING */
3047
3048         TRACE_EXIT();
3049         return;
3050 }
3051
3052 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3053         struct scst_cmd *out_of_sn_cmd)
3054 {
3055         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3056
3057         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3058                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3059                 scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
3060         } else {
3061                 out_of_sn_cmd->out_of_sn = 1;
3062                 spin_lock_irq(&tgt_dev->sn_lock);
3063                 tgt_dev->def_cmd_count++;
3064                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3065                               &tgt_dev->skipped_sn_list);
3066                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3067                         "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3068                         tgt_dev->expected_sn);
3069                 spin_unlock_irq(&tgt_dev->sn_lock);
3070         }
3071
3072         return;
3073 }
3074
3075 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3076         struct scst_cmd *out_of_sn_cmd)
3077 {
3078         TRACE_ENTRY();
3079
3080         if (!out_of_sn_cmd->sn_set) {
3081                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3082                 goto out;
3083         }
3084
3085         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3086
3087 out:
3088         TRACE_EXIT();
3089         return;
3090 }
3091
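/*
 * Called when a HEAD OF QUEUE command is responded. Drops the tgt_dev's HQ
 * command count and, once no HQ commands remain in flight, lets the deferred
 * commands run again. Does nothing for commands that never incremented
 * hq_cmd_count.
 */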
3092 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3093 {
3094         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3095
3096         TRACE_ENTRY();
3097
3098         if (!cmd->hq_cmd_inced)
3099                 goto out;
3100
3101         spin_lock_irq(&tgt_dev->sn_lock);
3102         tgt_dev->hq_cmd_count--;
3103         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3104         spin_unlock_irq(&tgt_dev->sn_lock);
3106
3107         /*
3108          * Checking hq_cmd_count here without the lock is not a problem:
3109          * in the worst case we will only get an unneeded run of the
3110          * deferred commands.
3111          */
3112         if (tgt_dev->hq_cmd_count == 0)
3113                 scst_make_deferred_commands_active(tgt_dev, cmd);
3114
3115 out:
3116         TRACE_EXIT();
3117         return;
3118 }
3119
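/*
 * Decides how an aborted command is reported back: a command that had
 * already completed returns its result as is; otherwise TASK ABORTED status
 * is set if the device's TAS attribute requires delivery, or the abort is
 * made silent by clearing the ABORTED_OTHER flag.
 */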
3120 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3121 {
3122         TRACE_ENTRY();
3123
3124         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3125                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3126                 atomic_read(&scst_cmd_count));
3127
3128         scst_done_cmd_mgmt(cmd);
3129
3130         smp_rmb();
3131         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3132                 if (cmd->completed) {
3133                         /* It's completed and it's OK to return its result */
3134                         goto out;
3135                 }
3136
3137                 if (cmd->dev->tas) {
3138                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3139                                 "(tag %llu), returning TASK ABORTED", cmd,
3140                                 (long long unsigned int)cmd->tag);
3141                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3142                 } else {
3143                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3144                                 "(tag %llu), aborting without delivery or "
3145                                 "notification",
3146                                 cmd, (long long unsigned int)cmd->tag);
3147                         /*
3148                          * There is no need to check/requeue possible UA,
3149                          * because, if it exists, it will be delivered
3150                          * by the "completed" branch above.
3151                          */
3152                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3153                 }
3154         }
3155
3156 out:
3157         TRACE_EXIT();
3158         return;
3159 }
3160
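/*
 * Builds scst_scsi_op_list, a 256-entry index mapping a CDB opcode to the
 * first matching entry in scst_scsi_op_table (expected to be sorted by
 * opcode), with SCST_CDB_TBL_SIZE acting as the "no entry" marker. A lookup
 * then presumably runs along the lines of this hypothetical sketch:
 *
 *	int i = scst_scsi_op_list[cdb[0]];
 *	while ((i < SCST_CDB_TBL_SIZE) && (scst_scsi_op_table[i].ops == cdb[0]))
 *		check_entry(i++);	// check_entry() is made up here
 */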
3161 void __init scst_scsi_op_list_init(void)
3162 {
3163         int i;
3164         uint8_t op = 0xff;
3165
3166         TRACE_ENTRY();
3167
3168         for (i = 0; i < 256; i++)
3169                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3170
3171         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3172                 if (scst_scsi_op_table[i].ops != op) {
3173                         op = scst_scsi_op_table[i].ops;
3174                         scst_scsi_op_list[op] = i;
3175                 }
3176         }
3177
3178         TRACE_EXIT();
3179         return;
3180 }
3181
3182 #ifdef DEBUG
3183 /* Original taken from the XFS code */
3184 unsigned long scst_random(void)
3185 {
3186         static int Inited;
3187         static unsigned long RandomValue;
3188         static DEFINE_SPINLOCK(lock);
3189         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
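        /*
         * This is the Park-Miller "minimal standard" generator
         * x = 16807 * x mod (2^31 - 1), computed via Schrage's decomposition
         * 2147483647 = 16807 * 127773 + 2836 so that the intermediate
         * products stay within the signed 32-bit range.
         */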
3190         register long rv;
3191         register long lo;
3192         register long hi;
3193         unsigned long flags;
3194
3195         spin_lock_irqsave(&lock, flags);
3196         if (!Inited) {
3197                 RandomValue = jiffies;
3198                 Inited = 1;
3199         }
3200         rv = RandomValue;
3201         hi = rv / 127773;
3202         lo = rv % 127773;
3203         rv = 16807 * lo - 2836 * hi;
3204         if (rv <= 0)
3205                 rv += 2147483647;
3206         RandomValue = rv;
3207         spin_unlock_irqrestore(&lock, flags);
3208         return rv;
3209 }
3210 EXPORT_SYMBOL(scst_random);
3211 #endif
3212
3213 #ifdef DEBUG_TM
3214
3215 #define TM_DBG_STATE_ABORT              0
3216 #define TM_DBG_STATE_RESET              1
3217 #define TM_DBG_STATE_OFFLINE            2
3218
3219 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
3220
3221 static void tm_dbg_timer_fn(unsigned long arg);
3222
3223 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3224 /* All serialized by scst_tm_dbg_lock */
3225 static struct {
3226         unsigned int tm_dbg_release:1;
3227         unsigned int tm_dbg_blocked:1;
3228 } tm_dbg_flags;
3229 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3230 static int tm_dbg_delayed_cmds_count;
3231 static int tm_dbg_passed_cmds_count;
3232 static int tm_dbg_state;
3233 static int tm_dbg_on_state_passes;
3234 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3235 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3236
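/*
 * Number of tm_dbg_change_state() passes served in each state (indexed by
 * TM_DBG_STATE_ABORT/RESET/OFFLINE) before switching to the next state; the
 * OFFLINE entry is effectively "forever".
 */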
3237 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3238
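/*
 * Arms TM debugging for the tgt_dev being created, but only for LUN 0 of the
 * default ACG. Since a single wait queue pointer is kept, effectively one
 * device at a time is under TM debugging.
 */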
3239 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3240         struct scst_acg_dev *acg_dev)
3241 {
3242         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3243                 unsigned long flags;
3244                 /* Do TM debugging only for LUN 0 */
3245                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3246                 tm_dbg_p_cmd_list_waitQ =
3247                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3248                 tm_dbg_state = INIT_TM_DBG_STATE;
3249                 tm_dbg_on_state_passes =
3250                         tm_dbg_on_state_num_passes[tm_dbg_state];
3251                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3252                 PRINT_INFO("LUN 0 connected via target driver %s is under "
3253                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3254                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3255         }
3256 }
3257
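/*
 * Undoes tm_dbg_init_tgt_dev(): stops the release timer and detaches the
 * debugged wait queue.
 */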
3258 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3259 {
3260         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3261                 unsigned long flags;
3262                 del_timer_sync(&tm_dbg_timer);
3263                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3264                 tm_dbg_p_cmd_list_waitQ = NULL;
3265                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3266         }
3267 }
3268
3269 static void tm_dbg_timer_fn(unsigned long arg)
3270 {
3271         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3272         tm_dbg_flags.tm_dbg_release = 1;
3273         smp_wmb();
3274         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3275 }
3276
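/*
 * Queues cmd on tm_dbg_delayed_cmd_list instead of letting it complete. In
 * ABORT state the first delayed command also arms a release timer of 58-62
 * seconds, apparently long enough for the initiator to notice the stall and
 * start issuing task management requests.
 */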
3277 /* Called under scst_tm_dbg_lock and IRQs off */
3278 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3279 {
3280         switch (tm_dbg_state) {
3281         case TM_DBG_STATE_ABORT:
3282                 if (tm_dbg_delayed_cmds_count == 0) {
3283                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3284                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3285                                 "for %ld.%02ld seconds (%ld HZ), "
3286                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3287                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3288                         mod_timer(&tm_dbg_timer, jiffies + d);
3289 #if 0
3290                         tm_dbg_flags.tm_dbg_blocked = 1;
3291 #endif
3292                 } else {
3293                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3294                                 "(tag %llu), delayed_cmds_count=%d, "
3295                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3296                                 tm_dbg_delayed_cmds_count,
3297                                 tm_dbg_on_state_passes);
3298                         if (tm_dbg_delayed_cmds_count == 2)
3299                                 tm_dbg_flags.tm_dbg_blocked = 0;
3300                 }
3301                 break;
3302
3303         case TM_DBG_STATE_RESET:
3304         case TM_DBG_STATE_OFFLINE:
3305                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3306                         "(tag %llu), delayed_cmds_count=%d, "
3307                         "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3308                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3309                 tm_dbg_flags.tm_dbg_blocked = 1;
3310                 break;
3311
3312         default:
3313                 sBUG();
3314         }
3315         /* IRQs already off */
3316         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3317         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3318         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3319         cmd->tm_dbg_delayed = 1;
3320         tm_dbg_delayed_cmds_count++;
3321         return;
3322 }
3323
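/*
 * Moves all delayed commands back to their active command lists once
 * tm_dbg_release has been set by the release timer or by tm_dbg_task_mgmt().
 */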
3324 /* No locks */
3325 void tm_dbg_check_released_cmds(void)
3326 {
3327         if (tm_dbg_flags.tm_dbg_release) {
3328                 struct scst_cmd *cmd, *tc;
3329                 spin_lock_irq(&scst_tm_dbg_lock);
3330                 list_for_each_entry_safe_reverse(cmd, tc,
3331                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3332                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3333                                 "delayed_cmds_count=%d", cmd, cmd->tag,
3334                                 tm_dbg_delayed_cmds_count);
3335                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3336                         list_move(&cmd->cmd_list_entry,
3337                                 &cmd->cmd_lists->active_cmd_list);
3338                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3339                 }
3340                 tm_dbg_flags.tm_dbg_release = 0;
3341                 spin_unlock_irq(&scst_tm_dbg_lock);
3342         }
3343 }
3344
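/*
 * Advances the debug state machine ABORT -> RESET -> OFFLINE (or back to
 * ABORT, if TM_DBG_GO_OFFLINE is disabled) after the configured number of
 * passes and always stops the release timer.
 */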
3345 /* Called under scst_tm_dbg_lock */
3346 static void tm_dbg_change_state(void)
3347 {
3348         tm_dbg_flags.tm_dbg_blocked = 0;
3349         if (--tm_dbg_on_state_passes == 0) {
3350                 switch (tm_dbg_state) {
3351                 case TM_DBG_STATE_ABORT:
3352                         TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to RESET");
3353                         tm_dbg_state = TM_DBG_STATE_RESET;
3354                         tm_dbg_flags.tm_dbg_blocked = 0;
3355                         break;
3356                 case TM_DBG_STATE_RESET:
3357                 case TM_DBG_STATE_OFFLINE:
3358                         if (TM_DBG_GO_OFFLINE) {
3359                                 TRACE_MGMT_DBG("%s", "Changing tm_dbg_state "
3360                                         "to OFFLINE");
3361                                 tm_dbg_state = TM_DBG_STATE_OFFLINE;
3362                         } else {
3363                                 TRACE_MGMT_DBG("%s", "Changing tm_dbg_state "
3364                                         "to ABORT");
3365                                 tm_dbg_state = TM_DBG_STATE_ABORT;
3366                         }
3367                         break;
3368                 default:
3369                         sBUG();
3370                 }
3371                 tm_dbg_on_state_passes =
3372                         tm_dbg_on_state_num_passes[tm_dbg_state];
3377         }
3378
3379         TRACE_MGMT_DBG("%s", "Deleting timer");
3380         del_timer(&tm_dbg_timer);
3381 }
3382
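/*
 * Returns 1 if cmd has been delayed for TM debugging and must not be
 * processed now, 0 otherwise. While tm_dbg_blocked is set every command on
 * the debugged tgt_dev is delayed, otherwise every 50th one.
 */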
3383 /* No locks */
3384 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3385 {
3386         int res = 0;
3387         unsigned long flags;
3388
3389         if (cmd->tm_dbg_immut)
3390                 goto out;
3391
3392         if (cmd->tm_dbg_delayed) {
3393                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3394                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3395                         "delayed_cmds_count=%d", cmd, cmd->tag,
3396                         tm_dbg_delayed_cmds_count);
3397
3398                 cmd->tm_dbg_immut = 1;
3399                 tm_dbg_delayed_cmds_count--;
3400                 if ((tm_dbg_delayed_cmds_count == 0) &&
3401                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3402                         tm_dbg_change_state();
3403                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3404         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3405                                         &cmd->tgt_dev->tgt_dev_flags)) {
3406                 /* Delay every 50th command, or all of them while blocked */
3407                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3408                 if (tm_dbg_flags.tm_dbg_blocked ||
3409                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3410                         tm_dbg_delay_cmd(cmd);
3411                         res = 1;
3412                 } else
3413                         cmd->tm_dbg_immut = 1;
3414                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3415         }
3416
3417 out:
3418         return res;
3419 }
3420
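/*
 * Handles an abort request for a possibly delayed command: the command is
 * moved back to its active command list. With 1-in-10 probability it is
 * first completed with HARDWARE ERROR sense, seemingly to exercise the
 * abort path for already completed commands.
 */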
3421 /* No locks */
3422 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3423 {
3424         struct scst_cmd *c;
3425         unsigned long flags;
3426
3427         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3428         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3429                                 cmd_list_entry) {
3430                 if (c == cmd) {
3431                         TRACE_MGMT_DBG("Abort request for "
3432                                 "delayed cmd %p (tag=%llu), moving it to "
3433                                 "active cmd list (delayed_cmds_count=%d)",
3434                                 c, c->tag, tm_dbg_delayed_cmds_count);
3435
3436                         if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3437                                 /* Test how completed commands are handled */
3438                                 if ((scst_random() % 10) == 5) {
3439                                         scst_set_cmd_error(cmd,
3440                                            SCST_LOAD_SENSE(scst_sense_hardw_error));
3441                                         /* It's completed now */
3442                                 }
3443                         }
3444
3445                         spin_lock(&c->cmd_lists->cmd_list_lock);
3446                         list_move(&c->cmd_list_entry,
3447                                 &c->cmd_lists->active_cmd_list);
3448                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3449                         spin_unlock(&c->cmd_lists->cmd_list_lock);
3450                         break;
3451                 }
3452         }
3453         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3454 }
3455
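/*
 * Reacts to a task management event: unless the debug machinery is in
 * OFFLINE state (and force is not set), the state is advanced and all
 * delayed commands are released. If dev is supplied, devices not under TM
 * debugging are left alone.
 */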
3456 /* Might be called under scst_mutex */
3457 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3458 {
3459         unsigned long flags;
3460
3461         if (dev != NULL) {
3462                 struct scst_tgt_dev *tgt_dev;
3463                 bool found = false;
3464
3465                 spin_lock_bh(&dev->dev_lock);
3466                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3467                                             dev_tgt_dev_list_entry) {
3468                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3469                                         &tgt_dev->tgt_dev_flags)) {
3470                                 found = true;
3471                                 break;
3472                         }
3473                 }
3474                 spin_unlock_bh(&dev->dev_lock);
3475
3476                 if (!found)
3477                         goto out;
3478         }
3479
3480         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3481         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3482                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3483                         tm_dbg_delayed_cmds_count);
3484                 tm_dbg_change_state();
3485                 tm_dbg_flags.tm_dbg_release = 1;
3486                 smp_wmb();
3487                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3488                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3489         } else {
3490                 TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
3491         }
3492         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3493
3494 out:
3495         return;
3496 }
3497
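/* Lockless peek at the release flag. */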
3498 int tm_dbg_is_release(void)
3499 {
3500         return tm_dbg_flags.tm_dbg_release;
3501 }
3502 #endif /* DEBUG_TM */
3503
3504 #ifdef DEBUG_SN
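/*
 * Randomly rewrites cmd->queue_type, in bursts of up to 9 commands plus
 * occasional one-shot overrides, to stress the SN/ordering machinery with
 * unexpected ORDERED, HEAD OF QUEUE and SIMPLE commands.
 */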
3505 void scst_check_debug_sn(struct scst_cmd *cmd)
3506 {
3507         static DEFINE_SPINLOCK(lock);
3508         static int type;
3509         static int cnt;
3510         unsigned long flags;
3511         int old = cmd->queue_type;
3512
3513         spin_lock_irqsave(&lock, flags);
3514
3515         if (cnt == 0) {
3516                 if ((scst_random() % 1000) == 500) {
3517                         if ((scst_random() % 3) == 1)
3518                                 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3519                         else
3520                                 type = SCST_CMD_QUEUE_ORDERED;
3521                         do {
3522                                 cnt = scst_random() % 10;
3523                         } while (cnt == 0);
3524                 } else
3525                         goto out_unlock;
3526         }
3527
3528         cmd->queue_type = type;
3529         cnt--;
3530
3531         if ((scst_random() % 1000) == 750)
3532                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3533         else if ((scst_random() % 1000) == 751)
3534                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3535         else if ((scst_random() % 1000) == 752)
3536                 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3537
3538         TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3539                 cmd->queue_type, cnt);
3540
3541 out_unlock:
3542         spin_unlock_irqrestore(&lock, flags);
3543         return;
3544 }
3545 #endif /* DEBUG_SN */