/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *                 and Leonid Stoljar
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#ifdef SCST_HIGHMEM
#include <linux/highmem.h>
#endif

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);

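/*
 * Allocate a sense buffer for @cmd from scst_sense_mempool. With
 * @atomic set the allocation uses GFP_ATOMIC and may fail; otherwise
 * GFP_KERNEL|__GFP_NOFAIL is used, so in process context the sense
 * data cannot be lost. Returns 0 on success or -ENOMEM.
 */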
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->is_send_status = 1;
        cmd->resp_data_len = 0;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}

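/*
 * Set CHECK CONDITION status on @cmd together with fixed-format sense
 * data built from @key/@asc/@ascq. A typical call, as done elsewhere
 * in this file (SCST_LOAD_SENSE() expands a scst_sense_* triplet into
 * the key/asc/ascq arguments):
 *
 *      scst_set_cmd_error(cmd,
 *              SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
 */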
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}

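/*
 * Fill @buffer with fixed-format sense data as defined by SPC:
 * response code 0x70 (current errors, fixed format) in byte 0, the
 * sense key in byte 2, additional sense length 0x0a in byte 7 and
 * ASC/ASCQ in bytes 12/13.
 */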
void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

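/*
 * Report a "device busy" condition to the initiator. If this is the
 * only outstanding command of the session, or the session is still
 * initializing, BUSY is returned; otherwise TASK SET FULL is more
 * appropriate, since it tells the initiator its queue depth was hit.
 */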
void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}

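/*
 * Truncate the response to @resp_data_len bytes by shortening the SG
 * vector in place. The original entry count and entry length are saved
 * so that scst_check_restore_sg_buff() can undo the truncation later.
 */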
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}

/* Called under scst_mutex and with activity suspended */
int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and with activity suspended */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
        struct scst_device *dev, lun_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

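/*
 * ACGs (access control groups) tie initiators to the set of devices
 * and LUNs they may see: each group holds a list of acg_devs, a list
 * of sessions assigned to it and a list of initiator names (acn_list).
 */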
/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

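/*
 * A tgt_dev represents one LUN as seen by one session: it caches the
 * SG limits of the underlying device, selects the matching sgv pool,
 * pre-computes which processing stages may run in atomic context and
 * queues the initial POWER ON/RESET Unit Attention.
 */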
/* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
                scst_sgv_pool_use_dma(tgt_dev);
        } else {
#ifdef SCST_HIGHMEM
                scst_sgv_pool_use_highmem(tgt_dev);
#endif
        }

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%Ld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
                    (sess->tgt->tgtt->rdy_to_xfer == NULL))
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic || (dev->handler->exec == NULL))
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic || (dev->handler->exec == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
                    (sess->tgt->tgtt->rdy_to_xfer == NULL))
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if ((dev->handler->dev_done_atomic ||
             (dev->handler->dev_done == NULL)) &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

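/*
 * On nexus loss the reservation (if any) is dropped, all pending Unit
 * Attentions are discarded and a NEXUS LOSS UA is queued, so the
 * initiator learns about the event on its next command.
 */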
/* No other locks are supposed to be held; scst_mutex is held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
        int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s (LUN %Ld, "
                                "rd_only %d)", dev->virt_name, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                                "%Ld, rd_only %d)", dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name,
                                (long long unsigned int)lun,
                                read_only);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

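/*
 * Internal commands are created by SCST itself on behalf of an
 * original initiator command (such as the REQUEST SENSE below). They
 * reuse the session and device of the original command and are queued
 * HEAD OF QUEUE so they execute ahead of other commands.
 */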
struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->state = SCST_CMD_STATE_DEV_PARSE;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;

        res->bufflen = bufsize;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

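/*
 * Build an internal REQUEST SENSE command for @orig_cmd (used, e.g.,
 * when the device returned CHECK CONDITION without delivering sense)
 * and put it at the head of the active command list. Its result is
 * later picked up by scst_complete_request_sense().
 */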
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = SCST_CMD_STATE_RES_CONT_NEXT;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        if (req_cmd->dev->handler->dev_done != NULL) {
                int rc;
                TRACE_DBG("Calling dev handler %s dev_done(%p)",
                      req_cmd->dev->handler->name, req_cmd);
                rc = req_cmd->dev->handler->dev_done(req_cmd);
                TRACE_DBG("Dev handler %s dev_done() returned %d",
                      req_cmd->dev->handler->name, rc);
        }

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request) != NULL) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_device *dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        scsi_dev = dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, SCST_DEFAULT_TIMEOUT, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, SCST_DEFAULT_TIMEOUT, 0, 0);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc))
                        break;

                PRINT_ERROR("RELEASE failed: %d", rc);
                PRINT_BUFFER("RELEASE sense", sense,
                        SCST_SENSE_BUFFERSIZE);
                scst_check_internal_sense(dev, rc,
                                sense, SCST_SENSE_BUFFERSIZE);
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

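/*
 * Drop a SCSI-2 reservation held through @tgt_dev: clear the RESERVED
 * flag on all tgt_devs of the device and send a RELEASE to the real
 * device, but only if this I_T nexus is the one holding the
 * reservation (non-holders have SCST_TGT_DEV_RESERVED set).
 */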
/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}

struct scst_cmd *scst_alloc_cmd(int gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->is_send_status = 1;
        cmd->resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /* At this point tgt_dev can be dead, but the pointer remains not-NULL */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %Lu)",
                  cmd, (long long unsigned int)cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(EXTRACHECKS)
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif
#endif

        scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef EXTRACHECKS
                if (unlikely(!cmd->sent_to_midlev)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                             "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
                             cmd, cmd->cdb[0], cmd->tgtt->name,
                             (long long unsigned int)cmd->lun,
                             cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd,
                                (long long unsigned int)cmd->tag,
                                cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

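/*
 * When a target driver reports "queue full", commands are parked on
 * tgt->retry_cmd_list. Each completed command calls this to move up to
 * two retries back to the active list ("slow start"), so the target
 * isn't flooded again immediately.
 */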
/* No locks supposed to be held */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_ENTRY();

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_CRIT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
        int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        /* cmd->dev->scsi_dev must be non-NULL here */
        req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
        if (req == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scsi_request failed");
                res = -ENOMEM;
                goto out;
        }

        cmd->scsi_req = req;

        memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
        req->sr_cmd_len = cmd->cdb_len;
        req->sr_data_direction = cmd->data_direction;
        req->sr_use_sg = cmd->sg_cnt;
        req->sr_bufflen = cmd->bufflen;
        req->sr_buffer = cmd->sg;
        req->sr_request->rq_disk = cmd->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        cmd->scsi_req->upper_private_data = cmd;

out:
        TRACE_EXIT();
        return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
        scsi_release_request(cmd->scsi_req);
        cmd->scsi_req = NULL;
}
#endif

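/*
 * Allocate the data buffer for @cmd from the tgt_dev's sgv pool. In
 * atomic context the allocation may only be satisfied from the pool's
 * cache (SCST_POOL_NO_ALLOC_ON_CACHE_MISS), so it can fail where a
 * process-context allocation would succeed.
 */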
int scst_alloc_space(struct scst_cmd *cmd)
{
        int gfp_mask;
        int res = -ENOMEM;
        int atomic = scst_cmd_atomic(cmd);
        int flags;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
        int bufflen = cmd->bufflen;

        TRACE_ENTRY();

        gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);

        flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
        if (cmd->no_sgv)
                flags |= SCST_POOL_ALLOC_NO_CACHED;

        if (unlikely(cmd->bufflen == 0)) {
                TRACE(TRACE_MGMT_MINOR, "Warning: data direction %d and/or "
                        "zero buffer length. Opcode 0x%x, handler %s, target "
                        "%s", cmd->data_direction, cmd->cdb[0],
                        cmd->dev->handler->name, cmd->tgtt->name);
                /*
                 * Be on the safe side and alloc a stub buffer. Neither target
                 * drivers, nor user space will touch it, since bufflen
                 * remains 0.
                 */
                bufflen = PAGE_SIZE;
        }

        cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &cmd->sgv, NULL);
        if (cmd->sg == NULL)
                goto out;

        if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
                static int ll;
                if (ll < 10) {
                        PRINT_INFO("Unable to complete command due to "
                                "SG IO count limitation (requested %d, "
                                "available %d, tgt lim %d)", cmd->sg_cnt,
                                tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
                        ll++;
                }
                goto out_sg_free;
        }

        res = 0;

out:
        TRACE_EXIT();
        return res;

out_sg_free:
        sgv_pool_free(cmd->sgv);
        cmd->sgv = NULL;
        cmd->sg = NULL;
        cmd->sg_cnt = 0;
        goto out;
}

1628 void scst_release_space(struct scst_cmd *cmd)
1629 {
1630         TRACE_ENTRY();
1631
1632         if (cmd->sgv == NULL)
1633                 goto out;
1634
1635         if (cmd->data_buf_alloced) {
1636                 TRACE_MEM("%s", "data_buf_alloced set, returning");
1637                 goto out;
1638         }
1639
1640         sgv_pool_free(cmd->sgv);
1641
1642         cmd->sgv = NULL;
1643         cmd->sg_cnt = 0;
1644         cmd->sg = NULL;
1645         cmd->bufflen = 0;
1646         cmd->data_len = 0;
1647
1648 out:
1649         TRACE_EXIT();
1650         return;
1651 }
1652
1653 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1654
1655 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1656 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1657
1658 int scst_get_cdb_len(const uint8_t *cdb)
1659 {
1660         return SCST_GET_CDB_LEN(cdb[0]);
1661 }
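/*
 * The CDB length is encoded in the top three bits of the opcode (the
 * command group). For example, READ(10) has opcode 0x28, so its group
 * is (0x28 >> 5) & 0x7 == 1 and SCST_CDB_LENGTH[1] == 10 bytes.
 * Groups 3, 6 and 7 are reserved/vendor-specific, hence -1 (unknown).
 */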
1662
1663 /* get_trans_len_x extracts x bytes from the CDB as the length, starting at off */
1664
1665 /* for special commands */
1666 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1667 {
1668         cmd->bufflen = 6;
1669         return 0;
1670 }
1671
1672 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1673 {
1674         cmd->bufflen = READ_CAP_LEN;
1675         return 0;
1676 }
1677
1678 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1679 {
1680         cmd->bufflen = 1;
1681         return 0;
1682 }
1683
1684 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1685 {
1686         const uint8_t *p = cmd->cdb + off;
1687         int res = 0;
1688
1689         cmd->bufflen = 0;
1690         cmd->bufflen |= ((u32)p[0]) << 8;
1691         cmd->bufflen |= ((u32)p[1]);
1692
1693         switch (cmd->cdb[1] & 0x1f) {
1694         case 0:
1695         case 1:
1696         case 6:
1697                 if (cmd->bufflen != 0) {
1698                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1699                                 "allocation length for service action %x",
1700                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1701                         goto out_inval;
1702                 }
1703                 break;
1704         }
1705
1706         switch (cmd->cdb[1] & 0x1f) {
1707         case 0:
1708         case 1:
1709                 cmd->bufflen = 20;
1710                 break;
1711         case 6:
1712                 cmd->bufflen = 32;
1713                 break;
1714         case 8:
1715                 cmd->bufflen = max(28, cmd->bufflen);
1716                 break;
1717         default:
1718                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1719                         cmd->cdb[1] & 0x1f);
1720                 goto out_inval;
1721         }
1722
1723 out:
1724         return res;
1725
1726 out_inval:
1727         scst_set_cmd_error(cmd,
1728                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1729         res = 1;
1730         goto out;
1731 }
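/*
 * READ POSITION reply sizes by service action, as handled above:
 * 0 and 1 return the 20-byte short form, 6 the 32-byte long form, and
 * 8 the extended form, for which the code uses the larger of 28 bytes
 * and the CDB allocation length. For the short and long forms the
 * allocation length must be zero, which the first switch enforces.
 */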
1732
1733 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1734 {
1735         cmd->bufflen = (u32)cmd->cdb[off];
1736         return 0;
1737 }
1738
1739 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1740 {
1741         const uint8_t *p = cmd->cdb + off;
1742
1743         cmd->bufflen = 0;
1744         cmd->bufflen |= ((u32)p[0]) << 8;
1745         cmd->bufflen |= ((u32)p[1]);
1746
1747         return 0;
1748 }
1749
1750 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1751 {
1752         const uint8_t *p = cmd->cdb + off;
1753
1754         cmd->bufflen = 0;
1755         cmd->bufflen |= ((u32)p[0]) << 16;
1756         cmd->bufflen |= ((u32)p[1]) << 8;
1757         cmd->bufflen |= ((u32)p[2]);
1758
1759         return 0;
1760 }
1761
1762 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1763 {
1764         const uint8_t *p = cmd->cdb + off;
1765
1766         cmd->bufflen = 0;
1767         cmd->bufflen |= ((u32)p[0]) << 24;
1768         cmd->bufflen |= ((u32)p[1]) << 16;
1769         cmd->bufflen |= ((u32)p[2]) << 8;
1770         cmd->bufflen |= ((u32)p[3]);
1771
1772         return 0;
1773 }
1774
1775 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1776 {
1777         cmd->bufflen = 0;
1778         return 0;
1779 }
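/*
 * The multi-byte variants above assemble a big-endian transfer length
 * from consecutive CDB bytes. For example, with get_trans_len_3 and
 * CDB bytes { 0x00, 0x10, 0x00 } at offset off, bufflen becomes
 * (0x00 << 16) | (0x10 << 8) | 0x00 == 4096.
 */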
1780
1781 int scst_get_cdb_info(struct scst_cmd *cmd)
1782 {
1783         int dev_type = cmd->dev->handler->type;
1784         int i, res = 0;
1785         uint8_t op;
1786         const struct scst_sdbops *ptr = NULL;
1787
1788         TRACE_ENTRY();
1789
1790         op = cmd->cdb[0];       /* get the raw opcode */
1791
1792         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1793                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1794                 dev_type);
1795
1796         i = scst_scsi_op_list[op];
1797         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1798                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1799                         ptr = &scst_scsi_op_table[i];
1800                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1801                               ptr->ops, ptr->devkey[0], /* disk     */
1802                               ptr->devkey[1],   /* tape     */
1803                               ptr->devkey[2],   /* printer */
1804                               ptr->devkey[3],   /* cpu      */
1805                               ptr->devkey[4],   /* cdr      */
1806                               ptr->devkey[5],   /* cdrom    */
1807                               ptr->devkey[6],   /* scanner */
1808                               ptr->devkey[7],   /* worm     */
1809                               ptr->devkey[8],   /* changer */
1810                               ptr->devkey[9],   /* commdev */
1811                               ptr->op_name);
1812                         TRACE_DBG("direction=%d flags=%d off=%d",
1813                               ptr->direction,
1814                               ptr->flags,
1815                               ptr->off);
1816                         break;
1817                 }
1818                 i++;
1819         }
1820
1821         if (ptr == NULL) {
1822                 /* opcode not found or not currently supported */
1823                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1824                       dev_type);
1825                 res = -1;
1826                 cmd->op_flags = SCST_INFO_INVALID;
1827                 goto out;
1828         }
1829
1830         cmd->cdb_len = SCST_GET_CDB_LEN(op);
1831         cmd->op_name = ptr->op_name;
1832         cmd->data_direction = ptr->direction;
1833         cmd->op_flags = ptr->flags;
1834         res = (*ptr->get_trans_len)(cmd, ptr->off);
1835
1836 out:
1837         TRACE_EXIT();
1838         return res;
1839 }
1840
1841 /*
1842  * Routine to extract a LUN number from an 8-byte LUN structure in
1843  * network byte order (big endian); see SAM-2, Section 4.12.3, page 40.
1844  * Supports three addressing methods: peripheral device, flat space
1845  * and logical unit.
1846  */
1847 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1848 {
1849         lun_t res = (lun_t)-1;
1850         int address_method;
1851
1852         TRACE_ENTRY();
1853
1854         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1855
1856         if (unlikely(len < 2)) {
1857                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1858                         "more", len);
1859                 goto out;
1860         }
1861
1862         if (len > 2) {
1863                 switch (len) {
1864                 case 8:
1865                         if ((*((uint64_t *)lun) &
1866                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1867                                 goto out_err;
1868                         break;
1869                 case 4:
1870                         if (*((uint16_t *)&lun[2]) != 0)
1871                                 goto out_err;
1872                         break;
1873                 case 6:
1874                         if (*((uint32_t *)&lun[2]) != 0)
1875                                 goto out_err;
1876                         break;
1877                 default:
1878                         goto out_err;
1879                 }
1880         }
1881
1882         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1883         switch (address_method) {
1884         case 0: /* peripheral device addressing method */
1885 #if 0 /* Looks like it's legal to use it as flat space addressing method as well */
1886                 if (*lun) {
1887                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
1888                              "peripheral device addressing method 0x%02x, "
1889                              "expected 0", *lun);
1890                         break;
1891                 }
1892                 res = *(lun + 1);
1893                 break;
1894 #else
1895                 /* fall through */
1896 #endif
1897
1898         case 1: /* flat space addressing method */
1899                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1900                 break;
1901
1902         case 2: /* logical unit addressing method */
1903                 if (*lun & 0x3f) {
1904                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1905                                     "addressing method 0x%02x, expected 0",
1906                                     *lun & 0x3f);
1907                         break;
1908                 }
1909                 if (*(lun + 1) & 0xe0) {
1910                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
1911                                     "addressing method 0x%02x, expected 0",
1912                                     (*(lun + 1) & 0xe0) >> 5);
1913                         break;
1914                 }
1915                 res = *(lun + 1) & 0x1f;
1916                 break;
1917
1918         case 3: /* extended logical unit addressing method */
1919         default:
1920                 PRINT_ERROR("Unimplemented LUN addressing method %u",
1921                             address_method);
1922                 break;
1923         }
1924
1925 out:
1926         TRACE_EXIT_RES((int)res);
1927         return res;
1928
1929 out_err:
1930         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
1931         goto out;
1932 }
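/*
 * Example: the 8-byte LUN { 0x40, 0x05, 0, 0, 0, 0, 0, 0 } uses the
 * flat space method (top two bits of byte 0 == 1), so the result is
 * ((0x40 & 0x3f) << 8) | 0x05 == 5. With { 0x00, 0x05, ... } the
 * peripheral method falls through to the same flat space arithmetic
 * and also yields LUN 5.
 */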
1933
1934 int scst_calc_block_shift(int sector_size)
1935 {
1936         int block_shift = 0;
1937         int t;
1938
1939         if (sector_size == 0)
1940                 sector_size = 512;
1941
1942         t = sector_size;
1943         while (1) {
1944                 if ((t & 1) != 0)
1945                         break;
1946                 t >>= 1;
1947                 block_shift++;
1948         }
1949         if (block_shift < 9) {
1950                 PRINT_ERROR("Wrong sector size %d", sector_size);
1951                 block_shift = -1;
1952         }
1953
1954         TRACE_EXIT_RES(block_shift);
1955         return block_shift;
1956 }
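/*
 * Example: a sector size of 512 (1 << 9) yields block_shift 9 and 4096
 * yields 12. A size whose lowest set bit is below bit 9, e.g. 520
 * (lowest set bit 3), stops early and is rejected with -1.
 */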
1957
1958 int scst_sbc_generic_parse(struct scst_cmd *cmd,
1959         int (*get_block_shift)(struct scst_cmd *cmd))
1960 {
1961         int res = 0;
1962
1963         TRACE_ENTRY();
1964
1965         /*
1966          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1967          * therefore change them only if necessary
1968          */
1969
1970         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1971               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1972
1973         switch (cmd->cdb[0]) {
1974         case SERVICE_ACTION_IN:
1975                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
1976                         cmd->bufflen = READ_CAP16_LEN;
1977                         cmd->data_direction = SCST_DATA_READ;
1978                 }
1979                 break;
1980         case VERIFY_6:
1981         case VERIFY:
1982         case VERIFY_12:
1983         case VERIFY_16:
1984                 if ((cmd->cdb[1] & BYTCHK) == 0) {
1985                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
1986                         cmd->bufflen = 0;
1987                         goto out;
1988                 } else
1989                         cmd->data_len = 0;
1990                 break;
1991         default:
1992                 /* It's all good */
1993                 break;
1994         }
1995
1996         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
1997                 /*
1998                  * No need for locks here, since *_detach() cannot be
1999                  * called while there are outstanding commands.
2000                  */
2001                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2002         }
2003
2004 out:
2005         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2006               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2007
2008         TRACE_EXIT_RES(res);
2009         return res;
2010 }
2011
2012 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2013         int (*get_block_shift)(struct scst_cmd *cmd))
2014 {
2015         int res = 0;
2016
2017         TRACE_ENTRY();
2018
2019         /*
2020          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2021          * therefore change them only if necessary
2022          */
2023
2024         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2025               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2026
2027         cmd->cdb[1] &= 0x1f;    /* strip the obsolete LUN bits from CDB byte 1 */
2028
2029         switch (cmd->cdb[0]) {
2030         case VERIFY_6:
2031         case VERIFY:
2032         case VERIFY_12:
2033         case VERIFY_16:
2034                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2035                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2036                         cmd->bufflen = 0;
2037                         goto out;
2038                 }
2039                 break;
2040         default:
2041                 /* It's all good */
2042                 break;
2043         }
2044
2045         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2046                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2047
2048 out:
2049         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2050                 cmd->data_direction);
2051
2052         TRACE_EXIT();
2053         return res;
2054 }
2055
2056 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2057         int (*get_block_shift)(struct scst_cmd *cmd))
2058 {
2059         int res = 0;
2060
2061         TRACE_ENTRY();
2062
2063         /*
2064          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2065          * therefore change them only if necessary
2066          */
2067
2068         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2069               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2070
2071         cmd->cdb[1] &= 0x1f;    /* strip the obsolete LUN bits from CDB byte 1 */
2072
2073         switch (cmd->cdb[0]) {
2074         case VERIFY_6:
2075         case VERIFY:
2076         case VERIFY_12:
2077         case VERIFY_16:
2078                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2079                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2080                         cmd->bufflen = 0;
2081                         goto out;
2082                 }
2083                 break;
2084         default:
2085                 /* It's all good */
2086                 break;
2087         }
2088
2089         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2090                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2091
2092 out:
2093         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2094                 cmd->data_direction);
2095
2096         TRACE_EXIT_RES(res);
2097         return res;
2098 }
2099
2100 int scst_tape_generic_parse(struct scst_cmd *cmd,
2101         int (*get_block_size)(struct scst_cmd *cmd))
2102 {
2103         int res = 0;
2104
2105         TRACE_ENTRY();
2106
2107         /*
2108          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2109          * therefore change them only if necessary
2110          */
2111
2112         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2113               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2114
2115         if (cmd->cdb[0] == READ_POSITION) {
2116                 int tclp = cmd->cdb[1] & TCLP_BIT;
2117                 int long_bit = cmd->cdb[1] & LONG_BIT;
2118                 int bt = cmd->cdb[1] & BT_BIT;
2119
2120                 if ((!!tclp == !!long_bit) && (!bt || !long_bit)) {
2121                         cmd->bufflen =
2122                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2123                         cmd->data_direction = SCST_DATA_READ;
2124                 } else {
2125                         cmd->bufflen = 0;
2126                         cmd->data_direction = SCST_DATA_NONE;
2127                 }
2128         }
2129
2130         if ((cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) && (cmd->cdb[1] & 1))
2131                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2132
2133         TRACE_EXIT_RES(res);
2134         return res;
2135 }
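/*
 * With the FIXED bit (CDB byte 1, bit 0) set, the CDB transfer length
 * counts blocks, not bytes. For example, READ(6) with transfer length
 * 4 on a tape with a 1024-byte block size gives bufflen == 4 * 1024
 * == 4096.
 */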
2136
2137 static int scst_null_parse(struct scst_cmd *cmd)
2138 {
2139         int res = 0;
2140
2141         TRACE_ENTRY();
2142
2143         /*
2144          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2145          * therefore change them only if necessary
2146          */
2147
2148         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2149               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2150 #if 0
2151         switch (cmd->cdb[0]) {
2152         default:
2153                 /* It's all good */
2154                 break;
2155         }
2156 #endif
2157         TRACE_DBG("res %d bufflen %d direct %d",
2158               res, cmd->bufflen, cmd->data_direction);
2159
2160         TRACE_EXIT();
2161         return res;
2162 }
2163
2164 int scst_changer_generic_parse(struct scst_cmd *cmd,
2165         int (*nothing)(struct scst_cmd *cmd))
2166 {
2167         return scst_null_parse(cmd);
2168 }
2169
2170 int scst_processor_generic_parse(struct scst_cmd *cmd,
2171         int (*nothing)(struct scst_cmd *cmd))
2172 {
2173         return scst_null_parse(cmd);
2174 }
2175
2176 int scst_raid_generic_parse(struct scst_cmd *cmd,
2177         int (*nothing)(struct scst_cmd *cmd))
2178 {
2179         return scst_null_parse(cmd);
2180 }
2181
2182 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2183         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2184 {
2185         int opcode = cmd->cdb[0];
2186         int status = cmd->status;
2187         int res = SCST_CMD_STATE_DEFAULT;
2188
2189         TRACE_ENTRY();
2190
2191         /*
2192          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2193          * based on cmd->status and cmd->data_direction, therefore change
2194          * them only if necessary
2195          */
2196
2197         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2198                 switch (opcode) {
2199                 case READ_CAPACITY:
2200                 {
2201                         /* Always keep track of disk capacity */
2202                         int buffer_size, sector_size, sh;
2203                         uint8_t *buffer;
2204
2205                         buffer_size = scst_get_buf_first(cmd, &buffer);
2206                         if (unlikely(buffer_size <= 0)) {
2207                                 PRINT_ERROR("%s: Unable to get the buffer "
2208                                         "(%d)", __func__, buffer_size);
2209                                 goto out;
2210                         }
2211
2212                         sector_size =
2213                             ((buffer[4] << 24) | (buffer[5] << 16) |
2214                              (buffer[6] << 8) | (buffer[7] << 0));
2215                         scst_put_buf(cmd, buffer);
2216                         if (sector_size != 0)
2217                                 sh = scst_calc_block_shift(sector_size);
2218                         else
2219                                 sh = 0;
2220                         set_block_shift(cmd, sh);
2221                         TRACE_DBG("block_shift %d", sh);
2222                         break;
2223                 }
2224                 default:
2225                         /* It's all good */
2226                         break;
2227                 }
2228         }
2229
2230         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2231               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2232
2233 out:
2234         TRACE_EXIT_RES(res);
2235         return res;
2236 }
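/*
 * The READ CAPACITY(10) response is 8 bytes: the last LBA in bytes 0-3
 * and the block length in bytes 4-7, both big endian, which is why the
 * sector size is assembled from buffer[4..7] above. For example, bytes
 * { 0x00, 0x00, 0x02, 0x00 } in positions 4-7 describe a 512-byte
 * sector and hence block_shift 9.
 */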
2237
2238 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2239         void (*set_block_size)(struct scst_cmd *cmd, int block_size))
2240 {
2241         int opcode = cmd->cdb[0];
2242         int res = SCST_CMD_STATE_DEFAULT;
2243         int buffer_size, bs;
2244         uint8_t *buffer = NULL;
2245
2246         TRACE_ENTRY();
2247
2248         /*
2249          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2250          * based on cmd->status and cmd->data_direction, therefore change
2251          * them only if necessary
2252          */
2253
2254         switch (opcode) {
2255         case MODE_SENSE:
2256         case MODE_SELECT:
2257                 buffer_size = scst_get_buf_first(cmd, &buffer);
2258                 if (unlikely(buffer_size <= 0)) {
2259                         PRINT_ERROR("%s: Unable to get the buffer (%d)",
2260                                 __func__, buffer_size);
2261                         goto out;
2262                 }
2263                 break;
2264         }
2265
2266         switch (opcode) {
2267         case MODE_SENSE:
2268                 TRACE_DBG("%s", "MODE_SENSE");
2269                 if ((cmd->cdb[2] & 0xC0) == 0) {
2270                         if (buffer[3] == 8) {
2271                                 bs = (buffer[9] << 16) |
2272                                     (buffer[10] << 8) | buffer[11];
2273                                 set_block_size(cmd, bs);
2274                         }
2275                 }
2276                 break;
2277         case MODE_SELECT:
2278                 TRACE_DBG("%s", "MODE_SELECT");
2279                 if (buffer[3] == 8) {
2280                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2281                             (buffer[11]);
2282                         set_block_size(cmd, bs);
2283                 }
2284                 break;
2285         default:
2286                 /* It's all good */
2287                 break;
2288         }
2289
2290         switch (opcode) {
2291         case MODE_SENSE:
2292         case MODE_SELECT:
2293                 scst_put_buf(cmd, buffer);
2294                 break;
2295         }
2296
2297 out:
2298         TRACE_EXIT_RES(res);
2299         return res;
2300 }
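/*
 * The MODE SENSE/SELECT(6) parameter list starts with a 4-byte header
 * whose byte 3 is the block descriptor length; a standard 8-byte block
 * descriptor then carries the block size, big endian, in buffer bytes
 * 9-11. E.g. buffer[9] == 0x00, buffer[10] == 0x04, buffer[11] == 0x00
 * describes a 1024-byte block size.
 */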
2301
2302 static void scst_check_internal_sense(struct scst_device *dev, int result,
2303         uint8_t *sense, int sense_len)
2304 {
2305         TRACE_ENTRY();
2306
2307         if (host_byte(result) == DID_RESET) {
2308                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2309                         "reset UA");
2310                 scst_set_sense(sense, sense_len,
2311                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2312                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2313         } else if ((status_byte(result) == CHECK_CONDITION) &&
2314                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2315                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2316
2317         TRACE_EXIT();
2318         return;
2319 }
2320
2321 int scst_obtain_device_parameters(struct scst_device *dev)
2322 {
2323         int res = 0, i;
2324         uint8_t cmd[16];
2325         uint8_t buffer[4+0x0A];
2326         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2327
2328         TRACE_ENTRY();
2329
2330         sBUG_ON(in_interrupt() || in_atomic());
2331         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2332
2333         for (i = 0; i < 5; i++) {
2334                 /* Get control mode page */
2335                 memset(cmd, 0, sizeof(cmd));
2336                 cmd[0] = MODE_SENSE;
2337                 cmd[1] = 8; /* DBD */
2338                 cmd[2] = 0x0A;
2339                 cmd[4] = sizeof(buffer);
2340
2341                 memset(buffer, 0, sizeof(buffer));
2342                 memset(sense_buffer, 0, sizeof(sense_buffer));
2343
2344                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2345                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2346                            sizeof(buffer), sense_buffer, SCST_DEFAULT_TIMEOUT,
2347                             0, 0);
2348
2349                 TRACE_DBG("MODE_SENSE done: %x", res);
2350
2351                 if (scsi_status_is_good(res)) {
2352                         int q;
2353
2354                         PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2355                                 buffer, sizeof(buffer));
2356
2357                         dev->tst = buffer[4+2] >> 5;
2358                         q = buffer[4+3] >> 4;
2359                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2360                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2361                                         "%d:%d:%d:%d", q,
2362                                         dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2363                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2364                         }
2365                         dev->queue_alg = q;
2366                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2367                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2368
2369                         /*
2370                          * Unfortunately, SCSI ML doesn't provide a way to
2371                          * specify a command's task attribute, so we can only
2372                          * rely on the device's restricted reordering.
2373                          */
2374                         dev->has_own_order_mgmt = !dev->queue_alg;
2375
2376                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2377                                 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2378                                 "%d", dev->scsi_dev->host->host_no,
2379                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2380                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2381                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2382
2383                         goto out;
2384                 } else {
2385 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2386                         if ((status_byte(res) == CHECK_CONDITION) &&
2387 #else
2388                         if (
2389 #endif
2390                             SCST_SENSE_VALID(sense_buffer)) {
2391                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2392                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2393                                                 "%d:%d:%d:%d doesn't support control "
2394                                                 "mode page, using defaults: TST "
2395                                                 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2396                                                 "has_own_order_mgmt %d",
2397                                                 dev->scsi_dev->host->host_no,
2398                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2399                                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2400                                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2401                                         res = 0;
2402                                         goto out;
2403                                 } else if (sense_buffer[2] == NOT_READY) {
2404                                         TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2405                                                 dev->scsi_dev->host->host_no,
2406                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2407                                                 dev->scsi_dev->lun);
2408                                         res = 0;
2409                                         goto out;
2410                                 }
2411                         } else {
2412                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2413                                         "device %d:%d:%d:%d failed: %x",
2414                                         dev->scsi_dev->host->host_no,
2415                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2416                                         dev->scsi_dev->lun, res);
2417                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2418                                         "sense", sense_buffer, sizeof(sense_buffer));
2419                         }
2420                         scst_check_internal_sense(dev, res, sense_buffer,
2421                                         sizeof(sense_buffer));
2422                 }
2423         }
2424         res = -ENODEV;
2425
2426 out:
2427         TRACE_EXIT_RES(res);
2428         return res;
2429 }
2430
2431 /* Called under dev_lock and BH off */
2432 void scst_process_reset(struct scst_device *dev,
2433         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2434         struct scst_mgmt_cmd *mcmd)
2435 {
2436         struct scst_tgt_dev *tgt_dev;
2437         struct scst_cmd *cmd, *tcmd;
2438
2439         TRACE_ENTRY();
2440
2441         /* Clear RESERVE'ation, if necessary */
2442         if (dev->dev_reserved) {
2443                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2444                                     dev_tgt_dev_list_entry) {
2445                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2446                                 "lun %lld",
2447                                 (long long unsigned int)tgt_dev->lun);
2448                         clear_bit(SCST_TGT_DEV_RESERVED,
2449                                   &tgt_dev->tgt_dev_flags);
2450                 }
2451                 dev->dev_reserved = 0;
2452                 /*
2453                  * There is no need to send RELEASE, since the device is
2454                  * about to be reset. In fact, since we might be running in
2455                  * a RESET TM function, doing so could even be dangerous.
2456                  */
2457         }
2458
2459         dev->dev_double_ua_possible = 1;
2460         dev->dev_serialized = 1;
2461
2462         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2463                 dev_tgt_dev_list_entry) {
2464                 struct scst_session *sess = tgt_dev->sess;
2465
2466                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2467                 scst_free_all_UA(tgt_dev);
2468                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2469
2470                 spin_lock_irq(&sess->sess_list_lock);
2471
2472                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2473                 list_for_each_entry(cmd, &sess->search_cmd_list,
2474                                 search_cmd_list_entry) {
2475                         if (cmd == exclude_cmd)
2476                                 continue;
2477                         if ((cmd->tgt_dev == tgt_dev) ||
2478                             ((cmd->tgt_dev == NULL) &&
2479                              (cmd->lun == tgt_dev->lun))) {
2480                                 scst_abort_cmd(cmd, mcmd,
2481                                         (tgt_dev->sess != originator), 0);
2482                         }
2483                 }
2484                 spin_unlock_irq(&sess->sess_list_lock);
2485         }
2486
2487         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2488                                 blocked_cmd_list_entry) {
2489                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2490                         list_del(&cmd->blocked_cmd_list_entry);
2491                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2492                                 "to active cmd list", cmd);
2493                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2494                         list_add_tail(&cmd->cmd_list_entry,
2495                                 &cmd->cmd_lists->active_cmd_list);
2496                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2497                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2498                 }
2499         }
2500
2501         /* BH already off */
2502         spin_lock(&scst_temp_UA_lock);
2503         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2504                 SCST_LOAD_SENSE(scst_sense_reset_UA));
2505         scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2506                 sizeof(scst_temp_UA));
2507         spin_unlock(&scst_temp_UA_lock);
2508
2509         TRACE_EXIT();
2510         return;
2511 }
2512
2513 int scst_set_pending_UA(struct scst_cmd *cmd)
2514 {
2515         int res = 0;
2516         struct scst_tgt_dev_UA *UA_entry;
2517
2518         TRACE_ENTRY();
2519
2520         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2521
2522         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2523
2524         /* UA list could be cleared behind us, so retest */
2525         if (list_empty(&cmd->tgt_dev->UA_list)) {
2526                 TRACE_DBG("%s",
2527                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2528                 res = -1;
2529                 goto out_unlock;
2530         }
2531
2532         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2533                               UA_list_entry);
2534
2535         TRACE_DBG("next %p UA_entry %p",
2536               cmd->tgt_dev->UA_list.next, UA_entry);
2537
2538         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2539                 sizeof(UA_entry->UA_sense_buffer));
2540
2541         cmd->ua_ignore = 1;
2542
2543         list_del(&UA_entry->UA_list_entry);
2544
2545         mempool_free(UA_entry, scst_ua_mempool);
2546
2547         if (list_empty(&cmd->tgt_dev->UA_list)) {
2548                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2549                           &cmd->tgt_dev->tgt_dev_flags);
2550         }
2551
2552         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2553
2554 out:
2555         TRACE_EXIT_RES(res);
2556         return res;
2557
2558 out_unlock:
2559         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2560         goto out;
2561 }
2562
2563 /* Called under tgt_dev_lock and BH off */
2564 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2565         const uint8_t *sense, int sense_len, int head)
2566 {
2567         struct scst_tgt_dev_UA *UA_entry = NULL;
2568
2569         TRACE_ENTRY();
2570
2571         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2572         if (UA_entry == NULL) {
2573                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2574                      "allocation failed. The UNIT ATTENTION "
2575                      "on some sessions will be missed");
2576                 PRINT_BUFFER("Lost UA", sense, sense_len);
2577                 goto out;
2578         }
2579         memset(UA_entry, 0, sizeof(*UA_entry));
2580
2581         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2582                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2583         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2584
2585         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2586
2587         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2588
2589         if (head)
2590                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2591         else
2592                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2593
2594 out:
2595         TRACE_EXIT();
2596         return;
2597 }
2598
2599 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2600         const uint8_t *sense, int sense_len, int head)
2601 {
2602         int skip_UA = 0;
2603         struct scst_tgt_dev_UA *UA_entry_tmp;
2604
2605         TRACE_ENTRY();
2606
2607         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2608
2609         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2610                             UA_list_entry) {
2611                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2612                         TRACE_MGMT_DBG("%s", "UA already exists");
2613                         skip_UA = 1;
2614                         break;
2615                 }
2616         }
2617
2618         if (skip_UA == 0)
2619                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2620
2621         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2622
2623         TRACE_EXIT();
2624         return;
2625 }
2626
2627 /* Called under dev_lock and BH off */
2628 void scst_dev_check_set_local_UA(struct scst_device *dev,
2629         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2630 {
2631         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2632
2633         TRACE_ENTRY();
2634
2635         if (exclude != NULL)
2636                 exclude_tgt_dev = exclude->tgt_dev;
2637
2638         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2639                         dev_tgt_dev_list_entry) {
2640                 if (tgt_dev != exclude_tgt_dev)
2641                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2642         }
2643
2644         TRACE_EXIT();
2645         return;
2646 }
2647
2648 /* Called under dev_lock and BH off */
2649 void __scst_dev_check_set_UA(struct scst_device *dev,
2650         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2651 {
2652         TRACE_ENTRY();
2653
2654         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2655
2656         /* Check for reset UA */
2657         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2658                 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2659                         exclude, NULL);
2660
2661         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2662
2663         TRACE_EXIT();
2664         return;
2665 }
2666
2667 /* Called under tgt_dev_lock or when tgt_dev is unused */
2668 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2669 {
2670         struct scst_tgt_dev_UA *UA_entry, *t;
2671
2672         TRACE_ENTRY();
2673
2674         list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2675                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2676                                (long long unsigned int)tgt_dev->lun);
2677                 list_del(&UA_entry->UA_list_entry);
2678                 kfree(UA_entry);
2679         }
2680         INIT_LIST_HEAD(&tgt_dev->UA_list);
2681         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2682
2683         TRACE_EXIT();
2684         return;
2685 }
2686
2687 /* No locks */
2688 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2689 {
2690         struct scst_cmd *res = NULL, *cmd, *t;
2691         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2692
2693         spin_lock_irq(&tgt_dev->sn_lock);
2694
2695         if (unlikely(tgt_dev->hq_cmd_count != 0))
2696                 goto out_unlock;
2697
2698 restart:
2699         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2700                                 sn_cmd_list_entry) {
2701                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2702                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2703                 if (cmd->sn == expected_sn) {
2704                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2705                                 cmd, cmd->sn, cmd->sn_set);
2706                         tgt_dev->def_cmd_count--;
2707                         list_del(&cmd->sn_cmd_list_entry);
2708                         if (res == NULL)
2709                                 res = cmd;
2710                         else {
2711                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2712                                 TRACE_SN("Adding cmd %p to active cmd list",
2713                                         cmd);
2714                                 list_add_tail(&cmd->cmd_list_entry,
2715                                         &cmd->cmd_lists->active_cmd_list);
2716                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2717                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2718                         }
2719                 }
2720         }
2721         if (res != NULL)
2722                 goto out_unlock;
2723
2724         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2725                                 sn_cmd_list_entry) {
2726                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2727                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2728                 if (cmd->sn == expected_sn) {
2729                         atomic_t *slot = cmd->sn_slot;
2730                         /*
2731                          * !! At this point any pointer in cmd, except !!
2732                          * !! sn_slot and sn_cmd_list_entry, could be   !!
2733                          * !! already destroyed                         !!
2734                          */
2735                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2736                                  cmd,
2737                                  (long long unsigned int)cmd->tag,
2738                                  cmd->sn);
2739                         tgt_dev->def_cmd_count--;
2740                         list_del(&cmd->sn_cmd_list_entry);
2741                         spin_unlock_irq(&tgt_dev->sn_lock);
2742                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2743                                              &cmd->cmd_flags))
2744                                 scst_destroy_put_cmd(cmd);
2745                         scst_inc_expected_sn(tgt_dev, slot);
2746                         expected_sn = tgt_dev->expected_sn;
2747                         spin_lock_irq(&tgt_dev->sn_lock);
2748                         goto restart;
2749                 }
2750         }
2751
2752 out_unlock:
2753         spin_unlock_irq(&tgt_dev->sn_lock);
2754         return res;
2755 }
2756
2757 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2758         struct scst_thr_data_hdr *data,
2759         void (*free_fn) (struct scst_thr_data_hdr *data))
2760 {
2761         data->pid = current->pid;
2762         atomic_set(&data->ref, 1);
2763         EXTRACHECKS_BUG_ON(free_fn == NULL);
2764         data->free_fn = free_fn;
2765         spin_lock(&tgt_dev->thr_data_lock);
2766         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2767         spin_unlock(&tgt_dev->thr_data_lock);
2768 }
2769
2770 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2771 {
2772         spin_lock(&tgt_dev->thr_data_lock);
2773         while (!list_empty(&tgt_dev->thr_data_list)) {
2774                 struct scst_thr_data_hdr *d = list_entry(
2775                                 tgt_dev->thr_data_list.next, typeof(*d),
2776                                 thr_data_list_entry);
2777                 list_del(&d->thr_data_list_entry);
2778                 spin_unlock(&tgt_dev->thr_data_lock);
2779                 scst_thr_data_put(d);
2780                 spin_lock(&tgt_dev->thr_data_lock);
2781         }
2782         spin_unlock(&tgt_dev->thr_data_lock);
2783         return;
2784 }
2785
2786 void scst_dev_del_all_thr_data(struct scst_device *dev)
2787 {
2788         struct scst_tgt_dev *tgt_dev;
2789
2790         TRACE_ENTRY();
2791
2792         mutex_lock(&scst_mutex);
2793
2794         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2795                                 dev_tgt_dev_list_entry) {
2796                 scst_del_all_thr_data(tgt_dev);
2797         }
2798
2799         mutex_unlock(&scst_mutex);
2800
2801         TRACE_EXIT();
2802         return;
2803 }
2804
2805 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2806 {
2807         struct scst_thr_data_hdr *res = NULL, *d;
2808
2809         spin_lock(&tgt_dev->thr_data_lock);
2810         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2811                 if (d->pid == current->pid) {
2812                         res = d;
2813                         scst_thr_data_get(res);
2814                         break;
2815                 }
2816         }
2817         spin_unlock(&tgt_dev->thr_data_lock);
2818         return res;
2819 }
2820
2821 /* dev_lock supposed to be held and BH disabled */
2822 void __scst_block_dev(struct scst_device *dev)
2823 {
2824         dev->block_count++;
2825         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2826 }
2827
2828 /* No locks */
2829 void scst_block_dev(struct scst_device *dev, int outstanding)
2830 {
2831         spin_lock_bh(&dev->dev_lock);
2832         __scst_block_dev(dev);
2833         spin_unlock_bh(&dev->dev_lock);
2834
2835         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2836         smp_mb();
2837
2838         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2839                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2840         wait_event(dev->on_dev_waitQ,
2841                 atomic_read(&dev->on_dev_count) <= outstanding);
2842         TRACE_MGMT_DBG("%s", "wait_event() returned");
2843 }
2844
2845 /* No locks */
2846 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
2847 {
2848         sBUG_ON(cmd->needs_unblocking);
2849
2850         cmd->needs_unblocking = 1;
2851         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
2852                        cmd, (long long unsigned int)cmd->tag);
2853
2854         scst_block_dev(cmd->dev, outstanding);
2855 }
2856
2857 /* No locks */
2858 void scst_unblock_dev(struct scst_device *dev)
2859 {
2860         spin_lock_bh(&dev->dev_lock);
2861         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
2862                 dev->block_count-1, dev);
2863         if (--dev->block_count == 0)
2864                 scst_unblock_cmds(dev);
2865         spin_unlock_bh(&dev->dev_lock);
2866         sBUG_ON(dev->block_count < 0);
2867 }
2868
2869 /* No locks */
2870 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
2871 {
2872         scst_unblock_dev(cmd->dev);
2873         cmd->needs_unblocking = 0;
2874 }
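/*
 * A typical usage (a sketch, not taken from this file) pairs the two
 * calls around work that must see a quiesced device:
 *
 *     scst_block_dev_cmd(cmd, 0);     wait for all outstanding commands
 *     ... exclusive work on cmd->dev ...
 *     scst_unblock_dev_cmd(cmd);      let blocked commands run again
 *
 * "outstanding" is the number of commands allowed to remain on the
 * device while it is blocked (e.g. 1 when cmd itself is already
 * counted in on_dev_count).
 */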
2875
2876 /* No locks */
2877 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
2878 {
2879         int res = 0;
2880         struct scst_device *dev = cmd->dev;
2881
2882         TRACE_ENTRY();
2883
2884         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
2885
2886         atomic_inc(&dev->on_dev_count);
2887         cmd->dec_on_dev_needed = 1;
2888         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
2889
2890 #ifdef STRICT_SERIALIZING
2891         spin_lock_bh(&dev->dev_lock);
2892         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2893                 goto out_unlock;
2894         if (dev->block_count > 0) {
2895                 scst_dec_on_dev_cmd(cmd);
2896                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
2897                         "serializing (tag %llu, dev %p)", cmd,
                        (long long unsigned int)cmd->tag, dev);
2898                 list_add_tail(&cmd->blocked_cmd_list_entry,
2899                               &dev->blocked_cmd_list);
2900                 res = 1;
2901         } else {
2902                 __scst_block_dev(dev);
2903                 cmd->inc_blocking = 1;
2904         }
2905         spin_unlock_bh(&dev->dev_lock);
2906         goto out;
2907 #else
2908 repeat:
2909         if (unlikely(dev->block_count > 0)) {
2910                 spin_lock_bh(&dev->dev_lock);
2911                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2912                         goto out_unlock;
2913                 barrier(); /* to reread block_count */
2914                 if (dev->block_count > 0) {
2915                         scst_dec_on_dev_cmd(cmd);
2916                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
2917                                 "serializing (tag %llu, dev %p)", cmd,
2918                                 (long long unsigned int)cmd->tag, dev);
2919                         list_add_tail(&cmd->blocked_cmd_list_entry,
2920                                       &dev->blocked_cmd_list);
2921                         res = 1;
2922                         spin_unlock_bh(&dev->dev_lock);
2923                         goto out;
2924                 } else {
2925                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
2926                                 "continuing");
2927                 }
2928                 spin_unlock_bh(&dev->dev_lock);
2929         }
2930         if (unlikely(dev->dev_serialized)) {
2931                 spin_lock_bh(&dev->dev_lock);
2932                 barrier(); /* to reread block_count */
2933                 if (dev->block_count == 0) {
2934                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
2935                                 "cmds due to serializing (dev %p)", cmd,
2936                                 (long long unsigned int)cmd->tag, dev);
2937                         __scst_block_dev(dev);
2938                         cmd->inc_blocking = 1;
2939                 } else {
2940                         spin_unlock_bh(&dev->dev_lock);
2941                         TRACE_MGMT_DBG("Somebody blocked the device, "
2942                                 "repeating (count %d)", dev->block_count);
2943                         goto repeat;
2944                 }
2945                 spin_unlock_bh(&dev->dev_lock);
2946         }
2947 #endif
2948
2949 out:
2950         TRACE_EXIT_RES(res);
2951         return res;
2952
2953 out_unlock:
2954         spin_unlock_bh(&dev->dev_lock);
2955         goto out;
2956 }
2957
2958 /* Called under dev_lock */
2959 void scst_unblock_cmds(struct scst_device *dev)
2960 {
2961 #ifdef STRICT_SERIALIZING
2962         struct scst_cmd *cmd, *t;
2963         unsigned long flags;
2964
2965         TRACE_ENTRY();
2966
2967         local_irq_save(flags);
2968         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
2969                                  blocked_cmd_list_entry) {
2970                 int brk = 0;
2971                 /*
2972                  * Since only one cmd at a time is being executed, expected_sn
2973                  * can't change behind us, if the corresponding cmd is in
2974                  * blocked_cmd_list, but we could be called before
2975                  * scst_inc_expected_sn().
2976                  */
2977                 if (likely(!cmd->internal && !cmd->retry)) {
2978                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
2979                         if (cmd->tgt_dev == NULL)
2980                                 sBUG();
2981                         expected_sn = cmd->tgt_dev->expected_sn;
2982                         if (cmd->sn == expected_sn)
2983                                 brk = 1;
2984                         else if (cmd->sn != (expected_sn+1))
2985                                 continue;
2986                 }
2987
2988                 list_del(&cmd->blocked_cmd_list_entry);
2989                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
2990                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2991                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
2992                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2993                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2994                 if (brk)
2995                         break;
2996         }
2997         local_irq_restore(flags);
2998 #else /* STRICT_SERIALIZING */
2999         struct scst_cmd *cmd, *tcmd;
3000         unsigned long flags;
3001
3002         TRACE_ENTRY();
3003
3004         local_irq_save(flags);
3005         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3006                                  blocked_cmd_list_entry) {
3007                 list_del(&cmd->blocked_cmd_list_entry);
3008                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3009                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3010                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3011                         list_add(&cmd->cmd_list_entry,
3012                                 &cmd->cmd_lists->active_cmd_list);
3013                 else
3014                         list_add_tail(&cmd->cmd_list_entry,
3015                                 &cmd->cmd_lists->active_cmd_list);
3016                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3017                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3018         }
3019         local_irq_restore(flags);
3020 #endif /* STRICT_SERIALIZING */
3021
3022         TRACE_EXIT();
3023         return;
3024 }
3025
3026 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3027         struct scst_cmd *out_of_sn_cmd)
3028 {
3029         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3030
3031         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3032                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3033                 scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
3034         } else {
3035                 out_of_sn_cmd->out_of_sn = 1;
3036                 spin_lock_irq(&tgt_dev->sn_lock);
3037                 tgt_dev->def_cmd_count++;
3038                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3039                               &tgt_dev->skipped_sn_list);
3040                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3041                         "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3042                         tgt_dev->expected_sn);
3043                 spin_unlock_irq(&tgt_dev->sn_lock);
3044         }
3045
3046         return;
3047 }
3048
3049 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3050         struct scst_cmd *out_of_sn_cmd)
3051 {
3052         TRACE_ENTRY();
3053
3054         if (!out_of_sn_cmd->sn_set) {
3055                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3056                 goto out;
3057         }
3058
3059         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3060
3061 out:
3062         TRACE_EXIT();
3063         return;
3064 }
3065
3066 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3067 {
3068         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3069
3070         TRACE_ENTRY();
3071
3072         if (!cmd->hq_cmd_inced)
3073                 goto out;
3074
3075         spin_lock_irq(&tgt_dev->sn_lock);
3076         tgt_dev->hq_cmd_count--;
3077         spin_unlock_irq(&tgt_dev->sn_lock);
3078
3079         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3080
3081         /*
3082          * There is no problem in checking hq_cmd_count in the
3083          * non-locked state. In the worst case we will only have
3084          * non-locked state. In the worst case we will only get an
3085          * unneeded run of the deferred commands.
3086         if (tgt_dev->hq_cmd_count == 0)
3087                 scst_make_deferred_commands_active(tgt_dev, cmd);
3088
3089 out:
3090         TRACE_EXIT();
3091         return;
3092 }
3093
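     /*
      * Called when an aborted command reaches the xmit stage.  Commands
      * that already completed are delivered normally.  A command that
      * was aborted on behalf of another initiator and has not completed
      * yet either gets TASK ABORTED status (when the device's TAS
      * setting is on) or is dropped without delivery or notification.
      */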
3094 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3095 {
3096         TRACE_ENTRY();
3097
3098         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3099                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3100                 atomic_read(&scst_cmd_count));
3101
3102         scst_done_cmd_mgmt(cmd);
3103
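             /*
              * Read barrier: make sure the abort flags set by the
              * aborting context are visible before we test them below
              * (presumably paired with a write barrier on the abort
              * side).
              */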
3104         smp_rmb();
3105         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3106                 if (cmd->completed) {
3107                         /* It's completed and it's OK to return its result */
3108                         goto out;
3109                 }
3110
3111                 if (cmd->dev->tas) {
3112                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3113                                 "(tag %llu), returning TASK ABORTED", cmd,
3114                                 (unsigned long long)cmd->tag);
3115                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3116                 } else {
3117                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3118                                 "(tag %llu), aborting without delivery or "
3119                                 "notification",
3120                                 cmd, (unsigned long long)cmd->tag);
3121                         /*
3122                          * There is no need to check/requeue possible UA,
3123                          * because, if it exists, it will be delivered
3124                          * by the "completed" branch above.
3125                          */
3126                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3127                 }
3128         }
3129
3130 out:
3131         TRACE_EXIT();
3132         return;
3133 }
3134
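     /*
      * Builds scst_scsi_op_list: for each opcode it stores the index of
      * the first scst_scsi_op_table[] entry with that opcode (the table
      * is assumed to be sorted by opcode), or SCST_CDB_TBL_SIZE when no
      * entry exists for the opcode.
      */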
3135 void __init scst_scsi_op_list_init(void)
3136 {
3137         int i;
3138         uint8_t op = 0xff;
3139
3140         TRACE_ENTRY();
3141
3142         for (i = 0; i < 256; i++)
3143                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3144
3145         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3146                 if (scst_scsi_op_table[i].ops != op) {
3147                         op = scst_scsi_op_table[i].ops;
3148                         scst_scsi_op_list[op] = i;
3149                 }
3150         }
3151
3152         TRACE_EXIT();
3153         return;
3154 }
3155
3156 #ifdef DEBUG
3157 /* Original taken from the XFS code */
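     /*
      * Park-Miller "minimal standard" generator: rv = 16807 * rv mod
      * (2^31 - 1), computed via Schrage's decomposition (127773 ==
      * M / 16807, 2836 == M % 16807, M == 2^31 - 1) so the intermediate
      * products fit in a 32-bit long.  Assumes the jiffies-based seed
      * is nonzero modulo 2^31 - 1.
      */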
3158 unsigned long scst_random(void)
3159 {
3160         static int Inited;
3161         static unsigned long RandomValue;
3162         static DEFINE_SPINLOCK(lock);
3163         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3164         register long rv;
3165         register long lo;
3166         register long hi;
3167         unsigned long flags;
3168
3169         spin_lock_irqsave(&lock, flags);
3170         if (!Inited) {
3171                 RandomValue = jiffies;
3172                 Inited = 1;
3173         }
3174         rv = RandomValue;
3175         hi = rv / 127773;
3176         lo = rv % 127773;
3177         rv = 16807 * lo - 2836 * hi;
3178         if (rv <= 0)
3179                 rv += 2147483647;
3180         RandomValue = rv;
3181         spin_unlock_irqrestore(&lock, flags);
3182         return rv;
3183 }
3184 #endif /* DEBUG */
3185
3186 #ifdef DEBUG_TM
3187
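     /*
      * TM debugging: commands on the LUN under test are artificially
      * delayed so that the initiator is provoked into sending aborts,
      * resets and other task management functions.  The code cycles
      * through the states below; tm_dbg_on_state_num_passes[] controls
      * how many passes each state survives before the next transition.
      */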
3188 #define TM_DBG_STATE_ABORT              0
3189 #define TM_DBG_STATE_RESET              1
3190 #define TM_DBG_STATE_OFFLINE            2
3191
3192 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
3193
3194 static void tm_dbg_timer_fn(unsigned long arg);
3195
3196 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3197 /* All serialized by scst_tm_dbg_lock */
3198 static struct {
3199         unsigned int tm_dbg_release:1;
3200         unsigned int tm_dbg_blocked:1;
3201 } tm_dbg_flags;
3202 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3203 static int tm_dbg_delayed_cmds_count;
3204 static int tm_dbg_passed_cmds_count;
3205 static int tm_dbg_state;
3206 static int tm_dbg_on_state_passes;
3207 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3208 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3209
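     /* Passes per state, indexed by TM_DBG_STATE_*; the OFFLINE entry
      * is effectively unbounded. */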
3210 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3211
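     /*
      * Puts the given tgt_dev under TM debugging, but only when it is
      * LUN 0 in the default ACG.
      */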
3212 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3213         struct scst_acg_dev *acg_dev)
3214 {
3215         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3216                 unsigned long flags;
3217                 /* Do TM debugging only for LUN 0 */
3218                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3219                 tm_dbg_p_cmd_list_waitQ =
3220                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3221                 tm_dbg_state = INIT_TM_DBG_STATE;
3222                 tm_dbg_on_state_passes =
3223                         tm_dbg_on_state_num_passes[tm_dbg_state];
3224                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3225                 PRINT_INFO("LUN 0 of target driver %s is under "
3226                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3227                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3228         }
3229 }
3230
3231 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3232 {
3233         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3234                 unsigned long flags;
3235                 del_timer_sync(&tm_dbg_timer);
3236                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3237                 tm_dbg_p_cmd_list_waitQ = NULL;
3238                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3239         }
3240 }
3241
3242 static void tm_dbg_timer_fn(unsigned long arg)
3243 {
3244         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3245         tm_dbg_flags.tm_dbg_release = 1;
3246         smp_wmb();
3247         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3248 }
3249
3250 /* Called under scst_tm_dbg_lock and IRQs off */
3251 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3252 {
3253         switch (tm_dbg_state) {
3254         case TM_DBG_STATE_ABORT:
3255                 if (tm_dbg_delayed_cmds_count == 0) {
3256                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3257                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3258                                 "for %ld.%02ld seconds (%ld HZ), "
3259                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3260                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3261                         mod_timer(&tm_dbg_timer, jiffies + d);
3262 #if 0
3263                         tm_dbg_flags.tm_dbg_blocked = 1;
3264 #endif
3265                 } else {
3266                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3267                                 "(tag %llu), delayed_cmds_count=%d, "
3268                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3269                                 tm_dbg_delayed_cmds_count,
3270                                 tm_dbg_on_state_passes);
3271                         if (tm_dbg_delayed_cmds_count == 2)
3272                                 tm_dbg_flags.tm_dbg_blocked = 0;
3273                 }
3274                 break;
3275
3276         case TM_DBG_STATE_RESET:
3277         case TM_DBG_STATE_OFFLINE:
3278                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3279                         "(tag %llu), delayed_cmds_count=%d, "
3280                         "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3281                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3282                 tm_dbg_flags.tm_dbg_blocked = 1;
3283                 break;
3284
3285         default:
3286                 sBUG();
3287         }
3288         /* IRQs already off */
3289         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3290         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3291         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3292         cmd->tm_dbg_delayed = 1;
3293         tm_dbg_delayed_cmds_count++;
3294         return;
3295 }
3296
3297 /* No locks */
3298 void tm_dbg_check_released_cmds(void)
3299 {
3300         if (tm_dbg_flags.tm_dbg_release) {
3301                 struct scst_cmd *cmd, *tc;
3302                 spin_lock_irq(&scst_tm_dbg_lock);
3303                 list_for_each_entry_safe_reverse(cmd, tc,
3304                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3305                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3306                                 "delayed_cmds_count=%d", cmd, cmd->tag,
3307                                 tm_dbg_delayed_cmds_count);
3308                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3309                         list_move(&cmd->cmd_list_entry,
3310                                 &cmd->cmd_lists->active_cmd_list);
3311                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3312                 }
3313                 tm_dbg_flags.tm_dbg_release = 0;
3314                 spin_unlock_irq(&scst_tm_dbg_lock);
3315         }
3316 }
3317
3318 /* Called under scst_tm_dbg_lock */
3319 static void tm_dbg_change_state(void)
3320 {
3321         tm_dbg_flags.tm_dbg_blocked = 0;
3322         if (--tm_dbg_on_state_passes == 0) {
3323                 switch (tm_dbg_state) {
3324                 case TM_DBG_STATE_ABORT:
3325                         TRACE_MGMT_DBG("%s",
3326                                 "Changing tm_dbg_state to RESET");
3327                         tm_dbg_state = TM_DBG_STATE_RESET;
3328                         break;
3329                 case TM_DBG_STATE_RESET:
3330                 case TM_DBG_STATE_OFFLINE:
3331                         if (TM_DBG_GO_OFFLINE) {
3332                                 TRACE_MGMT_DBG("%s",
3333                                         "Changing tm_dbg_state to OFFLINE");
3334                                 tm_dbg_state = TM_DBG_STATE_OFFLINE;
3335                         } else {
3336                                 TRACE_MGMT_DBG("%s",
3337                                         "Changing tm_dbg_state to ABORT");
3338                                 tm_dbg_state = TM_DBG_STATE_ABORT;
3339                         }
3340                         break;
3341                 default:
3342                         sBUG();
3343                 }
3344                 tm_dbg_on_state_passes =
3345                         tm_dbg_on_state_num_passes[tm_dbg_state];
3346         }
3347
3348         TRACE_MGMT_DBG("%s", "Deleting timer");
3349         del_timer(&tm_dbg_timer);
3350 }
3355
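     /*
      * Decides whether @cmd must be artificially delayed: every 50th
      * command on a tgt_dev under TM debugging (or every command while
      * tm_dbg_blocked is set) is parked on tm_dbg_delayed_cmd_list.
      * Returns 1 if the command was delayed, 0 otherwise.
      */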
3356 /* No locks */
3357 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3358 {
3359         int res = 0;
3360         unsigned long flags;
3361
3362         if (cmd->tm_dbg_immut)
3363                 goto out;
3364
3365         if (cmd->tm_dbg_delayed) {
3366                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3367                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3368                         "delayed_cmds_count=%d", cmd, cmd->tag,
3369                         tm_dbg_delayed_cmds_count);
3370
3371                 cmd->tm_dbg_immut = 1;
3372                 tm_dbg_delayed_cmds_count--;
3373                 if ((tm_dbg_delayed_cmds_count == 0) &&
3374                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3375                         tm_dbg_change_state();
3376                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3377         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3378                                         &cmd->tgt_dev->tgt_dev_flags)) {
3379                 /* Delay 50th command */
3380                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3381                 if (tm_dbg_flags.tm_dbg_blocked ||
3382                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3383                         tm_dbg_delay_cmd(cmd);
3384                         res = 1;
3385                 } else
3386                         cmd->tm_dbg_immut = 1;
3387                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3388         }
3389
3390 out:
3391         return res;
3392 }
3393
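     /*
      * Called when an abort request arrives for @cmd.  If the command
      * is currently delayed, it is moved back to the active list so the
      * abort can be processed; about one in ten such commands is first
      * completed with HARDWARE ERROR sense to exercise the abort path
      * for already completed commands.
      */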
3394 /* No locks */
3395 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3396 {
3397         struct scst_cmd *c;
3398         unsigned long flags;
3399
3400         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3401         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3402                                 cmd_list_entry) {
3403                 if (c == cmd) {
3404                         TRACE_MGMT_DBG("Abort request for "
3405                                 "delayed cmd %p (tag=%llu), moving it to "
3406                                 "active cmd list (delayed_cmds_count=%d)",
3407                                 cmd, cmd->tag, tm_dbg_delayed_cmds_count);
3408
3409                         if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3410                                 /* Test how completed commands are handled */
3411                                 if ((scst_random() % 10) == 5) {
3412                                         scst_set_cmd_error(cmd,
3413                                            SCST_LOAD_SENSE(scst_sense_hardw_error));
3414                                         /* It's completed now */
3415                                 }
3416                         }
3417
3418                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3419                         list_move(&cmd->cmd_list_entry,
3420                                 &cmd->cmd_lists->active_cmd_list);
3421                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3422                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3423                         break;
3424                 }
3425         }
3426         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3427 }
3428
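     /*
      * Reacts to task management function @fn: unless the debug state
      * machine is OFFLINE (and @force is not set), advances the state
      * and releases all delayed commands.  With a non-NULL @dev this is
      * done only if the device has a tgt_dev under TM debugging.
      */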
3429 /* Might be called under scst_mutex */
3430 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3431 {
3432         unsigned long flags;
3433
3434         if (dev != NULL) {
3435                 struct scst_tgt_dev *tgt_dev;
3436                 bool found = false;
3437
3438                 spin_lock_bh(&dev->dev_lock);
3439                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3440                                             dev_tgt_dev_list_entry) {
3441                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3442                                         &tgt_dev->tgt_dev_flags)) {
3443                                 found = true;
3444                                 break;
3445                         }
3446                 }
3447                 spin_unlock_bh(&dev->dev_lock);
3448
3449                 if (!found)
3450                         goto out;
3451         }
3452
3453         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3454         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3455                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3456                         tm_dbg_delayed_cmds_count);
3457                 tm_dbg_change_state();
3458                 tm_dbg_flags.tm_dbg_release = 1;
3459                 smp_wmb();
3460                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3461                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3462         } else {
3463                 TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
3464         }
3465         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3466
3467 out:
3468         return;
3469 }
3470
3471 int tm_dbg_is_release(void)
3472 {
3473         return tm_dbg_flags.tm_dbg_release;
3474 }
3475 #endif /* DEBUG_TM */
3476
3477 #ifdef DEBUG_SN
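     /*
      * Randomly rewrites cmd->queue_type to stress the SN/ordering
      * machinery: roughly one command in a thousand starts a burst of
      * up to nine commands forced to HEAD OF QUEUE or ORDERED, and a
      * few other commands get their queue type flipped individually.
      */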
3478 void scst_check_debug_sn(struct scst_cmd *cmd)
3479 {
3480         static DEFINE_SPINLOCK(lock);
3481         static int type;
3482         static int cnt;
3483         unsigned long flags;
3484         int old = cmd->queue_type;
3485
3486         spin_lock_irqsave(&lock, flags);
3487
3488         if (cnt == 0) {
3489                 if ((scst_random() % 1000) == 500) {
3490                         if ((scst_random() % 3) == 1)
3491                                 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3492                         else
3493                                 type = SCST_CMD_QUEUE_ORDERED;
3494                         do {
3495                                 cnt = scst_random() % 10;
3496                         } while (cnt == 0);
3497                 } else
3498                         goto out_unlock;
3499         }
3500
3501         cmd->queue_type = type;
3502         cnt--;
3503
3504         if ((scst_random() % 1000) == 750)
3505                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3506         else if ((scst_random() % 1000) == 751)
3507                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3508         else if ((scst_random() % 1000) == 752)
3509                 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3510
3511         TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3512                 cmd->queue_type, cnt);
3513
3514 out_unlock:
3515         spin_unlock_irqrestore(&lock, flags);
3516         return;
3517 }
3518 #endif /* DEBUG_SN */