/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *                 and Leonid Stoljar
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <asm/unistd.h>
#include <asm/string.h>

#ifdef SCST_HIGHMEM
#include <linux/highmem.h>
#endif

#include "scsi_tgt.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);

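/*
 * Allocates cmd->sense from the dedicated sense mempool, so a CHECK
 * CONDITION can still be reported under memory pressure. In atomic
 * context the allocation may fail (GFP_ATOMIC); otherwise __GFP_NOFAIL
 * makes it block until it succeeds.
 */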
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        sBUG_ON(cmd->sense != NULL);

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_ERROR("FATAL!!! Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->tgt_resp_flags = SCST_TSC_FLAG_STATUS;
        cmd->resp_data_len = 0;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}

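/*
 * Sets CHECK CONDITION status on the command and fills its sense buffer
 * with the given key/ASC/ASCQ triple. A typical call, as used in error
 * paths elsewhere in this file with the SCST_LOAD_SENSE() code
 * definitions from the SCST headers:
 *
 *      scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 */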
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}

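/*
 * Builds fixed-format sense data as defined by SPC: byte 0 is the
 * response code (0x70 = current errors, fixed format), byte 2 the sense
 * key, byte 7 the additional sense length (0x0a, i.e. 18 bytes total),
 * and bytes 12/13 the ASC/ASCQ pair.
 */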
void scst_set_sense(uint8_t *buffer, int len, int key,
        int asc, int ascq)
{
        memset(buffer, 0, len);
        buffer[0] = 0x70;       /* Error Code                   */
        buffer[2] = key;        /* Sense Key                    */
        buffer[7] = 0x0a;       /* Additional Sense Length      */
        buffer[12] = asc;       /* ASC                          */
        buffer[13] = ascq;      /* ASCQ                         */
        TRACE_BUFFER("Sense set", buffer, len);
        return;
}

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

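/*
 * Chooses between BUSY and TASK SET FULL along the lines SAM suggests:
 * TASK SET FULL only makes sense when the initiator has other commands
 * outstanding (and the session is fully initialized), otherwise plain
 * BUSY is returned.
 */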
void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}

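/*
 * Truncates the command's SG vector so that exactly resp_data_len bytes
 * are returned, saving the original entry count and length so
 * scst_check_restore_sg_buff() can undo it. For example, with three 4K
 * entries and resp_data_len == 6144, sg_cnt becomes 2 and sg[1].length
 * is cut to 2048.
 */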
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d", cmd, cmd->tag, resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __FUNCTION__);
                sBUG();
        }
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
        struct scst_device *dev, lun_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __FUNCTION__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

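/*
 * A tgt_dev represents a device (LUN) as seen through one session: it
 * binds the session to an acg_dev, negotiates the SG entry limit between
 * the target driver and the SCSI host, picks an sgv pool (normal,
 * clustered, DMA or highmem), queues the initial reset Unit Attention
 * and, if required, grows the per-device or global command thread pools.
 */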
/* scst_mutex is supposed to be held and there must be no parallel activity in this sess */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering) {
                scst_sgv_pool_use_norm_clust(tgt_dev);
        }

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
                scst_sgv_pool_use_dma(tgt_dev);
        } else {
#ifdef SCST_HIGHMEM
                scst_sgv_pool_use_highmem(tgt_dev);
#endif
        }

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%Ld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun, (uint64_t)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld",
                        dev->virt_name, (uint64_t)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            sess->tgt->tgtt->preprocessing_done_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_cmd_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

out_free:
        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, except scst_mutex, which is held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        spin_lock_bh(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
        scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
        spin_unlock_bh(&scst_temp_UA_lock);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held and there must be no parallel activity in this sess */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_cmd_threads(vtt->threads_num);
        }

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/* scst_mutex is supposed to be held and there must be no parallel activity in this sess */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
        int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Added device %s to group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Added device %d:%d:%d:%d to group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

out:
        if (res == 0) {
                if (dev->virt_name != NULL) {
                        PRINT_INFO("Removed device %s from group %s",
                                dev->virt_name, acg->acg_name);
                } else {
                        PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                                dev->scsi_dev->host->host_no,
                                dev->scsi_dev->channel, dev->scsi_dev->id,
                                dev->scsi_dev->lun, acg->acg_name);
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0) {
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

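/*
 * Internal commands (e.g. the REQUEST SENSE issued below) are cloned
 * from an original command: they share its session, device and LUN, are
 * queued HEAD OF QUEUE, and keep a back pointer in orig_cmd so the
 * result can be delivered to the initiator's command.
 */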
struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->state = SCST_CMD_STATE_DEV_PARSE;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;

        res->bufflen = bufsize;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        __scst_cmd_put(cmd);

        TRACE_EXIT();
        return;
}

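/*
 * If a device returned CHECK CONDITION but no auto-sense, this builds a
 * 6-byte REQUEST SENSE internal command for up to 252 bytes of sense
 * data and puts it at the head of the active list, ahead of other
 * commands; scst_complete_request_sense() below picks up the result.
 */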
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = SCST_CMD_STATE_RES_CONT_NEXT;
#define sbuf_size 252
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
#undef sbuf_size
}

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        if (req_cmd->dev->handler->dev_done != NULL) {
                int rc;
                TRACE_DBG("Calling dev handler %s dev_done(%p)",
                      req_cmd->dev->handler->name, req_cmd);
                rc = req_cmd->dev->handler->dev_done(req_cmd);
                TRACE_DBG("Dev handler %s dev_done() returned %d",
                      req_cmd->dev->handler->name, rc);
        }

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        scst_free_internal_cmd(req_cmd);

        TRACE_EXIT_HRES((unsigned long)orig_cmd);
        return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req;

        TRACE_ENTRY();

        if (scsi_cmd && (req = scsi_cmd->sc_request)) {
                if (req->sr_bufflen)
                        kfree(req->sr_buffer);
                scsi_release_request(req);
        }

        TRACE_EXIT();
        return;
}

static void scst_send_release(struct scst_tgt_dev *tgt_dev)
{
        struct scsi_request *req;
        struct scsi_device *scsi_dev;
        uint8_t cdb[6];

        TRACE_ENTRY();

        if (tgt_dev->dev->scsi_dev == NULL)
                goto out;

        scsi_dev = tgt_dev->dev->scsi_dev;

        req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
        if (req == NULL) {
                PRINT_ERROR("Allocation of scsi_request failed: unable "
                            "to RELEASE device %d:%d:%d:%d",
                            scsi_dev->host->host_no, scsi_dev->channel,
                            scsi_dev->id, scsi_dev->lun);
                goto out;
        }

        memset(cdb, 0, sizeof(cdb));
        cdb[0] = RELEASE;
        cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
            ((scsi_dev->lun << 5) & 0xe0) : 0;
        memcpy(req->sr_cmnd, cdb, sizeof(cdb));
        req->sr_cmd_len = sizeof(cdb);
        req->sr_data_direction = SCST_DATA_NONE;
        req->sr_use_sg = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_request->rq_disk = tgt_dev->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
                "mid-level", req);
        scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
                    scst_req_done, SCST_DEFAULT_TIMEOUT, 3);

out:
        TRACE_EXIT();
        return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
static void scst_send_release(struct scst_tgt_dev *tgt_dev)
{
        struct scsi_device *scsi_dev;
        unsigned char cdb[6];
        unsigned char *sense;
        int rc, i;

        TRACE_ENTRY();

        if (tgt_dev->dev->scsi_dev == NULL)
                goto out;

        /* We can't afford missing RELEASE due to memory shortage */
        sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);

        scsi_dev = tgt_dev->dev->scsi_dev;

        for (i = 0; i < 5; i++) {
                memset(cdb, 0, sizeof(cdb));
                cdb[0] = RELEASE;
                cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
                    ((scsi_dev->lun << 5) & 0xe0) : 0;

                memset(sense, 0, SCST_SENSE_BUFFERSIZE);

                TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
                        "SCSI mid-level");
                rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
                                sense, SCST_DEFAULT_TIMEOUT, 0, GFP_KERNEL);
                TRACE_DBG("RELEASE done: %x", rc);

                if (scsi_status_is_good(rc)) {
                        break;
                } else {
                        PRINT_ERROR("RELEASE failed: %d", rc);
                        PRINT_BUFFER("RELEASE sense", sense,
                                SCST_SENSE_BUFFERSIZE);
                        scst_check_internal_sense(tgt_dev->dev, rc,
                                        sense, SCST_SENSE_BUFFERSIZE);
                }
        }

        kfree(sense);

out:
        TRACE_EXIT();
        return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */

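/*
 * SPC-2 reservations: while a device is reserved, every other session's
 * tgt_dev has SCST_TGT_DEV_RESERVED set. When the nexus that holds the
 * reservation goes away, the flags are cleared on all sessions and a
 * RELEASE is sent to the underlying SCSI device via scst_send_release().
 */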
/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        int release = 0;

        TRACE_ENTRY();

        spin_lock_bh(&dev->dev_lock);
        if (dev->dev_reserved &&
            !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
                /* This is the one who holds the reservation */
                struct scst_tgt_dev *tgt_dev_tmp;
                list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                    &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                /* The holder is going away, so the device must be released */
                release = 1;
        }
        spin_unlock_bh(&dev->dev_lock);

        if (release)
                scst_send_release(tgt_dev);

        TRACE_EXIT();
        return;
}

struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
        const char *initiator_name)
{
        struct scst_session *sess;
        int i;
        int len;
        char *nm;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
        sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
        if (sess == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_session failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        memset(sess, 0, sizeof(*sess));
#endif

        sess->init_phase = SCST_SESS_IPH_INITING;
        sess->shut_phase = SCST_SESS_SPH_READY;
        atomic_set(&sess->refcnt, 0);
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                         &sess->sess_tgt_dev_list_hash[i];
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }
        spin_lock_init(&sess->sess_list_lock);
        INIT_LIST_HEAD(&sess->search_cmd_list);
        sess->tgt = tgt;
        INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
        INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef MEASURE_LATENCY
        spin_lock_init(&sess->meas_lock);
#endif

        len = strlen(initiator_name);
        nm = kmalloc(len + 1, gfp_mask);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
                goto out_free;
        }

        strcpy(nm, initiator_name);
        sess->initiator_name = nm;

out:
        TRACE_EXIT();
        return sess;

out_free:
        kmem_cache_free(scst_sess_cachep, sess);
        sess = NULL;
        goto out;
}

void scst_free_session(struct scst_session *sess)
{
        TRACE_ENTRY();

        mutex_lock(&scst_mutex);

        TRACE_DBG("Removing sess %p from the list", sess);
        list_del(&sess->sess_list_entry);
        TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
        list_del(&sess->acg_sess_list_entry);

        scst_sess_free_tgt_devs(sess);

        wake_up_all(&sess->tgt->unreg_waitQ);

        mutex_unlock(&scst_mutex);

        kfree(sess->initiator_name);
        kmem_cache_free(scst_sess_cachep, sess);

        TRACE_EXIT();
        return;
}

void scst_free_session_callback(struct scst_session *sess)
{
        struct completion *c;

        TRACE_ENTRY();

        TRACE_DBG("Freeing session %p", sess);

        c = sess->shutdown_compl;

        if (sess->unreg_done_fn) {
                TRACE_DBG("Calling unreg_done_fn(%p)", sess);
                sess->unreg_done_fn(sess);
                TRACE_DBG("%s", "unreg_done_fn() returned");
        }
        scst_free_session(sess);

        if (c)
                complete_all(c);

        TRACE_EXIT();
        return;
}

void scst_sched_session_free(struct scst_session *sess)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&scst_mgmt_lock, flags);
        TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
        list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
        spin_unlock_irqrestore(&scst_mgmt_lock, flags);

        wake_up(&scst_mgmt_waitQ);

        TRACE_EXIT();
        return;
}

void scst_cmd_get(struct scst_cmd *cmd)
{
        __scst_cmd_get(cmd);
}

void scst_cmd_put(struct scst_cmd *cmd)
{
        __scst_cmd_put(cmd);
}

struct scst_cmd *scst_alloc_cmd(int gfp_mask)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
        cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
        if (cmd == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        memset(cmd, 0, sizeof(*cmd));
#endif

        cmd->state = SCST_CMD_STATE_INIT_WAIT;
        atomic_set(&cmd->cmd_ref, 1);
        cmd->cmd_lists = &scst_main_cmd_lists;
        cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
        cmd->timeout = SCST_DEFAULT_TIMEOUT;
        cmd->retries = 0;
        cmd->data_len = -1;
        cmd->tgt_resp_flags = SCST_TSC_FLAG_STATUS;
        cmd->resp_data_len = -1;

out:
        TRACE_EXIT();
        return cmd;
}

void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
        scst_sess_put(cmd->sess);

        /* At this point tgt_dev can be dead, but the pointer remains not-NULL */
        if (likely(cmd->tgt_dev != NULL))
                __scst_put();

        scst_destroy_cmd(cmd);
        return;
}

/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
        int destroy = 1;

        TRACE_ENTRY();

        TRACE_DBG("Freeing cmd %p (tag %Lu)", cmd, cmd->tag);

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
                        cmd, atomic_read(&scst_cmd_count));
        }

        sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
                cmd->dec_on_dev_needed);

#if defined(EXTRACHECKS) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
        if (cmd->scsi_req) {
                PRINT_ERROR("%s: %s", __FUNCTION__, "Cmd with unfreed "
                        "scsi_req!");
                scst_release_request(cmd);
        }
#endif

        scst_check_restore_sg_buff(cmd);

        if (unlikely(cmd->internal)) {
                if (cmd->bufflen > 0)
                        scst_release_space(cmd);
                scst_destroy_cmd(cmd);
                goto out;
        }

        if (cmd->tgtt->on_free_cmd != NULL) {
                TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
                cmd->tgtt->on_free_cmd(cmd);
                TRACE_DBG("%s", "Target's on_free_cmd() returned");
        }

        if (likely(cmd->dev != NULL)) {
                struct scst_dev_type *handler = cmd->dev->handler;
                if (handler->on_free_cmd != NULL) {
                        TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
                              handler->name, cmd);
                        handler->on_free_cmd(cmd);
                        TRACE_DBG("Dev handler %s on_free_cmd() returned",
                                handler->name);
                }
        }

        scst_release_space(cmd);

        if (unlikely(cmd->sense != NULL)) {
                TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
                mempool_free(cmd->sense, scst_sense_mempool);
                cmd->sense = NULL;
        }

        if (likely(cmd->tgt_dev != NULL)) {
#ifdef EXTRACHECKS
                if (unlikely(!cmd->sent_to_midlev)) {
                        PRINT_ERROR("Finishing not executed cmd %p (opcode "
                             "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
                             cmd, cmd->cdb[0], cmd->tgtt->name, (uint64_t)cmd->lun,
                             cmd->sn, cmd->tgt_dev->expected_sn);
                        scst_unblock_deferred(cmd->tgt_dev, cmd);
                }
#endif

                if (unlikely(cmd->out_of_sn)) {
                        TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
                                "destroy=%d", cmd, cmd->tag, cmd->sn, destroy);
                        destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &cmd->cmd_flags);
                }
        }

        if (likely(destroy))
                scst_destroy_put_cmd(cmd);

out:
        TRACE_EXIT();
        return;
}

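/*
 * Commands that the target driver could not accept (queue full) wait on
 * tgt->retry_cmd_list. Each completed command calls this to move at most
 * two of them back to the active list per invocation ("slow start"), so
 * a recovering target driver isn't flooded again.
 */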
/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
        int need_wake_up = 0;

        TRACE_ENTRY();

        /*
         * We don't worry about overflow of finished_cmds, because we check
         * only for its change.
         */
        atomic_inc(&tgt->finished_cmds);
        smp_mb__after_atomic_inc();
        if (unlikely(tgt->retry_cmds > 0)) {
                struct scst_cmd *c, *tc;
                unsigned long flags;

                TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
                      tgt->retry_cmds);

                spin_lock_irqsave(&tgt->tgt_lock, flags);
                list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
                                cmd_list_entry) {
                        tgt->retry_cmds--;

                        TRACE_RETRY("Moving retry cmd %p to head of active "
                                "cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);

                        need_wake_up++;
                        if (need_wake_up >= 2) /* "slow start" */
                                break;
                }
                spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        }

        TRACE_EXIT();
        return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
        struct scst_tgt *tgt = (struct scst_tgt *)arg;
        unsigned long flags;

        TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_timer_active = 0;
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        scst_check_retries(tgt);

        TRACE_EXIT();
        return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
        if (mcmd == NULL) {
                PRINT_ERROR("%s", "Allocation of management command "
                        "failed, some commands and their data could leak");
                goto out;
        }
        memset(mcmd, 0, sizeof(*mcmd));

out:
        TRACE_EXIT();
        return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
        atomic_dec(&mcmd->sess->sess_cmd_count);
        spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

        scst_sess_put(mcmd->sess);

        if (mcmd->mcmd_tgt_dev != NULL)
                __scst_put();

        mempool_free(mcmd, scst_mgmt_mempool);

        TRACE_EXIT();
        return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
int scst_alloc_request(struct scst_cmd *cmd)
{
        int res = 0;
        struct scsi_request *req;
        int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        /* cmd->dev->scsi_dev must be non-NULL here */
        req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
        if (req == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scsi_request failed");
                res = -ENOMEM;
                goto out;
        }

        cmd->scsi_req = req;

        memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
        req->sr_cmd_len = cmd->cdb_len;
        req->sr_data_direction = cmd->data_direction;
        req->sr_use_sg = cmd->sg_cnt;
        req->sr_bufflen = cmd->bufflen;
        req->sr_buffer = cmd->sg;
        req->sr_request->rq_disk = cmd->dev->rq_disk;
        req->sr_sense_buffer[0] = 0;

        cmd->scsi_req->upper_private_data = cmd;

out:
        TRACE_EXIT();
        return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
        scsi_release_request(cmd->scsi_req);
        cmd->scsi_req = NULL;
}
#endif

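/*
 * Allocates the data buffer as an SG vector from the tgt_dev's sgv pool
 * chosen in scst_alloc_add_tgt_dev(). In atomic context only cached pool
 * entries may be used (no new allocation on a cache miss), and the
 * result is dropped if it exceeds the negotiated per-command SG limit.
 */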
int scst_alloc_space(struct scst_cmd *cmd)
{
        int gfp_mask;
        int res = -ENOMEM;
        int atomic = scst_cmd_atomic(cmd);
        int flags;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

        TRACE_ENTRY();

        gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);

        flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
        if (cmd->no_sgv)
                flags |= SCST_POOL_ALLOC_NO_CACHED;
        cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &cmd->sgv, NULL);
        if (cmd->sg == NULL)
                goto out;

        if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
                static int ll;
                if (ll < 10) {
                        PRINT_INFO("Unable to complete command due to "
                                "SG IO count limitation (requested %d, "
                                "available %d, tgt lim %d)", cmd->sg_cnt,
                                tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
                        ll++;
                }
                goto out_sg_free;
        }

        res = 0;

out:
        TRACE_EXIT();
        return res;

out_sg_free:
        sgv_pool_free(cmd->sgv);
        cmd->sgv = NULL;
        cmd->sg = NULL;
        cmd->sg_cnt = 0;
        goto out;
}

void scst_release_space(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        if (cmd->sgv == NULL)
                goto out;

        if (cmd->data_buf_alloced) {
                TRACE_MEM("%s", "data_buf_alloced set, returning");
                goto out;
        }

        sgv_pool_free(cmd->sgv);

        cmd->sgv = NULL;
        cmd->sg_cnt = 0;
        cmd->sg = NULL;
        cmd->bufflen = 0;
        cmd->data_len = 0;

out:
        TRACE_EXIT();
        return;
}

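/*
 * The top three bits of a SCSI opcode encode its command group, which in
 * turn fixes the CDB length: groups 0, 1 and 2, 4, and 5 use 6-, 10-,
 * 16- and 12-byte CDBs respectively; -1 marks reserved/vendor-specific
 * groups. E.g. READ(10) = 0x28 is in group 1, so its CDB is 10 bytes.
 */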
static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };

#define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
#define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]

int scst_get_cdb_len(const uint8_t *cdb)
{
        return SCST_GET_CDB_LEN(cdb[0]);
}

/*
 * The get_trans_len_N() helpers below extract an N-byte, big-endian
 * length field from the CDB, starting at byte offset "off". For example,
 * READ(10) carries its 2-byte transfer length at bytes 7-8, which
 * corresponds to get_trans_len_2(cdb, 7).
 */

1634 static uint32_t get_trans_len_1(const uint8_t *cdb, uint8_t off)
1635 {
1636         u32 len;
1637
1638         len = (u32)cdb[off];
1639         return len;
1640 }
1641
1642 static uint32_t get_trans_len_2(const uint8_t *cdb, uint8_t off)
1643 {
1644         const uint8_t *p = cdb + off;
1645         u32 len = 0;
1646
1647         len |= ((u32)p[0]) << 8;
1648         len |= ((u32)p[1]);
1649         return len;
1650 }
1651
1652 static uint32_t get_trans_len_3(const uint8_t *cdb, uint8_t off)
1653 {
1654         const uint8_t *p = cdb + off;
1655         u32 len = 0;
1656
1657         len |= ((u32)p[0]) << 16;
1658         len |= ((u32)p[1]) << 8;
1659         len |= ((u32)p[2]);
1660         return len;
1661 }
1662
1663 static uint32_t get_trans_len_4(const uint8_t *cdb, uint8_t off)
1664 {
1665         const uint8_t *p = cdb + off;
1666         u32 len = 0;
1667
1668         len |= ((u32)p[0]) << 24;
1669         len |= ((u32)p[1]) << 16;
1670         len |= ((u32)p[2]) << 8;
1671         len |= ((u32)p[3]);
1672         return len;
1673 }
1674
1675 /* for special commands */
1676 static uint32_t get_trans_len_block_limit(const uint8_t *cdb, uint8_t off)
1677 {
1678         return 6;
1679 }
1680
1681 static uint32_t get_trans_len_read_capacity(const uint8_t *cdb, uint8_t off)
1682 {
1683         return READ_CAP_LEN;
1684 }
1685
1686 static uint32_t get_trans_len_single(const uint8_t *cdb, uint8_t off)
1687 {
1688         return 1;
1689 }
1690
1691 static uint32_t get_trans_len_none(const uint8_t *cdb, uint8_t off)
1692 {
1693         return 0;
1694 }
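
/*
 * A worked example (illustrative only): for READ(10) the TRANSFER LENGTH
 * is a 2-byte big-endian field at CDB offset 7, so the op table would pair
 * that opcode with get_trans_len_2 and off = 7.
 */
#if 0
static void scst_trans_len_example(void)
{
        /* READ(10), LBA 0, TRANSFER LENGTH 0x0100 (256 blocks) */
        static const uint8_t read10[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0x01, 0x00, 0 };

        TRACE_DBG("READ(10) transfer length %u blocks",
                get_trans_len_2(read10, 7));
}
#endif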
1695
1696 int scst_get_cdb_info(const uint8_t *cdb_p, int dev_type,
1697         enum scst_cdb_flags *op_flags, scst_data_direction *direction,
1698         unsigned int *transfer_len, int *cdb_len, const char **op_name)
1699 {
1700         int i, res = 0;
1701         uint8_t op;
1702         const struct scst_sdbops *ptr = NULL;
1703
1704         TRACE_ENTRY();
1705
1706         op = *cdb_p;    /* the raw opcode byte */
1707
1708         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1709                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1710                 dev_type);
1711
1712         i = scst_scsi_op_list[op];
1713         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1714                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1715                         ptr = &scst_scsi_op_table[i];
1716                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>", 
1717                               ptr->ops, ptr->devkey[0], /* disk     */
1718                               ptr->devkey[1],   /* tape     */
1719                               ptr->devkey[2],   /* printer */
1720                               ptr->devkey[3],   /* cpu      */
1721                               ptr->devkey[4],   /* cdr      */
1722                               ptr->devkey[5],   /* cdrom    */
1723                               ptr->devkey[6],   /* scanner */
1724                               ptr->devkey[7],   /* worm     */
1725                               ptr->devkey[8],   /* changer */
1726                               ptr->devkey[9],   /* commdev */
1727                               ptr->op_name);
1728                         TRACE_DBG("direction=%d flags=%d off=%d",
1729                               ptr->direction,
1730                               ptr->flags,
1731                               ptr->off);
1732                         break;
1733                 }
1734                 i++;
1735         }
1736
1737         if (ptr == NULL) {
1738                 /* opcode not found, or not supported for this device type */
1739                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1740                       dev_type);
1741                 res = -1;
1742                 *op_flags = SCST_INFO_INVALID;
1743                 goto out;
1744         }
1745
1746         *cdb_len = SCST_GET_CDB_LEN(op);
1747         *op_name = ptr->op_name;
1748         *direction = ptr->direction;
1749         *op_flags = ptr->flags;
1750         *transfer_len = (*ptr->get_trans_len)(cdb_p, ptr->off);
1751
1752 #ifdef EXTRACHECKS
1753         if (unlikely((*transfer_len == 0) &&
1754                      (*direction != SCST_DATA_NONE) &&
1755                      ((*op_flags & SCST_UNKNOWN_LENGTH) == 0))) {
1756                 PRINT_ERROR("transfer_len 0, direction %d, flags %x, changing "
1757                         "direction on NONE", *direction, *op_flags);
1758                 *direction = SCST_DATA_NONE;
1759         }
1760 #endif
1761
1762 out:
1763         TRACE_EXIT_RES(res);
1764         return res;
1765 }
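
/*
 * A minimal caller sketch (illustrative only): decoding a CDB for a disk
 * (device type 0) fills in the direction, lengths and opcode name in one
 * call; a negative result means the opcode is unknown for that type.
 */
#if 0
static void scst_cdb_info_example(const uint8_t *cdb)
{
        enum scst_cdb_flags op_flags;
        scst_data_direction direction;
        unsigned int transfer_len;
        int cdb_len;
        const char *op_name;

        if (scst_get_cdb_info(cdb, 0, &op_flags, &direction, &transfer_len,
                        &cdb_len, &op_name) == 0)
                TRACE_DBG("%s: cdb_len %d, transfer_len %u, direction %d",
                        op_name, cdb_len, transfer_len, direction);
}
#endif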
1766
1767 /*
1768  * Routine to extract a lun number from an 8-byte LUN structure
1769  * in network byte order (BE).
1770  * (see SAM-2, Section 4.12.3 page 40)
1771  * Supports three LUN unpacking methods: peripheral, flat space and logical unit.
1772  */
1773 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1774 {
1775         lun_t res = (lun_t)-1;
1776         int address_method;
1777
1778         TRACE_ENTRY();
1779
1780         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1781
1782         if (len < 2) {
1783                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1784                         "more", len);
1785                 goto out;
1786         }
1787
1788         if (len > 2) {
1789                 switch (len) {
1790                 case 8:
1791                 {
1792                         if ((*((uint64_t*)lun) & 
1793                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1794                                 goto out_err;
1795                         break;
1796                 }
1797                 case 4:
1798                         if (*((uint16_t*)&lun[2]) != 0)
1799                                 goto out_err;
1800                         break;
1801                 case 6:
1802                         if (*((uint32_t*)&lun[2]) != 0)
1803                                 goto out_err;
1804                         break;
1805                 default:
1806                         goto out_err;
1807                 }
1808         }
1809
1810         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1811         switch (address_method) {
1812         case 0: /* peripheral device addressing method */
1813 #if 0 /* It appears legal to treat this as the flat space addressing method as well */
1814                 if (*lun) {
1815                         PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
1816                              "peripheral device addressing method 0x%02x, "
1817                              "expected 0", *lun);
1818                         break;
1819                 }
1820                 res = *(lun + 1);
1821                 break;
1822 #else
1823                 /* fall through */
1824 #endif
1825
1826         case 1: /* flat space addressing method */
1827                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1828                 break;
1829
1830         case 2: /* logical unit addressing method */
1831                 if (*lun & 0x3f) {
1832                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1833                                     "addressing method 0x%02x, expected 0",
1834                                     *lun & 0x3f);
1835                         break;
1836                 }
1837                 if (*(lun + 1) & 0xe0) {
1838                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
1839                                     "addressing method 0x%02x, expected 0",
1840                                     (*(lun + 1) & 0xf8) >> 5);
1841                         break;
1842                 }
1843                 res = *(lun + 1) & 0x1f;
1844                 break;
1845
1846         case 3: /* extended logical unit addressing method */
1847         default:
1848                 PRINT_ERROR("Unimplemented LUN addressing method %u",
1849                             address_method);
1850                 break;
1851         }
1852
1853 out:
1854         TRACE_EXIT_RES((int)res);
1855         return res;
1856
1857 out_err:
1858         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
1859         goto out;
1860 }
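
/*
 * Worked example (flat space addressing, method 1): the 8-byte LUN
 * { 0x40, 0x05, 0, 0, 0, 0, 0, 0 } passes the trailing-zero checks and
 * decodes to ((0x40 & 0x3f) << 8) | 0x05 == 5. Method 0 (peripheral)
 * deliberately falls through to the same flat space decoding above.
 */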
1861
1862 int scst_calc_block_shift(int sector_size)
1863 {
1864         int block_shift = 0;
1865         int t;
1866
1867         if (sector_size == 0)
1868                 sector_size = 512;
1869
1870         t = sector_size;
1871         while (1) {
1872                 if ((t & 1) != 0)
1873                         break;
1874                 t >>= 1;
1875                 block_shift++;
1876         }
1877         if (block_shift < 9) {
1878                 PRINT_ERROR("Wrong sector size %d", sector_size);
1879                 block_shift = -1;
1880         } 
1881
1882         TRACE_EXIT_RES(block_shift);
1883         return block_shift;
1884 }
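
/*
 * Worked examples: 512 -> 9, 4096 -> 12, and 0 defaults to 512 -> 9. The
 * loop finds the position of the lowest set bit, so any size with a set
 * bit below bit 9 (e.g. 520) yields a shift < 9 and is rejected with -1.
 */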
1885
1886 int scst_sbc_generic_parse(struct scst_cmd *cmd,
1887         int (*get_block_shift)(struct scst_cmd *cmd))
1888 {
1889         int res = 0;
1890
1891         TRACE_ENTRY();
1892         
1893         /*
1894          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1895          * therefore change them only if necessary
1896          */
1897
1898         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1899               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1900
1901         switch (cmd->cdb[0]) {
1902         case SERVICE_ACTION_IN:
1903                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
1904                         cmd->bufflen = READ_CAP16_LEN;
1905                         cmd->data_direction = SCST_DATA_READ;
1906                 }
1907                 break;
1908         case VERIFY_6:
1909         case VERIFY:
1910         case VERIFY_12:
1911         case VERIFY_16:
1912                 if ((cmd->cdb[1] & BYTCHK) == 0) {
1913                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
1914                         cmd->bufflen = 0;
1915                         goto out;
1916                 } else
1917                         cmd->data_len = 0;
1918                 break;
1919         default:
1920                 /* It's all good */
1921                 break;
1922         }
1923
1924         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
1925                 /* 
1926                  * No need for locks here, since *_detach() cannot be
1927                  * called while there are outstanding commands.
1928                  */
1929                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
1930         }
1931
1932 out:
1933         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
1934               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
1935
1936         TRACE_EXIT_RES(res);
1937         return res;
1938 }
1939
1940 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
1941         int (*get_block_shift)(struct scst_cmd *cmd))
1942 {
1943         int res = 0;
1944
1945         TRACE_ENTRY();
1946
1947         /*
1948          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1949          * therefore change them only if necessary
1950          */
1951
1952         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1953               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1954
1955         cmd->cdb[1] &= 0x1f;
1956
1957         switch (cmd->cdb[0]) {
1958         case VERIFY_6:
1959         case VERIFY:
1960         case VERIFY_12:
1961         case VERIFY_16:
1962                 if ((cmd->cdb[1] & BYTCHK) == 0) {
1963                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
1964                         cmd->bufflen = 0;
1965                         goto out;
1966                 }
1967                 break;
1968         default:
1969                 /* It's all good */
1970                 break;
1971         }
1972
1973         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
1974                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
1975
1976 out:
1977         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
1978                 cmd->data_direction);
1979
1980         TRACE_EXIT_RES(res);
1981         return res;
1982 }
1983
1984 int scst_modisk_generic_parse(struct scst_cmd *cmd,
1985         int (*get_block_shift)(struct scst_cmd *cmd))
1986 {
1987         int res = 0;
1988
1989         TRACE_ENTRY();
1990
1991         /*
1992          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1993          * therefore change them only if necessary
1994          */
1995
1996         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1997               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1998
1999         cmd->cdb[1] &= 0x1f;
2000
2001         switch (cmd->cdb[0]) {
2002         case VERIFY_6:
2003         case VERIFY:
2004         case VERIFY_12:
2005         case VERIFY_16:
2006                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2007                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2008                         cmd->bufflen = 0;
2009                         goto out;
2010                 }
2011                 break;
2012         default:
2013                 /* It's all good */
2014                 break;
2015         }
2016
2017         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2018                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2019
2020 out:
2021         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2022                 cmd->data_direction);
2023
2024         TRACE_EXIT_RES(res);
2025         return res;
2026 }
2027
2028 int scst_tape_generic_parse(struct scst_cmd *cmd,
2029         int (*get_block_size)(struct scst_cmd *cmd))
2030 {
2031         int res = 0;
2032
2033         TRACE_ENTRY();
2034
2035         /*
2036          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2037          * therefore change them only if necessary
2038          */
2039
2040         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2041               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2042
2043         if (cmd->cdb[0] == READ_POSITION) {
2044                 int tclp = cmd->cdb[1] & TCLP_BIT;
2045                 int long_bit = cmd->cdb[1] & LONG_BIT;
2046                 int bt = cmd->cdb[1] & BT_BIT;
2047
2048                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2049                         cmd->bufflen =
2050                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2051                         cmd->data_direction = SCST_DATA_READ;
2052                 } else {
2053                         cmd->bufflen = 0;
2054                         cmd->data_direction = SCST_DATA_NONE;
2055                 }
2056         }
2057
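        /*
         * The chained AND below also tests the FIXED bit (bit 0) of cdb[1];
         * this relies on SCST_TRANSFER_LEN_TYPE_FIXED having bit 0 set
         * (assumed to be defined as 0x01), so one expression checks both the
         * op flag and the CDB bit. In fixed-block mode the transfer length
         * counts blocks, hence the multiplication by the block size.
         */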
2058         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2059                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2060
2061         TRACE_EXIT_RES(res);
2062         return res;
2063 }
2064
2065 static int scst_null_parse(struct scst_cmd *cmd)
2066 {
2067         int res = 0;
2068
2069         TRACE_ENTRY();
2070
2071         /*
2072          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2073          * therefore change them only if necessary
2074          */
2075
2076         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2077               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2078 #if 0
2079         switch (cmd->cdb[0]) {
2080         default:
2081                 /* It's all good */
2082                 break;
2083         }
2084 #endif
2085         TRACE_DBG("res %d bufflen %d direct %d",
2086               res, cmd->bufflen, cmd->data_direction);
2087
2088         TRACE_EXIT();
2089         return res;
2090 }
2091
2092 int scst_changer_generic_parse(struct scst_cmd *cmd,
2093         int (*nothing)(struct scst_cmd *cmd))
2094 {
2095         return scst_null_parse(cmd);
2096 }
2097
2098 int scst_processor_generic_parse(struct scst_cmd *cmd,
2099         int (*nothing)(struct scst_cmd *cmd))
2100 {
2101         return scst_null_parse(cmd);
2102 }
2103
2104 int scst_raid_generic_parse(struct scst_cmd *cmd,
2105         int (*nothing)(struct scst_cmd *cmd))
2106 {
2107         return scst_null_parse(cmd);
2108 }
2109
2110 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2111         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2112 {
2113         int opcode = cmd->cdb[0];
2114         int status = cmd->status;
2115         int res = SCST_CMD_STATE_DEFAULT;
2116
2117         TRACE_ENTRY();
2118
2119         /*
2120          * SCST sets good defaults for cmd->tgt_resp_flags and cmd->resp_data_len
2121          * based on cmd->status and cmd->data_direction, therefore change
2122          * them only if necessary
2123          */
2124
2125         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2126                 switch (opcode) {
2127                 case READ_CAPACITY:
2128                 {
2129                         /* Always keep track of disk capacity */
2130                         int buffer_size, sector_size, sh;
2131                         uint8_t *buffer;
2132
2133                         buffer_size = scst_get_buf_first(cmd, &buffer);
2134                         if (unlikely(buffer_size <= 0)) {
2135                                 PRINT_ERROR("%s: Unable to get the buffer "
2136                                         "(%d)", __FUNCTION__, buffer_size);
2137                                 goto out;
2138                         }
2139
2140                         sector_size =
2141                             ((buffer[4] << 24) | (buffer[5] << 16) |
2142                              (buffer[6] << 8) | (buffer[7] << 0));
2143                         scst_put_buf(cmd, buffer);
2144                         if (sector_size != 0)
2145                                 sh = scst_calc_block_shift(sector_size);
2146                         else
2147                                 sh = 0;
2148                         set_block_shift(cmd, sh);
2149                         TRACE_DBG("block_shift %d", sh);
2150                         break;
2151                 }
2152                 default:
2153                         /* It's all good */
2154                         break;
2155                 }
2156         }
2157
2158         TRACE_DBG("cmd->tgt_resp_flags=%x, cmd->resp_data_len=%d, "
2159               "res=%d", cmd->tgt_resp_flags, cmd->resp_data_len, res);
2160
2161 out:
2162         TRACE_EXIT_RES(res);
2163         return res;
2164 }
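
/*
 * READ CAPACITY decoding example: bytes 4..7 of the response hold the
 * BLOCK LENGTH in big-endian order, so { ..., 0x00, 0x00, 0x02, 0x00 }
 * means 512-byte sectors and therefore a block shift of 9.
 */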
2165
2166 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2167         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2168 {
2169         int opcode = cmd->cdb[0];
2170         int res = SCST_CMD_STATE_DEFAULT;
2171         int buffer_size, bs;
2172         uint8_t *buffer = NULL;
2173
2174         TRACE_ENTRY();
2175
2176         /*
2177          * SCST sets good defaults for cmd->tgt_resp_flags and cmd->resp_data_len
2178          * based on cmd->status and cmd->data_direction, therefore change
2179          * them only if necessary
2180          */
2181                 
2182         switch (opcode) {
2183         case MODE_SENSE:
2184         case MODE_SELECT:
2185                 buffer_size = scst_get_buf_first(cmd, &buffer);
2186                 if (unlikely(buffer_size <= 0)) {
2187                         PRINT_ERROR("%s: Unable to get the buffer (%d)",
2188                                 __FUNCTION__, buffer_size);
2189                         goto out;
2190                 }
2191                 break;
2192         }
2193
2194         switch (opcode) {
2195         case MODE_SENSE:
2196                 TRACE_DBG("%s", "MODE_SENSE");
2197                 if ((cmd->cdb[2] & 0xC0) == 0) {
2198                         if (buffer[3] == 8) {
2199                                 bs = (buffer[9] << 16) |
2200                                     (buffer[10] << 8) | buffer[11];
2201                                 set_block_size(cmd, bs);
2202                         }
2203                 }
2204                 break;
2205         case MODE_SELECT:
2206                 TRACE_DBG("%s", "MODE_SELECT");
2207                 if (buffer[3] == 8) {
2208                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2209                             (buffer[11]);
2210                         set_block_size(cmd, bs);
2211                 }
2212                 break;
2213         default:
2214                 /* It's all good */
2215                 break;
2216         }
2217         
2218         switch (opcode) {
2219         case MODE_SENSE:
2220         case MODE_SELECT:
2221                 scst_put_buf(cmd, buffer);
2222                 break;
2223         }
2224
2225 out:
2226         TRACE_EXIT_RES(res);
2227         return res;
2228 }
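
/*
 * Mode parameter layout assumed above: buffer[3] is the BLOCK DESCRIPTOR
 * LENGTH (8 for one descriptor) and bytes 9..11 of the response hold the
 * 24-bit big-endian BLOCK LENGTH, hence the
 * (buffer[9] << 16) | (buffer[10] << 8) | buffer[11] decoding.
 */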
2229
2230 static void scst_check_internal_sense(struct scst_device *dev, int result,
2231         uint8_t *sense, int sense_len)
2232 {
2233         TRACE_ENTRY();
2234
2235         if (host_byte(result) == DID_RESET) {
2236                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2237                         "reset UA");
2238                 scst_set_sense(sense, sense_len,
2239                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2240                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2241         } else if ((status_byte(result) == CHECK_CONDITION) &&
2242                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2243                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2244
2245         TRACE_EXIT();
2246         return;
2247 }
2248
2249 int scst_obtain_device_parameters(struct scst_device *dev)
2250 {
2251         int res = 0, i;
2252         uint8_t cmd[16];
2253         uint8_t buffer[4+0x0A];
2254         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2255
2256         TRACE_ENTRY();
2257
2258         sBUG_ON(in_interrupt() || in_atomic());
2259         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2260
2261         for (i = 0; i < 5; i++) {
2262                 /* Get control mode page */
2263                 memset(cmd, 0, sizeof(cmd));
2264                 cmd[0] = MODE_SENSE;
2265                 cmd[1] = 8; /* DBD */
2266                 cmd[2] = 0x0A;
2267                 cmd[4] = sizeof(buffer);
2268
2269                 memset(buffer, 0, sizeof(buffer));
2270                 memset(sense_buffer, 0, sizeof(sense_buffer));
2271
2272                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2273                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer, 
2274                            sizeof(buffer), sense_buffer, SCST_DEFAULT_TIMEOUT,
2275                             0, GFP_KERNEL);
2276
2277                 TRACE_DBG("MODE_SENSE done: %x", res);
2278
2279                 if (scsi_status_is_good(res)) {
2280                         int q;
2281
2282                         PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2283                                 buffer, sizeof(buffer));
2284
2285                         dev->tst = buffer[4+2] >> 5;
2286                         q = buffer[4+3] >> 4;
2287                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2288                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2289                                         "%d:%d:%d:%d", dev->queue_alg,
2290                                         dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2291                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2292                         }
2293                         dev->queue_alg = q;
2294                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2295                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2296
2297                         /*
2298                          * Unfortunately, the SCSI midlayer doesn't provide a
2299                          * way to specify a command's task attribute, so we can
2300                          * rely only on the device's restricted reordering.
2301                          */
2302                         dev->has_own_order_mgmt = !dev->queue_alg;
2303
2304                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2305                                 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2306                                 "%d", dev->scsi_dev->host->host_no,
2307                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2308                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2309                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2310
2311                         goto out;
2312                 } else {
2313 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2314                         if ((status_byte(res) == CHECK_CONDITION) &&
2315 #else
2316                         if (
2317 #endif
2318                             SCST_SENSE_VALID(sense_buffer) &&
2319                             (sense_buffer[2] == ILLEGAL_REQUEST)) {
2320                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d "
2321                                         "doesn't support control mode page, using "
2322                                         "defaults: TST %x, QUEUE ALG %x, SWP %x, "
2323                                         "TAS %x, has_own_order_mgmt %d",
2324                                         dev->scsi_dev->host->host_no,
2325                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2326                                         dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2327                                         dev->swp, dev->tas, dev->has_own_order_mgmt);
2328                                 res = 0;
2329                                 goto out;
2330                         } else {
2331                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2332                                         "device %d:%d:%d:%d failed: %x",
2333                                         dev->scsi_dev->host->host_no,
2334                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2335                                         dev->scsi_dev->lun, res);
2336                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2337                                         "sense", sense_buffer, sizeof(sense_buffer));
2338                         }
2339                         scst_check_internal_sense(dev, res, sense_buffer,
2340                                         sizeof(sense_buffer));
2341                 }
2342         }
2343         res = -ENODEV;
2344
2345 out:
2346         TRACE_EXIT_RES(res);
2347         return res;
2348 }
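
/*
 * Control mode page layout relied upon above (4-byte MODE SENSE header
 * followed by page 0x0A): page byte 2 bits 7..5 = TST, byte 3 bits
 * 7..4 = QUEUE ALGORITHM MODIFIER, byte 4 bit 3 = SWP, byte 5 bit 6 = TAS,
 * which is why the fields are read at buffer[4+2] .. buffer[4+5].
 */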
2349
2350 /* Called under dev_lock and BH off */
2351 void scst_process_reset(struct scst_device *dev,
2352         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2353         struct scst_mgmt_cmd *mcmd)
2354 {
2355         struct scst_tgt_dev *tgt_dev;
2356         struct scst_cmd *cmd, *tcmd;
2357
2358         TRACE_ENTRY();
2359
2360         /* Clear RESERVE'ation, if necessary */
2361         if (dev->dev_reserved) {
2362                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2363                                     dev_tgt_dev_list_entry) {
2364                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2365                                 "lun %Ld", tgt_dev->lun);
2366                         clear_bit(SCST_TGT_DEV_RESERVED,
2367                                   &tgt_dev->tgt_dev_flags);
2368                 }
2369                 dev->dev_reserved = 0;
2370                 /*
2371                  * There is no need to send RELEASE, since the device is
2372                  * going to be reset. Moreover, since we may be running in
2373                  * the RESET TM function, sending it might be dangerous.
2374                  */
2375         }
2376
2377         dev->dev_double_ua_possible = 1;
2378         dev->dev_serialized = 1;
2379
2380         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list, 
2381                 dev_tgt_dev_list_entry) {
2382                 struct scst_session *sess = tgt_dev->sess;
2383
2384                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2385                 scst_free_all_UA(tgt_dev);
2386                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2387
2388                 spin_lock_irq(&sess->sess_list_lock);
2389
2390                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2391                 list_for_each_entry(cmd, &sess->search_cmd_list, 
2392                                 search_cmd_list_entry) {
2393                         if (cmd == exclude_cmd)
2394                                 continue;
2395                         if ((cmd->tgt_dev == tgt_dev) ||
2396                             ((cmd->tgt_dev == NULL) && 
2397                              (cmd->lun == tgt_dev->lun))) {
2398                                 scst_abort_cmd(cmd, mcmd,
2399                                         (tgt_dev->sess != originator), 0);
2400                         }
2401                 }
2402                 spin_unlock_irq(&sess->sess_list_lock);
2403         }
2404
2405         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2406                                 blocked_cmd_list_entry) {
2407                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2408                         list_del(&cmd->blocked_cmd_list_entry);
2409                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2410                                 "to active cmd list", cmd);
2411                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2412                         list_add_tail(&cmd->cmd_list_entry,
2413                                 &cmd->cmd_lists->active_cmd_list);
2414                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2415                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2416                 }
2417         }
2418
2419         /* BH already off */
2420         spin_lock(&scst_temp_UA_lock);
2421         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2422                 SCST_LOAD_SENSE(scst_sense_reset_UA));
2423         scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2424                 sizeof(scst_temp_UA));
2425         spin_unlock(&scst_temp_UA_lock);
2426
2427         TRACE_EXIT();
2428         return;
2429 }
2430
2431 int scst_set_pending_UA(struct scst_cmd *cmd)
2432 {
2433         int res = 0;
2434         struct scst_tgt_dev_UA *UA_entry;
2435
2436         TRACE_ENTRY();
2437
2438         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2439
2440         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2441
2442         /* UA list could be cleared behind us, so retest */
2443         if (list_empty(&cmd->tgt_dev->UA_list)) {
2444                 TRACE_DBG("%s",
2445                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2446                 res = -1;
2447                 goto out_unlock;
2448         }
2449
2450         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2451                               UA_list_entry);
2452
2453         TRACE_DBG("next %p UA_entry %p",
2454               cmd->tgt_dev->UA_list.next, UA_entry);
2455
2456         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2457                 sizeof(UA_entry->UA_sense_buffer));
2458
2459         cmd->ua_ignore = 1;
2460
2461         list_del(&UA_entry->UA_list_entry);
2462
2463         mempool_free(UA_entry, scst_ua_mempool);
2464
2465         if (list_empty(&cmd->tgt_dev->UA_list)) {
2466                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2467                           &cmd->tgt_dev->tgt_dev_flags);
2468         }
2469
2470         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2471
2472 out:
2473         TRACE_EXIT_RES(res);
2474         return res;
2475
2476 out_unlock:
2477         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2478         goto out;
2479 }
2480
2481 /* Called under tgt_dev_lock and BH off */
2482 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2483         const uint8_t *sense, int sense_len, int head)
2484 {
2485         struct scst_tgt_dev_UA *UA_entry = NULL;
2486
2487         TRACE_ENTRY();
2488
2489         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2490         if (UA_entry == NULL) {
2491                 PRINT_ERROR("%s", "UNIT ATTENTION memory "
2492                      "allocation failed. The UNIT ATTENTION "
2493                      "on some sessions will be missed");
2494                 goto out;
2495         }
2496         memset(UA_entry, 0, sizeof(*UA_entry));
2497
2498         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2499                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2500         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2501
2502         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2503
2504         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2505
2506         if (head)
2507                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2508         else
2509                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2510
2511 out:
2512         TRACE_EXIT();
2513         return;
2514 }
2515
2516 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2517         const uint8_t *sense, int sense_len, int head)
2518 {
2519         int skip_UA = 0;
2520         struct scst_tgt_dev_UA *UA_entry_tmp;
2521
2522         TRACE_ENTRY();
2523
2524         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2525
2526         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2527                             UA_list_entry) {
2528                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2529                         TRACE_MGMT_DBG("%s", "UA already exists");
2530                         skip_UA = 1;
2531                         break;
2532                 }
2533         }
2534
2535         if (skip_UA == 0)
2536                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2537
2538         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2539
2540         TRACE_EXIT();
2541         return;
2542 }
2543
2544 /* Called under dev_lock and BH off */
2545 void scst_dev_check_set_local_UA(struct scst_device *dev,
2546         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2547 {
2548         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2549
2550         TRACE_ENTRY();
2551
2552         if (exclude != NULL)
2553                 exclude_tgt_dev = exclude->tgt_dev;
2554
2555         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list, 
2556                         dev_tgt_dev_list_entry) {
2557                 if (tgt_dev != exclude_tgt_dev)
2558                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2559         }
2560
2561         TRACE_EXIT();
2562         return;
2563 }
2564
2565 /* Called under dev_lock and BH off */
2566 void __scst_dev_check_set_UA(struct scst_device *dev,
2567         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2568 {
2569         TRACE_ENTRY();
2570
2571         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2572
2573         /* Check for reset UA */
2574         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2575                 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2576                         exclude, NULL);
2577
2578         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2579
2580         TRACE_EXIT();
2581         return;
2582 }
2583
2584 /* Called under tgt_dev_lock or when tgt_dev is unused */
2585 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2586 {
2587         struct scst_tgt_dev_UA *UA_entry, *t;
2588
2589         TRACE_ENTRY();
2590
2591         list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2592                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %Ld", 
2593                         tgt_dev->lun);
2594                 list_del(&UA_entry->UA_list_entry);
2595                 kfree(UA_entry);
2596         }
2597         INIT_LIST_HEAD(&tgt_dev->UA_list);
2598         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2599
2600         TRACE_EXIT();
2601         return;
2602 }
2603
2604 /* No locks */
2605 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2606 {
2607         struct scst_cmd *res = NULL, *cmd, *t;
2608         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2609
2610         spin_lock_irq(&tgt_dev->sn_lock);
2611
2612 restart:
2613         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2614                                 sn_cmd_list_entry) {
2615                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2616                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2617                 if (cmd->sn == expected_sn) {
2618                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2619                                 cmd, cmd->sn, cmd->sn_set);
2620                         tgt_dev->def_cmd_count--;
2621                         list_del(&cmd->sn_cmd_list_entry);
2622                         if (res == NULL)
2623                                 res = cmd;
2624                         else {
2625                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2626                                 TRACE_SN("Adding cmd %p to active cmd list",
2627                                         cmd);
2628                                 list_add_tail(&cmd->cmd_list_entry,
2629                                         &cmd->cmd_lists->active_cmd_list);
2630                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2631                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2632                         }
2633                 }
2634         }
2635         if (res != NULL)
2636                 goto out_unlock;
2637
2638         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2639                                 sn_cmd_list_entry) {
2640                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2641                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2642                 if (cmd->sn == expected_sn) {
2643                         atomic_t *slot = cmd->sn_slot;
2644                         /* 
2645                          * !! At this point any pointer in cmd, except !!
2646                          * !! sn_slot and sn_cmd_list_entry, could be   !!
2647                          * !! already destroyed                         !!
2648                          */
2649                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2650                                 cmd, cmd->tag, cmd->sn);
2651                         tgt_dev->def_cmd_count--;
2652                         list_del(&cmd->sn_cmd_list_entry);
2653                         spin_unlock_irq(&tgt_dev->sn_lock);
2654                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED, 
2655                                         &cmd->cmd_flags)) {
2656                                 scst_destroy_put_cmd(cmd);
2657                         }
2658                         scst_inc_expected_sn(tgt_dev, slot);
2659                         expected_sn = tgt_dev->expected_sn;
2660                         spin_lock_irq(&tgt_dev->sn_lock);
2661                         goto restart;
2662                 }
2663         }
2664
2665 out_unlock:
2666         spin_unlock_irq(&tgt_dev->sn_lock);
2667         return res;
2668 }
2669
2670 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2671         struct scst_thr_data_hdr *data,
2672         void (*free_fn) (struct scst_thr_data_hdr *data))
2673 {
2674         data->pid = current->pid;
2675         atomic_set(&data->ref, 1);
2676         EXTRACHECKS_BUG_ON(free_fn == NULL);
2677         data->free_fn = free_fn;
2678         spin_lock(&tgt_dev->thr_data_lock);
2679         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2680         spin_unlock(&tgt_dev->thr_data_lock);
2681 }
2682
2683 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2684 {
2685         spin_lock(&tgt_dev->thr_data_lock);
2686         while (!list_empty(&tgt_dev->thr_data_list)) {
2687                 struct scst_thr_data_hdr *d = list_entry(
2688                                 tgt_dev->thr_data_list.next, typeof(*d),
2689                                 thr_data_list_entry);
2690                 list_del(&d->thr_data_list_entry);
2691                 spin_unlock(&tgt_dev->thr_data_lock);
2692                 scst_thr_data_put(d);
2693                 spin_lock(&tgt_dev->thr_data_lock);
2694         }
2695         spin_unlock(&tgt_dev->thr_data_lock);
2696         return;
2697 }
2698
2699 void scst_dev_del_all_thr_data(struct scst_device *dev)
2700 {
2701         struct scst_tgt_dev *tgt_dev;
2702
2703         TRACE_ENTRY();
2704
2705         mutex_lock(&scst_mutex);
2706
2707         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2708                                 dev_tgt_dev_list_entry) {
2709                 scst_del_all_thr_data(tgt_dev);
2710         }
2711
2712         mutex_unlock(&scst_mutex);
2713
2714         TRACE_EXIT();
2715         return;
2716 }
2717
2718 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2719 {
2720         struct scst_thr_data_hdr *res = NULL, *d;
2721
2722         spin_lock(&tgt_dev->thr_data_lock);
2723         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2724                 if (d->pid == current->pid) {
2725                         res = d;
2726                         scst_thr_data_get(res);
2727                         break;
2728                 }
2729         }
2730         spin_unlock(&tgt_dev->thr_data_lock);
2731         return res;
2732 }
2733
2734 /* dev_lock supposed to be held and BH disabled */
2735 void __scst_block_dev(struct scst_device *dev)
2736 {
2737         dev->block_count++;
2738         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2739 }
2740
2741 /* No locks */
2742 void scst_block_dev(struct scst_device *dev, int outstanding)
2743 {
2744         spin_lock_bh(&dev->dev_lock);
2745         __scst_block_dev(dev);
2746         spin_unlock_bh(&dev->dev_lock);
2747
2748         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2749         smp_mb();
2750
2751         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2752                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2753         wait_event(dev->on_dev_waitQ, 
2754                 atomic_read(&dev->on_dev_count) <= outstanding);
2755         TRACE_MGMT_DBG("%s", "wait_event() returned");
2756 }
2757
2758 /* No locks */
2759 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
2760 {
2761         sBUG_ON(cmd->needs_unblocking);
2762
2763         cmd->needs_unblocking = 1;
2764         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)", cmd, cmd->tag);
2765
2766         scst_block_dev(cmd->dev, outstanding);
2767 }
2768
2769 /* No locks */
2770 void scst_unblock_dev(struct scst_device *dev)
2771 {
2772         spin_lock_bh(&dev->dev_lock);
2773         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
2774                 dev->block_count-1, dev);
2775         if (--dev->block_count == 0)
2776                 scst_unblock_cmds(dev);
2777         spin_unlock_bh(&dev->dev_lock);
2778         sBUG_ON(dev->block_count < 0);
2779 }
2780
2781 /* No locks */
2782 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
2783 {
2784         scst_unblock_dev(cmd->dev);
2785         cmd->needs_unblocking = 0;
2786 }
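
/*
 * A typical pairing sketch (illustrative only): a caller blocks the
 * device, waits until no commands are outstanding on it, performs its
 * work, then unblocks:
 */
#if 0
        scst_block_dev_cmd(cmd, 0);     /* waits for on_dev_count == 0 */
        /* ... act while no commands can reach the device ... */
        scst_unblock_dev_cmd(cmd);
#endif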
2787
2788 /* No locks */
2789 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
2790 {
2791         int res = 0;
2792         struct scst_device *dev = cmd->dev;
2793
2794         TRACE_ENTRY();
2795
2796         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
2797
2798         atomic_inc(&dev->on_dev_count);
2799         cmd->dec_on_dev_needed = 1;
2800         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
2801
2802 #ifdef STRICT_SERIALIZING
2803         spin_lock_bh(&dev->dev_lock);
2804         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2805                 goto out_unlock;
2806         if (dev->block_count > 0) {
2807                 scst_dec_on_dev_cmd(cmd);
2808                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
2809                         "serializing (tag %llu, dev %p)", cmd, cmd->tag, dev);
2810                 list_add_tail(&cmd->blocked_cmd_list_entry,
2811                               &dev->blocked_cmd_list);
2812                 res = 1;
2813         } else {
2814                 __scst_block_dev(dev);
2815                 cmd->inc_blocking = 1;
2816         }
2817         spin_unlock_bh(&dev->dev_lock);
2818         goto out;
2819 #else
2820 repeat:
2821         if (unlikely(dev->block_count > 0)) {
2822                 spin_lock_bh(&dev->dev_lock);
2823                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2824                         goto out_unlock;
2825                 barrier(); /* to reread block_count */
2826                 if (dev->block_count > 0) {
2827                         scst_dec_on_dev_cmd(cmd);
2828                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
2829                                 "serializing (tag %llu, dev %p)", cmd,
2830                                 cmd->tag, dev);
2831                         list_add_tail(&cmd->blocked_cmd_list_entry,
2832                                       &dev->blocked_cmd_list);
2833                         res = 1;
2834                         spin_unlock_bh(&dev->dev_lock);
2835                         goto out;
2836                 } else {
2837                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
2838                                 "continuing");
2839                 }
2840                 spin_unlock_bh(&dev->dev_lock);
2841         }
2842         if (unlikely(dev->dev_serialized)) {
2843                 spin_lock_bh(&dev->dev_lock);
2844                 barrier(); /* to reread block_count */
2845                 if (dev->block_count == 0) {
2846                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
2847                                 "cmds due to serializing (dev %p)", cmd,
2848                                 cmd->tag, dev);
2849                         __scst_block_dev(dev);
2850                         cmd->inc_blocking = 1;
2851                 } else {
2852                         spin_unlock_bh(&dev->dev_lock);
2853                         TRACE_MGMT_DBG("Somebody blocked the device, "
2854                                 "repeating (count %d)", dev->block_count);
2855                         goto repeat;
2856                 }
2857                 spin_unlock_bh(&dev->dev_lock);
2858         }
2859 #endif
2860
2861 out:
2862         TRACE_EXIT_RES(res);
2863         return res;
2864
2865 out_unlock:
2866         spin_unlock_bh(&dev->dev_lock);
2867         goto out;
2868 }
2869
2870 /* Called under dev_lock */
2871 void scst_unblock_cmds(struct scst_device *dev)
2872 {
2873 #ifdef STRICT_SERIALIZING
2874         struct scst_cmd *cmd, *t;
2875         unsigned long flags;
2876
2877         TRACE_ENTRY();
2878
2879         local_irq_save(flags);
2880         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
2881                                  blocked_cmd_list_entry) {
2882                 int brk = 0;
2883                 /* 
2884                  * Since only one cmd at a time is being executed, expected_sn
2885                  * can't change behind us while the corresponding cmd is in
2886                  * blocked_cmd_list, but we could be called before
2887                  * scst_inc_expected_sn().
2888                  */
2889                 if (likely(!cmd->internal && !cmd->retry)) {
2890                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
2891                         if (cmd->tgt_dev == NULL)
2892                                 sBUG();
2893                         expected_sn = cmd->tgt_dev->expected_sn;
2894                         if (cmd->sn == expected_sn)
2895                                 brk = 1;
2896                         else if (cmd->sn != (expected_sn+1))
2897                                 continue;
2898                 }
2899                         
2900                 list_del(&cmd->blocked_cmd_list_entry);
2901                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
2902                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2903                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
2904                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2905                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2906                 if (brk)
2907                         break;
2908         }
2909         local_irq_restore(flags);
2910 #else /* STRICT_SERIALIZING */
2911         struct scst_cmd *cmd, *tcmd;
2912         unsigned long flags;
2913
2914         TRACE_ENTRY();
2915
2916         local_irq_save(flags);
2917         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2918                                  blocked_cmd_list_entry) {
2919                 list_del(&cmd->blocked_cmd_list_entry);
2920                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
2921                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2922                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2923                         list_add(&cmd->cmd_list_entry,
2924                                 &cmd->cmd_lists->active_cmd_list);
2925                 else
2926                         list_add_tail(&cmd->cmd_list_entry,
2927                                 &cmd->cmd_lists->active_cmd_list);
2928                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2929                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2930         }
2931         local_irq_restore(flags);
2932 #endif /* STRICT_SERIALIZING */
2933
2934         TRACE_EXIT();
2935         return;
2936 }
2937
2938 static struct scst_cmd *__scst_unblock_deferred(
2939         struct scst_tgt_dev *tgt_dev, struct scst_cmd *out_of_sn_cmd)
2940 {
2941         struct scst_cmd *res = NULL;
2942
2943         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
2944
2945         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
2946                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
2947                 res = scst_check_deferred_commands(tgt_dev);
2948         } else {
2949                 out_of_sn_cmd->out_of_sn = 1;
2950                 spin_lock_irq(&tgt_dev->sn_lock);
2951                 tgt_dev->def_cmd_count++;
2952                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
2953                               &tgt_dev->skipped_sn_list);
2954                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
2955                         "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
2956                         tgt_dev->expected_sn);
2957                 spin_unlock_irq(&tgt_dev->sn_lock);
2958         }
2959
2960         return res;
2961 }
2962
2963 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
2964         struct scst_cmd *out_of_sn_cmd)
2965 {
2966         struct scst_cmd *cmd;
2967
2968         TRACE_ENTRY();
2969
2970         if (!out_of_sn_cmd->sn_set) {
2971                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
2972                 goto out;
2973         }
2974
2975         cmd = __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
2976         if (cmd != NULL) {
2977                 unsigned long flags;
2978                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
2979                 TRACE_SN("cmd %p with sn %ld added to the head of active cmd "
2980                         "list", cmd, cmd->sn);
2981                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
2982                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2983                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
2984         }
2985
2986 out:
2987         TRACE_EXIT();
2988         return;
2989 }
2990
2991 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
2992 {
2993         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2994
2995         TRACE_ENTRY();
2996
2997         spin_lock_irq(&tgt_dev->sn_lock);
2998         tgt_dev->hq_cmd_count--;
2999         spin_unlock_irq(&tgt_dev->sn_lock);
3000
3001         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3002
3003         /*
3004          * There is no problem in checking hq_cmd_count in the
3005          * non-locked state. In the worst case we will only get an
3006          * unneeded run of the deferred commands.
3007          */
3008         if (tgt_dev->hq_cmd_count == 0) {
3009                 struct scst_cmd *c =
3010                         scst_check_deferred_commands(tgt_dev);
3011                 if (c != NULL) {
3012                         spin_lock_irq(&c->cmd_lists->cmd_list_lock);
3013                         TRACE_SN("Adding cmd %p to active cmd list", c);
3014                         list_add_tail(&c->cmd_list_entry,
3015                                 &c->cmd_lists->active_cmd_list);
3016                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3017                         spin_unlock_irq(&c->cmd_lists->cmd_list_lock);
3018                 }
3019         }
3020
3021         TRACE_EXIT();
3022         return;
3023 }
3024
3025 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3026 {
3027         TRACE_ENTRY();
3028
3029         smp_rmb();
3030         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3031                 if (cmd->completed) {
3032                         /* It's completed and it's OK to return its result */
3033                         goto out;
3034                 }
3035                 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p (tag %llu)",
3036                         cmd, cmd->tag);
3037                 if (cmd->dev->tas) {
3038                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3039                 } else {
3040                         /*
3041                          * Abort without delivery or notification.
3042                          * There is no need to check/requeue possible UA,
3043                          * because, if it exists, it will be delivered
3044                          * by the "completed" branch.
3045                          */
3046                         clear_bit(SCST_CMD_ABORTED_OTHER,
3047                                 &cmd->cmd_flags);
3048                 }
3049         } else {
3050                 if ((cmd->tgt_dev != NULL) &&
3051                     scst_is_ua_sense(cmd->sense)) {
3052                         /* This UA delivery is going to fail, so requeue it */
3053                         TRACE_MGMT_DBG("Requeuing UA for aborted cmd %p", cmd);
3054                         scst_check_set_UA(cmd->tgt_dev, cmd->sense,
3055                                         SCST_SENSE_BUFFERSIZE, 1);
3056                         mempool_free(cmd->sense, scst_sense_mempool);
3057                         cmd->sense = NULL;
3058                 }
3059         }
3060
3061 out:
3062         TRACE_EXIT();
3063         return;
3064 }
3065
3066 void __init scst_scsi_op_list_init(void)
3067 {
3068         int i;
3069         uint8_t op = 0xff;
3070
3071         TRACE_ENTRY();
3072
3073         for (i = 0; i < 256; i++)
3074                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3075
3076         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3077                 if (scst_scsi_op_table[i].ops != op) {
3078                         op = scst_scsi_op_table[i].ops;
3079                         scst_scsi_op_list[op] = i;
3080                 }
3081         }
3082
3083         TRACE_EXIT();
3084         return;
3085 }
3086
3087 #ifdef DEBUG
3088 /* Original taken from the XFS code */
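/*
 * This is the Park-Miller "minimal standard" Lehmer generator,
 * x' = 16807 * x mod (2^31 - 1), computed without overflow via Schrage's
 * decomposition 2^31 - 1 = 16807 * 127773 + 2836.
 */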
3089 unsigned long scst_random(void)
3090 {
3091         static int Inited;
3092         static unsigned long RandomValue;
        static DEFINE_SPINLOCK(lock);
3094         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
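        /*
         * This is the Park-Miller "minimal standard" generator computed
         * with Schrage's method: since 2^31 - 1 = 16807*127773 + 2836,
         * (16807 * rv) mod (2^31 - 1) can be evaluated without overflow
         * as 16807*(rv % 127773) - 2836*(rv / 127773), adding 2^31 - 1
         * whenever the intermediate result is not positive.
         */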
3095         register long rv;
3096         register long lo;
3097         register long hi;
3098         unsigned long flags;
3099
3100         spin_lock_irqsave(&lock, flags);
3101         if (!Inited) {
                /* The seed must be non-zero, or the generator gets stuck */
                RandomValue = jiffies ? : 1;
3103                 Inited = 1;
3104         }
3105         rv = RandomValue;
3106         hi = rv / 127773;
3107         lo = rv % 127773;
3108         rv = 16807 * lo - 2836 * hi;
        if (rv <= 0)
                rv += 2147483647;
3110         RandomValue = rv;
3111         spin_unlock_irqrestore(&lock, flags);
3112         return rv;
3113 }
3114 #endif
3115
3116 #ifdef DEBUG_TM
3117
3118 #define TM_DBG_STATE_ABORT              0
3119 #define TM_DBG_STATE_RESET              1
3120 #define TM_DBG_STATE_OFFLINE            2
3121
3122 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
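
/*
 * The debug state machine (driven by tm_dbg_change_state() below):
 * ABORT delays one command for about a minute, intended to provoke an
 * ABORT TASK from the initiator; after 5 passes it switches to RESET,
 * which blocks and delays all commands to provoke escalation to a
 * reset; after 1 pass it either returns to ABORT or, if
 * TM_DBG_GO_OFFLINE is set, enters OFFLINE, where commands stay
 * blocked until a forced task management call releases them.
 */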
3123
3124 static void tm_dbg_timer_fn(unsigned long arg);
3125
static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3127 /* All serialized by scst_tm_dbg_lock */
struct {
        unsigned int tm_dbg_release:1;
        unsigned int tm_dbg_blocked:1;
} tm_dbg_flags;
3133 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3134 static int tm_dbg_delayed_cmds_count;
3135 static int tm_dbg_passed_cmds_count;
3136 static int tm_dbg_state;
3137 static int tm_dbg_on_state_passes;
3138 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3139 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3140
3141 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3142
3143 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3144         struct scst_acg_dev *acg_dev)
3145 {
3146         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3147                 unsigned long flags;
3148                 /* Do TM debugging only for LUN 0 */
3149                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3150                 tm_dbg_p_cmd_list_waitQ = 
3151                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3152                 tm_dbg_state = INIT_TM_DBG_STATE;
3153                 tm_dbg_on_state_passes =
3154                         tm_dbg_on_state_num_passes[tm_dbg_state];
3155                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
                PRINT_INFO("LUN 0 connected via target driver %s is under "
                        "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3158                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3159         }
3160 }
3161
3162 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3163 {
3164         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3165                 unsigned long flags;
3166                 del_timer_sync(&tm_dbg_timer);
3167                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3168                 tm_dbg_p_cmd_list_waitQ = NULL;
3169                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3170         }
3171 }
3172
3173 static void tm_dbg_timer_fn(unsigned long arg)
3174 {
3175         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3176         tm_dbg_flags.tm_dbg_release = 1;
3177         smp_wmb();
3178         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3179 }
3180
3181 /* Called under scst_tm_dbg_lock and IRQs off */
3182 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3183 {
        switch (tm_dbg_state) {
3185         case TM_DBG_STATE_ABORT:
3186                 if (tm_dbg_delayed_cmds_count == 0) {
3187                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
                        TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
                                "for %ld.%02ld seconds (%ld HZ), "
                                "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
                                d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3192                         mod_timer(&tm_dbg_timer, jiffies + d);
3193 #if 0
3194                         tm_dbg_flags.tm_dbg_blocked = 1;
3195 #endif
3196                 } else {
3197                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3198                                 "(tag %llu), delayed_cmds_count=%d, "
3199                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3200                                 tm_dbg_delayed_cmds_count,
3201                                 tm_dbg_on_state_passes);
3202                         if (tm_dbg_delayed_cmds_count == 2)
3203                                 tm_dbg_flags.tm_dbg_blocked = 0;
3204                 }
3205                 break;
3206
3207         case TM_DBG_STATE_RESET:
3208         case TM_DBG_STATE_OFFLINE:
3209                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3210                         "(tag %llu), delayed_cmds_count=%d, "
3211                         "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3212                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3213                 tm_dbg_flags.tm_dbg_blocked = 1;
3214                 break;
3215
3216         default:
3217                 sBUG();
3218         }
3219         /* IRQs already off */
3220         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3221         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3222         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3223         cmd->tm_dbg_delayed = 1;
3224         tm_dbg_delayed_cmds_count++;
3225         return;
3226 }
3227
3228 /* No locks */
3229 void tm_dbg_check_released_cmds(void)
3230 {
3231         if (tm_dbg_flags.tm_dbg_release) {
3232                 struct scst_cmd *cmd, *tc;
3233                 spin_lock_irq(&scst_tm_dbg_lock);
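                /*
                 * Reverse traversal plus list_move() to the list head
                 * preserves the original submission order on the
                 * active list.
                 */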
3234                 list_for_each_entry_safe_reverse(cmd, tc, 
3235                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3236                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3237                                 "delayed_cmds_count=%d", cmd, cmd->tag,
3238                                 tm_dbg_delayed_cmds_count);
3239                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3240                         list_move(&cmd->cmd_list_entry,
3241                                 &cmd->cmd_lists->active_cmd_list);
3242                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3243                 }
3244                 tm_dbg_flags.tm_dbg_release = 0;
3245                 spin_unlock_irq(&scst_tm_dbg_lock);
3246         }
3247 }
3248
3249 /* Called under scst_tm_dbg_lock */
static void tm_dbg_change_state(void)
{
        tm_dbg_flags.tm_dbg_blocked = 0;
        if (--tm_dbg_on_state_passes == 0) {
                switch (tm_dbg_state) {
                case TM_DBG_STATE_ABORT:
                        TRACE_MGMT_DBG("%s", "Changing tm_dbg_state to RESET");
                        tm_dbg_state = TM_DBG_STATE_RESET;
                        break;
                case TM_DBG_STATE_RESET:
                case TM_DBG_STATE_OFFLINE:
                        if (TM_DBG_GO_OFFLINE) {
                                TRACE_MGMT_DBG("%s",
                                        "Changing tm_dbg_state to OFFLINE");
                                tm_dbg_state = TM_DBG_STATE_OFFLINE;
                        } else {
                                TRACE_MGMT_DBG("%s",
                                        "Changing tm_dbg_state to ABORT");
                                tm_dbg_state = TM_DBG_STATE_ABORT;
                        }
                        break;
                default:
                        sBUG();
                }
                tm_dbg_on_state_passes =
                        tm_dbg_on_state_num_passes[tm_dbg_state];
        }

        TRACE_MGMT_DBG("%s", "Deleting timer");
        del_timer(&tm_dbg_timer);
}
3286
3287 /* No locks */
3288 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3289 {
3290         int res = 0;
3291         unsigned long flags;
3292
3293         if (cmd->tm_dbg_immut)
3294                 goto out;
3295
3296         if (cmd->tm_dbg_delayed) {
3297                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3298                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3299                         "delayed_cmds_count=%d", cmd, cmd->tag,
3300                         tm_dbg_delayed_cmds_count);
3301
3302                 cmd->tm_dbg_immut = 1;
3303                 tm_dbg_delayed_cmds_count--;
3304                 if ((tm_dbg_delayed_cmds_count == 0) &&
3305                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3306                         tm_dbg_change_state();
3307                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3308         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3309                                         &cmd->tgt_dev->tgt_dev_flags)) {
                /* Delay every 50th command */
3311                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3312                 if (tm_dbg_flags.tm_dbg_blocked ||
3313                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3314                         tm_dbg_delay_cmd(cmd);
3315                         res = 1;
3316                 } else
3317                         cmd->tm_dbg_immut = 1;
3318                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3319         }
3320
3321 out:
3322         return res;
3323 }
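
/*
 * A minimal caller sketch (hypothetical; the real call site lives in
 * the command processing path, not in this file): the processing
 * thread is assumed to simply skip any command that tm_dbg_check_cmd()
 * decided to delay.
 */
#if 0
        if (unlikely(tm_dbg_check_cmd(cmd) != 0))
                return; /* cmd was moved to the delayed list */
#endif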
3324
3325 /* No locks */
3326 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3327 {
3328         struct scst_cmd *c;
3329         unsigned long flags;
3330
3331         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3332         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3333                                 cmd_list_entry) {
3334                 if (c == cmd) {
3335                         TRACE_MGMT_DBG("Abort request for "
3336                                 "delayed cmd %p (tag=%llu), moving it to "
3337                                 "active cmd list (delayed_cmds_count=%d)",
3338                                 c, c->tag, tm_dbg_delayed_cmds_count);
3339
3340                         if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
                                if ((scst_random() % 10) == 5) {
3342                                         scst_set_cmd_error(cmd,
3343                                            SCST_LOAD_SENSE(scst_sense_hardw_error));
3344                                         /* It's completed now */
3345                                 }
3346                         }
3347
                        spin_lock(&c->cmd_lists->cmd_list_lock);
                        list_move(&c->cmd_list_entry,
                                &c->cmd_lists->active_cmd_list);
                        wake_up(&c->cmd_lists->cmd_list_waitQ);
                        spin_unlock(&c->cmd_lists->cmd_list_lock);
3353                         break;
3354                 }
3355         }
3356         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3357 }
3358
3359 /* Might be called under scst_mutex */
3360 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3361 {
3362         unsigned long flags;
3363
3364         if (dev != NULL) {
3365                 struct scst_tgt_dev *tgt_dev;
                bool found = false;
3367
3368                 spin_lock_bh(&dev->dev_lock);
3369                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3370                                             dev_tgt_dev_list_entry) {
3371                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3372                                         &tgt_dev->tgt_dev_flags)) {
                                found = true;
3374                                 break;
3375                         }
3376                 }
3377                 spin_unlock_bh(&dev->dev_lock);
3378
3379                 if (!found)
3380                         goto out;
3381         }
3382
3383         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3384         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3385                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3386                         tm_dbg_delayed_cmds_count);
3387                 tm_dbg_change_state();
3388                 tm_dbg_flags.tm_dbg_release = 1;
3389                 smp_wmb();
3390                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3391                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3392         } else {
                TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
3394         }
3395         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3396
3397 out:
3398         return;
3399 }
3400
3401 int tm_dbg_is_release(void)
3402 {
3403         return tm_dbg_flags.tm_dbg_release;
3404 }
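
/*
 * How the pieces are assumed to fit together (hypothetical wait loop,
 * not part of this file; cmd_lists is an assumed local): the command
 * processing thread includes tm_dbg_is_release() in its wait
 * condition, so the wake-up issued by tm_dbg_timer_fn() or
 * tm_dbg_task_mgmt() makes it call tm_dbg_check_released_cmds() and
 * re-activate the delayed commands.
 */
#if 0
        wait_event(*tm_dbg_p_cmd_list_waitQ,
                !list_empty(&cmd_lists->active_cmd_list) ||
                tm_dbg_is_release());
        tm_dbg_check_released_cmds();
#endif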
3405 #endif /* DEBUG_TM */
3406
3407 #ifdef DEBUG_SN
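/*
 * Randomly flip the queue type of passing commands to stress the
 * SN/ordering machinery: roughly one command in a thousand starts a
 * burst of 1-9 commands with a forced ORDERED or HEAD_OF_QUEUE type,
 * and a few isolated commands get their type flipped as well.
 */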
3408 void scst_check_debug_sn(struct scst_cmd *cmd)
3409 {
        static DEFINE_SPINLOCK(lock);
3411         static int type;
3412         static int cnt;
3413         unsigned long flags;
3414         int old = cmd->queue_type;
3415
3416         spin_lock_irqsave(&lock, flags);
3417
3418         if (cnt == 0) {
3419                 if ((scst_random() % 1000) == 500) {
3420                         if ((scst_random() % 3) == 1)
3421                                 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3422                         else
3423                                 type = SCST_CMD_QUEUE_ORDERED;
                        do {
                                cnt = scst_random() % 10;
                        } while (cnt == 0);
3427                 } else
3428                         goto out_unlock;
3429         }
3430
3431         cmd->queue_type = type;
3432         cnt--;
3433
        if ((scst_random() % 1000) == 750)
                cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
        else if ((scst_random() % 1000) == 751)
                cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        else if ((scst_random() % 1000) == 752)
                cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3440
3441         TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3442                 cmd->queue_type, cnt);
3443
3444 out_unlock:
3445         spin_unlock_irqrestore(&lock, flags);
3446         return;
3447 }
3448 #endif /* DEBUG_SN */