- Fixed GFP_KERNEL misuse. Reported independently by mbe1@charter.net and Erez Zilbe...
[mirror/scst/.git] / scst / src / scst_lib.c
1 /*
2  *  scst_lib.c
3  *  
4  *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/kthread.h>
26 #include <linux/cdrom.h>
27 #include <asm/unistd.h>
28 #include <asm/string.h>
29
30 #ifdef SCST_HIGHMEM
31 #include <linux/highmem.h>
32 #endif
33
34 #include "scst.h"
35 #include "scst_priv.h"
36 #include "scst_mem.h"
37
38 #include "scst_cdbprobe.h"
39
40 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
41 static void scst_check_internal_sense(struct scst_device *dev, int result,
42         uint8_t *sense, int sense_len);
43
44 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
45 {
46         int res = 0;
47         unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
48
49         TRACE_ENTRY();
50
51         sBUG_ON(cmd->sense != NULL);
52
53         cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
54         if (cmd->sense == NULL) {
55                 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
56                         "The sense data will be lost!!", cmd->cdb[0]);
57                 res = -ENOMEM;
58                 goto out;
59         }
60
61         memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
62
63 out:
64         TRACE_EXIT_RES(res);
65         return res;
66 }
67
68 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
69         const uint8_t *sense, unsigned int len)
70 {
71         int res;
72
73         TRACE_ENTRY();
74
75         res = scst_alloc_sense(cmd, atomic);
76         if (res != 0) {
77                 PRINT_BUFFER("Lost sense", sense, len);
78                 goto out;
79         }
80
81         memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
82         TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
83
84 out:
85         TRACE_EXIT_RES(res);
86         return res;
87 }
88
89 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
90 {
91         TRACE_ENTRY();
92
93         cmd->status = status;
94         cmd->host_status = DID_OK;
95
96         cmd->data_direction = SCST_DATA_NONE;
97         cmd->is_send_status = 1;
98         cmd->resp_data_len = 0;
99
100         cmd->completed = 1;
101
102         TRACE_EXIT();
103         return;
104 }
105
106 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
107 {
108         int rc;
109
110         TRACE_ENTRY();
111
112         scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
113
114         rc = scst_alloc_sense(cmd, 1);
115         if (rc != 0) {
116                 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
117                         key, asc, ascq);
118                 goto out;
119         }
120
121         scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
122         TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
123
124 out:
125         TRACE_EXIT();
126         return;
127 }
128
129 void scst_set_sense(uint8_t *buffer, int len, int key,
130         int asc, int ascq)
131 {
132         memset(buffer, 0, len);
133         buffer[0] = 0x70;       /* Error Code                   */
134         buffer[2] = key;        /* Sense Key                    */
135         buffer[7] = 0x0a;       /* Additional Sense Length      */
136         buffer[12] = asc;       /* ASC                          */
137         buffer[13] = ascq;      /* ASCQ                         */
138         TRACE_BUFFER("Sense set", buffer, len);
139         return;
140 }
141
142 void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense, 
143         unsigned int len)
144 {
145         TRACE_ENTRY();
146
147         scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
148         scst_alloc_set_sense(cmd, 1, sense, len);
149
150         TRACE_EXIT();
151         return;
152 }
153
154 void scst_set_busy(struct scst_cmd *cmd)
155 {
156         int c = atomic_read(&cmd->sess->sess_cmd_count);
157
158         TRACE_ENTRY();
159
160         if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
161                 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
162                 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
163                         "(cmds count %d, queue_type %x, sess->init_phase %d)",
164                         cmd->sess->initiator_name, c,
165                         cmd->queue_type, cmd->sess->init_phase);
166         } else {
167                 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
168                 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
169                         "initiator %s (cmds count %d, queue_type %x, "
170                         "sess->init_phase %d)", cmd->sess->initiator_name, c,
171                         cmd->queue_type, cmd->sess->init_phase);
172         }
173
174         TRACE_EXIT();
175         return;
176 }
177
178 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
179 {
180         int i, l;
181
182         TRACE_ENTRY();
183
184         scst_check_restore_sg_buff(cmd);
185         cmd->resp_data_len = resp_data_len;
186
187         if (resp_data_len == cmd->bufflen)
188                 goto out;
189
190         l = 0;
191         for(i = 0; i < cmd->sg_cnt; i++) {
192                 l += cmd->sg[i].length;
193                 if (l >= resp_data_len) {
194                         int left = resp_data_len - (l - cmd->sg[i].length);
195 #ifdef DEBUG
196                         TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
197                                 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
198                                 "left %d", cmd, cmd->tag, resp_data_len, i,
199                                 cmd->sg[i].length, left);
200 #endif
201                         cmd->orig_sg_cnt = cmd->sg_cnt;
202                         cmd->orig_sg_entry = i;
203                         cmd->orig_entry_len = cmd->sg[i].length;
204                         cmd->sg_cnt = (left > 0) ? i+1 : i;
205                         cmd->sg[i].length = left;
206                         cmd->sg_buff_modified = 1;
207                         break;
208                 }
209         }
210
211 out:
212         TRACE_EXIT();
213         return;
214 }
215
216 /* Called under scst_mutex and suspended activity */
217 int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
218 {
219         struct scst_device *dev;
220         int res = 0;
221         static int dev_num; /* protected by scst_mutex */
222
223         TRACE_ENTRY();
224
225         dev = kzalloc(sizeof(*dev), gfp_mask);
226         if (dev == NULL) {
227                 TRACE(TRACE_OUT_OF_MEM, "%s",
228                         "Allocation of scst_device failed");
229                 res = -ENOMEM;
230                 goto out;
231         }
232
233         dev->handler = &scst_null_devtype;
234         dev->p_cmd_lists = &scst_main_cmd_lists;
235         atomic_set(&dev->dev_cmd_count, 0);
236         atomic_set(&dev->write_cmd_count, 0);
237         spin_lock_init(&dev->dev_lock);
238         atomic_set(&dev->on_dev_count, 0);
239         INIT_LIST_HEAD(&dev->blocked_cmd_list);
240         INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
241         INIT_LIST_HEAD(&dev->dev_acg_dev_list);
242         INIT_LIST_HEAD(&dev->threads_list);
243         init_waitqueue_head(&dev->on_dev_waitQ);
244         dev->dev_double_ua_possible = 1;
245         dev->dev_serialized = 1;
246         dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
247         dev->dev_num = dev_num++;
248
249         *out_dev = dev;
250
251 out:
252         TRACE_EXIT_RES(res);
253         return res;
254 }
255
256 /* Called under scst_mutex and suspended activity */
257 void scst_free_device(struct scst_device *dev)
258 {
259         TRACE_ENTRY();
260
261 #ifdef EXTRACHECKS
262         if (!list_empty(&dev->dev_tgt_dev_list) || 
263             !list_empty(&dev->dev_acg_dev_list)) {
264                 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
265                         "is not empty!", __FUNCTION__);
266                 sBUG();
267         }
268 #endif
269
270         kfree(dev);
271
272         TRACE_EXIT();
273         return;
274 }
275
276 struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
277         struct scst_device *dev, lun_t lun)
278 {
279         struct scst_acg_dev *res;
280
281         TRACE_ENTRY();
282
283 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
284         res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
285 #else
286         res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
287 #endif
288         if (res == NULL) {
289                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
290                 goto out;
291         }
292 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
293         memset(res, 0, sizeof(*res));
294 #endif
295
296         res->dev = dev;
297         res->acg = acg;
298         res->lun = lun;
299         
300 out:
301         TRACE_EXIT_HRES(res);
302         return res;
303 }
304
305 /* The activity supposed to be suspended and scst_mutex held */
306 void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
307 {
308         TRACE_ENTRY();
309         
310         TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list", 
311                 acg_dev);
312         list_del(&acg_dev->acg_dev_list_entry);
313         list_del(&acg_dev->dev_acg_dev_list_entry);
314         
315         kmem_cache_free(scst_acgd_cachep, acg_dev);
316         
317         TRACE_EXIT();
318         return;
319 }
320
321 /* The activity supposed to be suspended and scst_mutex held */
322 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
323 {
324         struct scst_acg *acg;
325
326         TRACE_ENTRY();
327
328         acg = kzalloc(sizeof(*acg), GFP_KERNEL);
329         if (acg == NULL) {
330                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
331                 goto out;
332         }
333
334         INIT_LIST_HEAD(&acg->acg_dev_list);
335         INIT_LIST_HEAD(&acg->acg_sess_list);
336         INIT_LIST_HEAD(&acg->acn_list);
337         acg->acg_name = acg_name;
338         
339         TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
340         list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
341         
342 out:
343         TRACE_EXIT_HRES(acg);
344         return acg;
345 }
346
347 /* The activity supposed to be suspended and scst_mutex held */
348 int scst_destroy_acg(struct scst_acg *acg)
349 {
350         struct scst_acn *n, *nn;
351         struct scst_acg_dev *acg_dev, *acg_dev_tmp;
352         int res = 0;
353
354         TRACE_ENTRY();
355
356         if (!list_empty(&acg->acg_sess_list)) {
357                 PRINT_ERROR("%s: acg_sess_list is not empty!", __FUNCTION__);
358                 res = -EBUSY;
359                 goto out;
360         }
361
362         TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
363         list_del(&acg->scst_acg_list_entry);
364         
365         /* Freeing acg_devs */
366         list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list, 
367                         acg_dev_list_entry) {
368                 struct scst_tgt_dev *tgt_dev, *tt;
369                 list_for_each_entry_safe(tgt_dev, tt,
370                                  &acg_dev->dev->dev_tgt_dev_list,
371                                  dev_tgt_dev_list_entry) {
372                         if (tgt_dev->acg_dev == acg_dev)
373                                 scst_free_tgt_dev(tgt_dev);
374                 }
375                 scst_free_acg_dev(acg_dev);
376         }
377
378         /* Freeing names */
379         list_for_each_entry_safe(n, nn, &acg->acn_list, 
380                         acn_list_entry) {
381                 list_del(&n->acn_list_entry);
382                 kfree(n->name);
383                 kfree(n);
384         }
385         INIT_LIST_HEAD(&acg->acn_list);
386
387         kfree(acg);
388 out:
389         TRACE_EXIT_RES(res);
390         return res;
391 }
392
393 /* scst_mutex supposed to be held, there must not be parallel activity in this sess */
394 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
395         struct scst_acg_dev *acg_dev)
396 {
397         int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
398         struct scst_tgt_dev *tgt_dev;
399         struct scst_device *dev = acg_dev->dev;
400         struct list_head *sess_tgt_dev_list_head;
401         struct scst_tgt_template *vtt = sess->tgt->tgtt;
402         int rc, i;
403
404         TRACE_ENTRY();
405
406 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
407         tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
408 #else
409         tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
410 #endif
411         if (tgt_dev == NULL) {
412                 TRACE(TRACE_OUT_OF_MEM, "%s",
413                       "Allocation of scst_tgt_dev failed");
414                 goto out;
415         }
416 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
417         memset(tgt_dev, 0, sizeof(*tgt_dev));
418 #endif
419
420         tgt_dev->dev = dev;
421         tgt_dev->lun = acg_dev->lun;
422         tgt_dev->acg_dev = acg_dev;
423         tgt_dev->sess = sess;
424         atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
425         
426         scst_sgv_pool_use_norm(tgt_dev);
427
428         if (dev->scsi_dev != NULL) {
429                 ini_sg = dev->scsi_dev->host->sg_tablesize;
430                 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
431                 ini_use_clustering = (dev->scsi_dev->host->use_clustering == 
432                                 ENABLE_CLUSTERING);
433         } else {
434                 ini_sg = (1 << 15) /* infinite */;
435                 ini_unchecked_isa_dma = 0;
436                 ini_use_clustering = 0;
437         }
438         tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
439
440         if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) && 
441             !sess->tgt->tgtt->no_clustering) {
442                 scst_sgv_pool_use_norm_clust(tgt_dev); 
443         }
444
445         if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
446                 scst_sgv_pool_use_dma(tgt_dev);
447         } else {
448 #ifdef SCST_HIGHMEM
449                 scst_sgv_pool_use_highmem(tgt_dev);
450 #endif
451         }
452
453         if (dev->scsi_dev != NULL) {
454                 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
455                       "SCST lun=%Ld", dev->scsi_dev->host->host_no, 
456                       dev->scsi_dev->channel, dev->scsi_dev->id, 
457                       dev->scsi_dev->lun, (uint64_t)tgt_dev->lun);
458         }
459         else {
460                 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld", 
461                         dev->virt_name, (uint64_t)tgt_dev->lun);
462         }
463
464         spin_lock_init(&tgt_dev->tgt_dev_lock);
465         INIT_LIST_HEAD(&tgt_dev->UA_list);
466         spin_lock_init(&tgt_dev->thr_data_lock);
467         INIT_LIST_HEAD(&tgt_dev->thr_data_list);
468         spin_lock_init(&tgt_dev->sn_lock);
469         INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
470         INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
471         tgt_dev->expected_sn = 1;
472         tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
473         tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
474         for(i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
475                 atomic_set(&tgt_dev->sn_slots[i], 0);
476
477         if (dev->handler->parse_atomic && 
478             (sess->tgt->tgtt->preprocessing_done == NULL)) {
479                 if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
480                     (sess->tgt->tgtt->rdy_to_xfer == NULL))
481                         __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
482                                 &tgt_dev->tgt_dev_flags);
483                 if (dev->handler->exec_atomic || (dev->handler->exec == NULL))
484                         __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
485                                 &tgt_dev->tgt_dev_flags);
486         }
487         if (dev->handler->exec_atomic || (dev->handler->exec == NULL)) {
488                 if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
489                     (sess->tgt->tgtt->rdy_to_xfer == NULL))
490                         __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
491                                 &tgt_dev->tgt_dev_flags);
492                 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
493                                 &tgt_dev->tgt_dev_flags);
494                 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
495                         &tgt_dev->tgt_dev_flags);
496         }
497         if ((dev->handler->dev_done_atomic ||
498              (dev->handler->dev_done == NULL)) &&
499             sess->tgt->tgtt->xmit_response_atomic) {
500                 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
501                         &tgt_dev->tgt_dev_flags);
502         }
503
504         spin_lock_bh(&scst_temp_UA_lock);
505         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
506                 SCST_LOAD_SENSE(scst_sense_reset_UA));
507         scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
508         spin_unlock_bh(&scst_temp_UA_lock);
509
510         tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
511
512         if (vtt->threads_num > 0) {
513                 rc = 0;
514                 if (dev->handler->threads_num > 0)
515                         rc = scst_add_dev_threads(dev, vtt->threads_num);
516                 else if (dev->handler->threads_num == 0)
517                         rc = scst_add_cmd_threads(vtt->threads_num);
518                 if (rc != 0)
519                         goto out_free;
520         }
521
522         if (dev->handler && dev->handler->attach_tgt) {
523                 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
524                       tgt_dev);
525                 rc = dev->handler->attach_tgt(tgt_dev);
526                 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
527                 if (rc != 0) {
528                         PRINT_ERROR("Device handler's %s attach_tgt() "
529                             "failed: %d", dev->handler->name, rc);
530                         goto out_thr_free;
531                 }
532         }
533
534         spin_lock_bh(&dev->dev_lock);   
535         list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
536         if (dev->dev_reserved)
537                 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
538         spin_unlock_bh(&dev->dev_lock);
539
540         sess_tgt_dev_list_head = 
541                 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
542         list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);
543
544 out:
545         TRACE_EXIT();
546         return tgt_dev;
547
548 out_thr_free:
549         if (vtt->threads_num > 0) {
550                 if (dev->handler->threads_num > 0)
551                         scst_del_dev_threads(dev, vtt->threads_num);
552                 else if (dev->handler->threads_num == 0)
553                         scst_del_cmd_threads(vtt->threads_num);
554         }
555
556 out_free:
557         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
558         tgt_dev = NULL;
559         goto out;
560 }
561
562 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
563
564 /* No locks supposed to be held, scst_mutex - held */
565 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
566 {
567         TRACE_ENTRY();
568
569         scst_clear_reservation(tgt_dev);
570
571         /* With activity suspended the lock isn't needed, but let's be safe */
572         spin_lock_bh(&tgt_dev->tgt_dev_lock);
573         scst_free_all_UA(tgt_dev);
574         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
575
576         spin_lock_bh(&scst_temp_UA_lock);
577         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
578                 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
579         scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
580         spin_unlock_bh(&scst_temp_UA_lock);
581
582         TRACE_EXIT();
583         return;
584 }
585
586 /* scst_mutex supposed to be held, there must not be parallel activity in this sess */
587 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
588 {
589         struct scst_device *dev = tgt_dev->dev;
590         struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
591
592         TRACE_ENTRY();
593
594         tm_dbg_deinit_tgt_dev(tgt_dev);
595
596         spin_lock_bh(&dev->dev_lock);
597         list_del(&tgt_dev->dev_tgt_dev_list_entry);
598         spin_unlock_bh(&dev->dev_lock);
599
600         list_del(&tgt_dev->sess_tgt_dev_list_entry);
601
602         scst_clear_reservation(tgt_dev);
603         scst_free_all_UA(tgt_dev);
604
605         if (dev->handler && dev->handler->detach_tgt) {
606                 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
607                       tgt_dev);
608                 dev->handler->detach_tgt(tgt_dev);
609                 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
610         }
611
612         if (vtt->threads_num > 0) {
613                 if (dev->handler->threads_num > 0)
614                         scst_del_dev_threads(dev, vtt->threads_num);
615                 else if (dev->handler->threads_num == 0)
616                         scst_del_cmd_threads(vtt->threads_num);
617         }
618
619         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
620
621         TRACE_EXIT();
622         return;
623 }
624
625 /* scst_mutex supposed to be held */
626 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
627 {
628         int res = 0;
629         struct scst_acg_dev *acg_dev;
630         struct scst_tgt_dev *tgt_dev;
631
632         TRACE_ENTRY();
633
634         list_for_each_entry(acg_dev, &sess->acg->acg_dev_list, 
635                         acg_dev_list_entry) {
636                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
637                 if (tgt_dev == NULL) {
638                         res = -ENOMEM;
639                         goto out_free;
640                 }
641         }
642
643 out:
644         TRACE_EXIT();
645         return res;
646
647 out_free:
648         scst_sess_free_tgt_devs(sess);
649         goto out;
650 }
651
652 /* scst_mutex supposed to be held, there must not be parallel activity in this sess */
653 void scst_sess_free_tgt_devs(struct scst_session *sess)
654 {
655         int i;
656         struct scst_tgt_dev *tgt_dev, *t;
657
658         TRACE_ENTRY();
659         
660         /* The session is going down, no users, so no locks */
661         for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
662                 struct list_head *sess_tgt_dev_list_head =
663                         &sess->sess_tgt_dev_list_hash[i];
664                 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
665                                 sess_tgt_dev_list_entry) {
666                         scst_free_tgt_dev(tgt_dev);
667                 }
668                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
669         }
670
671         TRACE_EXIT();
672         return;
673 }
674
675 /* The activity supposed to be suspended and scst_mutex held */
676 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
677         int read_only)
678 {
679         int res = 0;
680         struct scst_acg_dev *acg_dev;
681         struct scst_tgt_dev *tgt_dev;
682         struct scst_session *sess;
683         LIST_HEAD(tmp_tgt_dev_list);
684         
685         TRACE_ENTRY();
686         
687         INIT_LIST_HEAD(&tmp_tgt_dev_list);
688         
689 #ifdef EXTRACHECKS
690         list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
691                 if (acg_dev->dev == dev) {
692                         PRINT_ERROR("Device is already in group %s", 
693                                 acg->acg_name);
694                         res = -EINVAL;
695                         goto out;
696                 }
697         }
698 #endif
699         
700         acg_dev = scst_alloc_acg_dev(acg, dev, lun);
701         if (acg_dev == NULL) {
702                 res = -ENOMEM;
703                 goto out;
704         }
705         acg_dev->rd_only_flag = read_only;
706
707         TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list", 
708                 acg_dev);
709         list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
710         list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
711         
712         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) 
713         {
714                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
715                 if (tgt_dev == NULL) {
716                         res = -ENOMEM;
717                         goto out_free;
718                 }
719                 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
720                               &tmp_tgt_dev_list);
721         }
722
723 out:
724         if (res == 0) {
725                 if (dev->virt_name != NULL) {
726                         PRINT_INFO("Added device %s to group %s (LUN %Ld, "
727                                 "rd_only %d)", dev->virt_name, acg->acg_name,
728                                 lun, read_only);
729                 } else {
730                         PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
731                                 "%Ld, rd_only %d)", dev->scsi_dev->host->host_no,
732                                 dev->scsi_dev->channel, dev->scsi_dev->id,
733                                 dev->scsi_dev->lun, acg->acg_name, lun,
734                                 read_only);
735                 }
736         }
737
738         TRACE_EXIT_RES(res);
739         return res;
740
741 out_free:
742         list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
743                          extra_tgt_dev_list_entry) {
744                 scst_free_tgt_dev(tgt_dev);
745         }
746         scst_free_acg_dev(acg_dev);
747         goto out;
748 }
749
750 /* The activity supposed to be suspended and scst_mutex held */
751 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
752 {
753         int res = 0;
754         struct scst_acg_dev *acg_dev = NULL, *a;
755         struct scst_tgt_dev *tgt_dev, *tt;
756         
757         TRACE_ENTRY();
758         
759         list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
760                 if (a->dev == dev) {
761                         acg_dev = a;
762                         break;
763                 }
764         }
765         
766         if (acg_dev == NULL) {
767                 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
768                 res = -EINVAL;
769                 goto out;
770         }
771
772         list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
773                          dev_tgt_dev_list_entry) {
774                 if (tgt_dev->acg_dev == acg_dev)
775                         scst_free_tgt_dev(tgt_dev);
776         }
777         scst_free_acg_dev(acg_dev);
778
779 out:
780         if (res == 0) {
781                 if (dev->virt_name != NULL) {
782                         PRINT_INFO("Removed device %s from group %s",
783                                 dev->virt_name, acg->acg_name);
784                 } else {
785                         PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
786                                 dev->scsi_dev->host->host_no,
787                                 dev->scsi_dev->channel, dev->scsi_dev->id,
788                                 dev->scsi_dev->lun, acg->acg_name);
789                 }
790         }
791
792         TRACE_EXIT_RES(res);
793         return res;
794 }
795
796 /* scst_mutex supposed to be held */
797 int scst_acg_add_name(struct scst_acg *acg, const char *name)
798 {
799         int res = 0;
800         struct scst_acn *n;
801         int len;
802         char *nm;
803         
804         TRACE_ENTRY();
805
806         list_for_each_entry(n, &acg->acn_list, acn_list_entry) 
807         {
808                 if (strcmp(n->name, name) == 0) {
809                         PRINT_ERROR("Name %s already exists in group %s",
810                                 name, acg->acg_name);
811                         res = -EINVAL;
812                         goto out;
813                 }
814         }
815         
816         n = kmalloc(sizeof(*n), GFP_KERNEL);
817         if (n == NULL) {
818                 PRINT_ERROR("%s", "Unable to allocate scst_acn");
819                 res = -ENOMEM;
820                 goto out;
821         }
822         
823         len = strlen(name);
824         nm = kmalloc(len + 1, GFP_KERNEL);
825         if (nm == NULL) {
826                 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
827                 res = -ENOMEM;
828                 goto out_free;
829         }
830         
831         strcpy(nm, name);
832         n->name = nm;
833         
834         list_add_tail(&n->acn_list_entry, &acg->acn_list);
835
836 out:
837         if (res == 0) {
838                 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
839         }
840
841         TRACE_EXIT_RES(res);
842         return res;
843
844 out_free:
845         kfree(n);
846         goto out;
847 }
848
849 /* scst_mutex supposed to be held */
850 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
851 {
852         int res = -EINVAL;
853         struct scst_acn *n;
854         
855         TRACE_ENTRY();
856         
857         list_for_each_entry(n, &acg->acn_list, acn_list_entry)
858         {
859                 if (strcmp(n->name, name) == 0) {
860                         list_del(&n->acn_list_entry);
861                         kfree(n->name);
862                         kfree(n);
863                         res = 0;
864                         break;
865                 }
866         }
867         
868         if (res == 0) {
869                 PRINT_INFO("Removed name %s from group %s", name,
870                         acg->acg_name);
871         } else {
872                 PRINT_ERROR("Unable to find name %s in group %s", name,
873                         acg->acg_name);
874         }
875
876         TRACE_EXIT_RES(res);
877         return res;
878 }
879
880 struct scst_cmd *scst_create_prepare_internal_cmd(
881         struct scst_cmd *orig_cmd, int bufsize)
882 {
883         struct scst_cmd *res;
884         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
885
886         TRACE_ENTRY();
887
888         res = scst_alloc_cmd(gfp_mask);
889         if (res == NULL)
890                 goto out;
891
892         res->cmd_lists = orig_cmd->cmd_lists;
893         res->sess = orig_cmd->sess;
894         res->state = SCST_CMD_STATE_DEV_PARSE;
895         res->atomic = scst_cmd_atomic(orig_cmd);
896         res->internal = 1;
897         res->tgtt = orig_cmd->tgtt;
898         res->tgt = orig_cmd->tgt;
899         res->dev = orig_cmd->dev;
900         res->tgt_dev = orig_cmd->tgt_dev;
901         res->lun = orig_cmd->lun;
902         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
903         res->data_direction = SCST_DATA_UNKNOWN;
904         res->orig_cmd = orig_cmd;
905
906         res->bufflen = bufsize;
907
908 out:
909         TRACE_EXIT_HRES((unsigned long)res);
910         return res;
911 }
912
913 void scst_free_internal_cmd(struct scst_cmd *cmd)
914 {
915         TRACE_ENTRY();
916
917         __scst_cmd_put(cmd);
918
919         TRACE_EXIT();
920         return;
921 }
922
923 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
924 {
925         int res = SCST_CMD_STATE_RES_CONT_NEXT;
926 #define sbuf_size 252
927         static const uint8_t request_sense[6] =
928             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
929         struct scst_cmd *rs_cmd;
930
931         TRACE_ENTRY();
932
933         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
934         if (rs_cmd == NULL)
935                 goto out_error;
936
937         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
938         rs_cmd->cdb_len = sizeof(request_sense);
939         rs_cmd->data_direction = SCST_DATA_READ;
940
941         TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
942                 "cmd list ", rs_cmd);
943         spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
944         list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
945         spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
946
947 out:
948         TRACE_EXIT_RES(res);
949         return res;
950
951 out_error:
952         res = -1;
953         goto out;
954 #undef sbuf_size
955 }
956
957 struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
958 {
959         struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
960         uint8_t *buf;
961         int len;
962
963         TRACE_ENTRY();
964
965         if (req_cmd->dev->handler->dev_done != NULL) {
966                 int rc;
967                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
968                       req_cmd->dev->handler->name, req_cmd);
969                 rc = req_cmd->dev->handler->dev_done(req_cmd);
970                 TRACE_DBG("Dev handler %s dev_done() returned %d",
971                       req_cmd->dev->handler->name, rc);
972         }
973
974         sBUG_ON(orig_cmd);
975
976         len = scst_get_buf_first(req_cmd, &buf);
977
978         if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
979             SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
980                 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
981                         buf, len);
982                 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
983                         len);
984         } else {
985                 PRINT_ERROR("%s", "Unable to get the sense via "
986                         "REQUEST SENSE, returning HARDWARE ERROR");
987                 scst_set_cmd_error(orig_cmd,
988                         SCST_LOAD_SENSE(scst_sense_hardw_error));
989         }
990
991         if (len > 0)
992                 scst_put_buf(req_cmd, buf);
993
994         scst_free_internal_cmd(req_cmd);
995
996         TRACE_EXIT_HRES((unsigned long)orig_cmd);
997         return orig_cmd;
998 }
999
1000 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1001 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1002 {
1003         struct scsi_request *req;
1004
1005         TRACE_ENTRY();
1006
1007         if (scsi_cmd && (req = scsi_cmd->sc_request)) {
1008                 if (req) {
1009                         if (req->sr_bufflen)
1010                                 kfree(req->sr_buffer);
1011                         scsi_release_request(req);
1012                 }
1013         }
1014
1015         TRACE_EXIT();
1016         return;
1017 }
1018
1019 static void scst_send_release(struct scst_device *dev)
1020 {
1021         struct scsi_request *req;
1022         struct scsi_device *scsi_dev;
1023         uint8_t cdb[6];
1024
1025         TRACE_ENTRY();
1026         
1027         if (dev->scsi_dev == NULL)
1028                 goto out;
1029
1030         scsi_dev = dev->scsi_dev;
1031
1032         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1033         if (req == NULL) {
1034                 PRINT_ERROR("Allocation of scsi_request failed: unable "
1035                             "to RELEASE device %d:%d:%d:%d",
1036                             scsi_dev->host->host_no, scsi_dev->channel,
1037                             scsi_dev->id, scsi_dev->lun);
1038                 goto out;
1039         }
1040
1041         memset(cdb, 0, sizeof(cdb));
1042         cdb[0] = RELEASE;
1043         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1044             ((scsi_dev->lun << 5) & 0xe0) : 0;
1045         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1046         req->sr_cmd_len = sizeof(cdb);
1047         req->sr_data_direction = SCST_DATA_NONE;
1048         req->sr_use_sg = 0;
1049         req->sr_bufflen = 0;
1050         req->sr_buffer = NULL;
1051         req->sr_request->rq_disk = dev->rq_disk;
1052         req->sr_sense_buffer[0] = 0;
1053
1054         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1055                 "mid-level", req);
1056         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1057                     scst_req_done, SCST_DEFAULT_TIMEOUT, 3);
1058
1059 out:
1060         TRACE_EXIT();
1061         return;
1062 }
1063 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1064 static void scst_send_release(struct scst_device *dev)
1065 {
1066         struct scsi_device *scsi_dev;
1067         unsigned char cdb[6];
1068         unsigned char *sense;
1069         int rc, i;
1070
1071         TRACE_ENTRY();
1072         
1073         if (dev->scsi_dev == NULL)
1074                 goto out;
1075
1076         /* We can't afford missing RELEASE due to memory shortage */
1077         sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);
1078
1079         scsi_dev = dev->scsi_dev;
1080
1081         for(i = 0; i < 5; i++) {
1082                 memset(cdb, 0, sizeof(cdb));
1083                 cdb[0] = RELEASE;
1084                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1085                     ((scsi_dev->lun << 5) & 0xe0) : 0;
1086
1087                 memset(sense, 0, SCST_SENSE_BUFFERSIZE);
1088
1089                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1090                         "SCSI mid-level");
1091                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1092                                 sense, SCST_DEFAULT_TIMEOUT, 0, 0);
1093                 TRACE_DBG("MODE_SENSE done: %x", rc);
1094
1095                 if (scsi_status_is_good(rc)) {
1096                         break;
1097                 } else {
1098                         PRINT_ERROR("RELEASE failed: %d", rc);
1099                         PRINT_BUFFER("RELEASE sense", sense,
1100                                 SCST_SENSE_BUFFERSIZE);
1101                         scst_check_internal_sense(dev, rc,
1102                                         sense, SCST_SENSE_BUFFERSIZE);
1103                 }
1104         }
1105
1106         kfree(sense);
1107
1108 out:
1109         TRACE_EXIT();
1110         return;
1111 }
1112 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1113
1114 /* scst_mutex supposed to be held */
1115 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1116 {
1117         struct scst_device *dev = tgt_dev->dev;
1118         int release = 0;
1119
1120         TRACE_ENTRY();
1121
1122         spin_lock_bh(&dev->dev_lock);
1123         if (dev->dev_reserved &&
1124             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1125                 /* This is one who holds the reservation */
1126                 struct scst_tgt_dev *tgt_dev_tmp;
1127                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1128                                     dev_tgt_dev_list_entry) {
1129                         clear_bit(SCST_TGT_DEV_RESERVED,
1130                                     &tgt_dev_tmp->tgt_dev_flags);
1131                 }
1132                 dev->dev_reserved = 0;
1133                 release = 1;
1134         }
1135         spin_unlock_bh(&dev->dev_lock);
1136
1137         if (release)
1138                 scst_send_release(dev);
1139
1140         TRACE_EXIT();
1141         return;
1142 }
1143
1144 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
1145         const char *initiator_name)
1146 {
1147         struct scst_session *sess;
1148         int i;
1149         int len;
1150         char *nm;
1151
1152         TRACE_ENTRY();
1153
1154 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1155         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
1156 #else
1157         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
1158 #endif
1159         if (sess == NULL) {
1160                 TRACE(TRACE_OUT_OF_MEM, "%s",
1161                       "Allocation of scst_session failed");
1162                 goto out;
1163         }
1164 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1165         memset(sess, 0, sizeof(*sess));
1166 #endif
1167
1168         sess->init_phase = SCST_SESS_IPH_INITING;
1169         sess->shut_phase = SCST_SESS_SPH_READY;
1170         atomic_set(&sess->refcnt, 0);
1171         for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1172                 struct list_head *sess_tgt_dev_list_head =
1173                          &sess->sess_tgt_dev_list_hash[i];
1174                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1175         }
1176         spin_lock_init(&sess->sess_list_lock);
1177         INIT_LIST_HEAD(&sess->search_cmd_list);
1178         sess->tgt = tgt;
1179         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
1180         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
1181
1182 #ifdef MEASURE_LATENCY
1183         spin_lock_init(&sess->meas_lock);
1184 #endif
1185
1186         len = strlen(initiator_name);
1187         nm = kmalloc(len + 1, gfp_mask);
1188         if (nm == NULL) {
1189                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
1190                 goto out_free;
1191         }
1192         
1193         strcpy(nm, initiator_name);
1194         sess->initiator_name = nm;
1195         
1196 out:
1197         TRACE_EXIT();
1198         return sess;
1199
1200 out_free:
1201         kmem_cache_free(scst_sess_cachep, sess);
1202         sess = NULL;
1203         goto out;
1204 }
1205
1206 void scst_free_session(struct scst_session *sess)
1207 {
1208         TRACE_ENTRY();
1209
1210         mutex_lock(&scst_mutex);
1211
1212         TRACE_DBG("Removing sess %p from the list", sess);
1213         list_del(&sess->sess_list_entry);
1214         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
1215         list_del(&sess->acg_sess_list_entry);
1216
1217         scst_sess_free_tgt_devs(sess);
1218
1219         wake_up_all(&sess->tgt->unreg_waitQ);
1220
1221         mutex_unlock(&scst_mutex);
1222
1223         kfree(sess->initiator_name);
1224         kmem_cache_free(scst_sess_cachep, sess);
1225
1226         TRACE_EXIT();
1227         return;
1228 }
1229
1230 void scst_free_session_callback(struct scst_session *sess)
1231 {
1232         struct completion *c;
1233
1234         TRACE_ENTRY();
1235
1236         TRACE_DBG("Freeing session %p", sess);
1237
1238         c = sess->shutdown_compl;
1239
1240         if (sess->unreg_done_fn) {
1241                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
1242                 sess->unreg_done_fn(sess);
1243                 TRACE_DBG("%s", "unreg_done_fn() returned");
1244         }
1245         scst_free_session(sess);
1246
1247         if (c)
1248                 complete_all(c);
1249
1250         TRACE_EXIT();
1251         return;
1252 }
1253
1254 void scst_sched_session_free(struct scst_session *sess)
1255 {
1256         unsigned long flags;
1257
1258         TRACE_ENTRY();
1259
1260         spin_lock_irqsave(&scst_mgmt_lock, flags);
1261         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
1262         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
1263         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
1264         
1265         wake_up(&scst_mgmt_waitQ);
1266
1267         TRACE_EXIT();
1268         return;
1269 }
1270
1271 void scst_cmd_get(struct scst_cmd *cmd)
1272 {
1273         __scst_cmd_get(cmd);
1274 }
1275
1276 void scst_cmd_put(struct scst_cmd *cmd)
1277 {
1278         __scst_cmd_put(cmd);
1279 }
1280
1281 struct scst_cmd *scst_alloc_cmd(int gfp_mask)
1282 {
1283         struct scst_cmd *cmd;
1284
1285         TRACE_ENTRY();
1286
1287 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1288         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
1289 #else
1290         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
1291 #endif
1292         if (cmd == NULL) {
1293                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
1294                 goto out;
1295         }
1296 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1297         memset(cmd, 0, sizeof(*cmd));
1298 #endif
1299
1300         cmd->state = SCST_CMD_STATE_INIT_WAIT;
1301         atomic_set(&cmd->cmd_ref, 1);
1302         cmd->cmd_lists = &scst_main_cmd_lists;
1303         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
1304         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1305         cmd->timeout = SCST_DEFAULT_TIMEOUT;
1306         cmd->retries = 0;
1307         cmd->data_len = -1;
1308         cmd->is_send_status = 1;
1309         cmd->resp_data_len = -1;
1310
1311 out:
1312         TRACE_EXIT();
1313         return cmd;
1314 }
1315
1316 void scst_destroy_put_cmd(struct scst_cmd *cmd)
1317 {
1318         scst_sess_put(cmd->sess);
1319
1320         /* At this point tgt_dev can be dead, but the pointer remains not-NULL */
1321         if (likely(cmd->tgt_dev != NULL))
1322                 __scst_put();
1323
1324         scst_destroy_cmd(cmd);
1325         return;
1326 }
1327
1328 /* No locks supposed to be held */
1329 void scst_free_cmd(struct scst_cmd *cmd)
1330 {
1331         int destroy = 1;
1332
1333         TRACE_ENTRY();
1334
1335         TRACE_DBG("Freeing cmd %p (tag %Lu)", cmd, cmd->tag);
1336
1337         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1338                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
1339                         cmd, atomic_read(&scst_cmd_count));
1340         }
1341
1342         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
1343                 cmd->dec_on_dev_needed);
1344
1345 #if defined(EXTRACHECKS) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
1346         if (cmd->scsi_req) {
1347                 PRINT_ERROR("%s: %s", __FUNCTION__, "Cmd with unfreed "
1348                         "scsi_req!");
1349                 scst_release_request(cmd);
1350         }
1351 #endif
1352
1353         scst_check_restore_sg_buff(cmd);
1354
1355         if (unlikely(cmd->internal)) {
1356                 if (cmd->bufflen > 0)
1357                         scst_release_space(cmd);
1358                 scst_destroy_cmd(cmd);
1359                 goto out;
1360         }
1361
1362         if (cmd->tgtt->on_free_cmd != NULL) {
1363                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
1364                 cmd->tgtt->on_free_cmd(cmd);
1365                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
1366         }
1367
1368         if (likely(cmd->dev != NULL)) {
1369                 struct scst_dev_type *handler = cmd->dev->handler;
1370                 if (handler->on_free_cmd != NULL) {
1371                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1372                               handler->name, cmd);
1373                         handler->on_free_cmd(cmd);
1374                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
1375                                 handler->name);
1376                 }
1377         }
1378
1379         scst_release_space(cmd);
1380
1381         if (unlikely(cmd->sense != NULL)) {
1382                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
1383                 mempool_free(cmd->sense, scst_sense_mempool);
1384                 cmd->sense = NULL;
1385         }
1386
1387         if (likely(cmd->tgt_dev != NULL)) {
1388 #ifdef EXTRACHECKS
1389                 if (unlikely(!cmd->sent_to_midlev)) {
1390                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
1391                              "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
1392                              cmd, cmd->cdb[0], cmd->tgtt->name, (uint64_t)cmd->lun,
1393                              cmd->sn, cmd->tgt_dev->expected_sn);
1394                         scst_unblock_deferred(cmd->tgt_dev, cmd);
1395                 }
1396 #endif
1397
1398                 if (unlikely(cmd->out_of_sn)) {
1399                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
1400                                 "destroy=%d", cmd, cmd->tag, cmd->sn, destroy);
1401                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
1402                                         &cmd->cmd_flags);
1403                 }
1404         }
1405
1406         if (likely(destroy))
1407                 scst_destroy_put_cmd(cmd);
1408
1409 out:
1410         TRACE_EXIT();
1411         return;
1412 }
1413
1414 /* No locks supposed to be held. */
1415 void scst_check_retries(struct scst_tgt *tgt)
1416 {
1417         int need_wake_up = 0;
1418
1419         TRACE_ENTRY();
1420
1421         /* 
1422          * We don't worry about overflow of finished_cmds, because we check 
1423          * only for its change 
1424          */
1425         atomic_inc(&tgt->finished_cmds);
1426         smp_mb__after_atomic_inc();
1427         if (unlikely(tgt->retry_cmds > 0)) 
1428         {
1429                 struct scst_cmd *c, *tc;
1430                 unsigned long flags;
1431
1432                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
1433                       tgt->retry_cmds);
1434
1435                 spin_lock_irqsave(&tgt->tgt_lock, flags);
1436                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
1437                                 cmd_list_entry)
1438                 {
1439                         tgt->retry_cmds--;
1440
1441                         TRACE_RETRY("Moving retry cmd %p to head of active "
1442                                 "cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
1443                         spin_lock(&c->cmd_lists->cmd_list_lock);
1444                         list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
1445                         wake_up(&c->cmd_lists->cmd_list_waitQ);
1446                         spin_unlock(&c->cmd_lists->cmd_list_lock);
1447
1448                         need_wake_up++;
1449                         if (need_wake_up >= 2) /* "slow start" */
1450                                 break; 
1451                 }
1452                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1453         }
1454
1455         TRACE_EXIT();
1456         return;
1457 }
1458
1459 void scst_tgt_retry_timer_fn(unsigned long arg)
1460 {
1461         struct scst_tgt *tgt = (struct scst_tgt*)arg;
1462         unsigned long flags;
1463
1464         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
1465
1466         spin_lock_irqsave(&tgt->tgt_lock, flags);
1467         tgt->retry_timer_active = 0;
1468         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1469
1470         scst_check_retries(tgt);
1471
1472         TRACE_EXIT();
1473         return;
1474 }
1475
1476 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
1477 {
1478         struct scst_mgmt_cmd *mcmd;
1479
1480         TRACE_ENTRY();
1481
1482         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1483         if (mcmd == NULL) {
1484                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1485                         "failed, some commands and their data could leak");
1486                 goto out;
1487         }
1488         memset(mcmd, 0, sizeof(*mcmd));
1489
1490 out:
1491         TRACE_EXIT();
1492         return mcmd;
1493 }
1494
1495 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1496 {
1497         unsigned long flags;
1498
1499         TRACE_ENTRY();
1500
1501         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1502         atomic_dec(&mcmd->sess->sess_cmd_count);
1503         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1504
1505         scst_sess_put(mcmd->sess);
1506
1507         if (mcmd->mcmd_tgt_dev != NULL)
1508                 __scst_put();
1509
1510         mempool_free(mcmd, scst_mgmt_mempool);
1511
1512         TRACE_EXIT();
1513         return;
1514 }
1515
1516 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1517 int scst_alloc_request(struct scst_cmd *cmd)
1518 {
1519         int res = 0;
1520         struct scsi_request *req;
1521         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1522
1523         TRACE_ENTRY();
1524
1525         /* cmd->dev->scsi_dev must be non-NULL here */
1526         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1527         if (req == NULL) {
1528                 TRACE(TRACE_OUT_OF_MEM, "%s",
1529                       "Allocation of scsi_request failed");
1530                 res = -ENOMEM;
1531                 goto out;
1532         }
1533
1534         cmd->scsi_req = req;
1535
1536         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1537         req->sr_cmd_len = cmd->cdb_len;
1538         req->sr_data_direction = cmd->data_direction;
1539         req->sr_use_sg = cmd->sg_cnt;
1540         req->sr_bufflen = cmd->bufflen;
1541         req->sr_buffer = cmd->sg;
1542         req->sr_request->rq_disk = cmd->dev->rq_disk;
1543         req->sr_sense_buffer[0] = 0;
1544
1545         cmd->scsi_req->upper_private_data = cmd;
1546
1547 out:
1548         TRACE_EXIT();
1549         return res;
1550 }
1551
1552 void scst_release_request(struct scst_cmd *cmd)
1553 {
1554         scsi_release_request(cmd->scsi_req);
1555         cmd->scsi_req = NULL;
1556 }
1557 #endif
1558
1559 int scst_alloc_space(struct scst_cmd *cmd)
1560 {
1561         int gfp_mask;
1562         int res = -ENOMEM;
1563         int atomic = scst_cmd_atomic(cmd);
1564         int flags;
1565         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1566         int bufflen = cmd->bufflen;
1567
1568         TRACE_ENTRY();
1569
1570         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1571
1572         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1573         if (cmd->no_sgv)
1574                 flags |= SCST_POOL_ALLOC_NO_CACHED;
1575
1576         if (unlikely(cmd->bufflen == 0)) {
1577                 TRACE(TRACE_MGMT_MINOR, "Warning: data direction %d or/and "
1578                         "zero buffer length. Opcode 0x%x, handler %s, target "
1579                         "%s", cmd->data_direction, cmd->cdb[0],
1580                         cmd->dev->handler->name, cmd->tgtt->name);
1581                 /*
1582                  * Be on the safe side and alloc stub buffer. Neither target
1583                  * drivers, nor user space will touch it, since bufflen
1584                  * remains 0.
1585                  */
1586                 bufflen = PAGE_SIZE;
1587         }
1588
1589         cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
1590                         &cmd->sg_cnt, &cmd->sgv, NULL);
1591         if (cmd->sg == NULL)
1592                 goto out;
1593
1594         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1595                 static int ll;
1596                 if (ll < 10) {
1597                         PRINT_INFO("Unable to complete command due to "
1598                                 "SG IO count limitation (requested %d, "
1599                                 "available %d, tgt lim %d)", cmd->sg_cnt,
1600                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1601                         ll++;
1602                 }
1603                 goto out_sg_free;
1604         }
1605
1606         res = 0;
1607
1608 out:
1609         TRACE_EXIT();
1610         return res;
1611
1612 out_sg_free:
1613         sgv_pool_free(cmd->sgv);
1614         cmd->sgv = NULL;
1615         cmd->sg = NULL;
1616         cmd->sg_cnt = 0;
1617         goto out;
1618 }
1619
1620 void scst_release_space(struct scst_cmd *cmd)
1621 {
1622         TRACE_ENTRY();
1623
1624         if (cmd->sgv == NULL)
1625                 goto out;
1626
1627         if (cmd->data_buf_alloced) {
1628                 TRACE_MEM("%s", "data_buf_alloced set, returning");
1629                 goto out;
1630         }
1631
1632         sgv_pool_free(cmd->sgv);
1633
1634         cmd->sgv = NULL;
1635         cmd->sg_cnt = 0;
1636         cmd->sg = NULL;
1637         cmd->bufflen = 0;
1638         cmd->data_len = 0;
1639
1640 out:
1641         TRACE_EXIT();
1642         return;
1643 }
1644
1645 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1646
1647 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
1648 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1649
1650 int scst_get_cdb_len(const uint8_t *cdb)
1651 {
1652         return SCST_GET_CDB_LEN(cdb[0]);
1653 }
1654
1655 /* get_trans_len_x extract x bytes from cdb as length starting from off */
1656
1657 /* for special commands */
1658 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1659 {
1660         cmd->bufflen = 6;
1661         return 0;
1662 }
1663
1664 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1665 {
1666         cmd->bufflen = READ_CAP_LEN;
1667         return 0;
1668 }
1669
1670 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1671 {
1672         cmd->bufflen = 1;
1673         return 0;
1674 }
1675
1676 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1677 {
1678         uint8_t *p = (uint8_t *)cmd->cdb + off;
1679         int res = 0;
1680
1681         cmd->bufflen = 0;
1682         cmd->bufflen |= ((u32)p[0]) << 8;
1683         cmd->bufflen |= ((u32)p[1]);
1684
1685         switch (cmd->cdb[1] & 0x1f) {
1686         case 0:
1687         case 1:
1688         case 6:
1689                 if (cmd->bufflen != 0) {
1690                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1691                                 "allocation length for service action %x",
1692                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
1693                         goto out_inval;
1694                 }
1695                 break;
1696         }
1697
1698         switch (cmd->cdb[1] & 0x1f) {
1699         case 0:
1700         case 1:
1701                 cmd->bufflen = 20;
1702                 break;
1703         case 6:
1704                 cmd->bufflen = 32;
1705                 break;
1706         case 8:
1707                 cmd->bufflen = max(28, cmd->bufflen);
1708                 break;
1709         default:
1710                 PRINT_ERROR("READ POSITION: Invalid service action %x",
1711                         cmd->cdb[1] & 0x1f);
1712                 goto out_inval;
1713         }
1714
1715 out:
1716         return res;
1717
1718 out_inval:
1719         scst_set_cmd_error(cmd,
1720                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1721         res = 1;
1722         goto out;
1723 }
1724
1725 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1726 {
1727         cmd->bufflen = (u32)cmd->cdb[off];
1728         return 0;
1729 }
1730
1731 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1732 {
1733         const uint8_t *p = cmd->cdb + off;
1734
1735         cmd->bufflen = 0;
1736         cmd->bufflen |= ((u32)p[0]) << 8;
1737         cmd->bufflen |= ((u32)p[1]);
1738
1739         return 0;
1740 }
1741
1742 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1743 {
1744         const uint8_t *p = cmd->cdb + off;
1745
1746         cmd->bufflen = 0;
1747         cmd->bufflen |= ((u32)p[0]) << 16;
1748         cmd->bufflen |= ((u32)p[1]) << 8;
1749         cmd->bufflen |= ((u32)p[2]);
1750
1751         return 0;
1752 }
1753
1754 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1755 {
1756         const uint8_t *p = cmd->cdb + off;
1757
1758         cmd->bufflen = 0;
1759         cmd->bufflen |= ((u32)p[0]) << 24;
1760         cmd->bufflen |= ((u32)p[1]) << 16;
1761         cmd->bufflen |= ((u32)p[2]) << 8;
1762         cmd->bufflen |= ((u32)p[3]);
1763
1764         return 0;
1765 }
1766
1767 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1768 {
1769         cmd->bufflen = 0;
1770         return 0;
1771 }
1772
1773 int scst_get_cdb_info(struct scst_cmd *cmd)
1774 {
1775         int dev_type = cmd->dev->handler->type;
1776         int i, res = 0;
1777         uint8_t op;
1778         const struct scst_sdbops *ptr = NULL;
1779
1780         TRACE_ENTRY();
1781
1782         op = cmd->cdb[0];       /* get clear opcode */
1783
1784         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1785                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1786                 dev_type);
1787
1788         i = scst_scsi_op_list[op];
1789         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1790                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1791                         ptr = &scst_scsi_op_table[i];
1792                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>", 
1793                               ptr->ops, ptr->devkey[0], /* disk     */
1794                               ptr->devkey[1],   /* tape     */
1795                               ptr->devkey[2],   /* printer */
1796                               ptr->devkey[3],   /* cpu      */
1797                               ptr->devkey[4],   /* cdr      */
1798                               ptr->devkey[5],   /* cdrom    */
1799                               ptr->devkey[6],   /* scanner */
1800                               ptr->devkey[7],   /* worm     */
1801                               ptr->devkey[8],   /* changer */
1802                               ptr->devkey[9],   /* commdev */
1803                               ptr->op_name);
1804                         TRACE_DBG("direction=%d flags=%d off=%d",
1805                               ptr->direction,
1806                               ptr->flags,
1807                               ptr->off);
1808                         break;
1809                 }
1810                 i++;
1811         }
1812
1813         if (ptr == NULL) {
1814                 /* opcode not found or now not used !!! */
1815                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1816                       dev_type);
1817                 res = -1;
1818                 cmd->op_flags = SCST_INFO_INVALID;
1819                 goto out;
1820         }
1821
1822         cmd->cdb_len = SCST_GET_CDB_LEN(op);
1823         cmd->op_name = ptr->op_name;
1824         cmd->data_direction = ptr->direction;
1825         cmd->op_flags = ptr->flags;
1826         res = (*ptr->get_trans_len)(cmd, ptr->off);
1827
1828 out:
1829         TRACE_EXIT();
1830         return res;
1831 }
1832
1833 /*
1834  * Routine to extract a lun number from an 8-byte LUN structure
1835  * in network byte order (BE).
1836  * (see SAM-2, Section 4.12.3 page 40)
1837  * Supports 2 types of lun unpacking: peripheral and logical unit.
1838  */
1839 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1840 {
1841         lun_t res = (lun_t)-1;
1842         int address_method;
1843
1844         TRACE_ENTRY();
1845
1846         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1847
1848         if (unlikely(len < 2)) {
1849                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1850                         "more", len);
1851                 goto out;
1852         }
1853
1854         if (len > 2) {
1855                 switch(len) {
1856                 case 8:
1857                         if ((*((uint64_t*)lun) & 
1858                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1859                                 goto out_err;
1860                         break;
1861                 case 4:
1862                         if (*((uint16_t*)&lun[2]) != 0)
1863                                 goto out_err;
1864                         break;
1865                 case 6:
1866                         if (*((uint32_t*)&lun[2]) != 0)
1867                                 goto out_err;
1868                         break;
1869                 default:
1870                         goto out_err;
1871                 }
1872         }
1873
1874         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
1875         switch (address_method) {
1876         case 0: /* peripheral device addressing method */
1877 #if 0 /* Looks like it's legal to use it as flat space addressing method as well */
1878                 if (*lun) {
1879                         PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
1880                              "peripheral device addressing method 0x%02x, "
1881                              "expected 0", *lun);
1882                         break;
1883                 }
1884                 res = *(lun + 1);
1885                 break;
1886 #else
1887                 /* go through */
1888 #endif
1889
1890         case 1: /* flat space addressing method */
1891                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1892                 break;
1893
1894         case 2: /* logical unit addressing method */
1895                 if (*lun & 0x3f) {
1896                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1897                                     "addressing method 0x%02x, expected 0",
1898                                     *lun & 0x3f);
1899                         break;
1900                 }
1901                 if (*(lun + 1) & 0xe0) {
1902                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
1903                                     "addressing method 0x%02x, expected 0",
1904                                     (*(lun + 1) & 0xf8) >> 5);
1905                         break;
1906                 }
1907                 res = *(lun + 1) & 0x1f;
1908                 break;
1909
1910         case 3: /* extended logical unit addressing method */
1911         default:
1912                 PRINT_ERROR("Unimplemented LUN addressing method %u",
1913                             address_method);
1914                 break;
1915         }
1916
1917 out:
1918         TRACE_EXIT_RES((int)res);
1919         return res;
1920
1921 out_err:
1922         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
1923         goto out;
1924 }
1925
1926 int scst_calc_block_shift(int sector_size)
1927 {
1928         int block_shift = 0;
1929         int t;
1930
1931         if (sector_size == 0)
1932                 sector_size = 512;
1933
1934         t = sector_size;
1935         while(1) {
1936                 if ((t & 1) != 0)
1937                         break;
1938                 t >>= 1;
1939                 block_shift++;
1940         }
1941         if (block_shift < 9) {
1942                 PRINT_ERROR("Wrong sector size %d", sector_size);
1943                 block_shift = -1;
1944         } 
1945
1946         TRACE_EXIT_RES(block_shift);
1947         return block_shift;
1948 }
1949
1950 int scst_sbc_generic_parse(struct scst_cmd *cmd,
1951         int (*get_block_shift)(struct scst_cmd *cmd))
1952 {
1953         int res = 0;
1954
1955         TRACE_ENTRY();
1956         
1957         /*
1958          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1959          * therefore change them only if necessary
1960          */
1961
1962         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1963               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1964
1965         switch (cmd->cdb[0]) {
1966         case SERVICE_ACTION_IN:
1967                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
1968                         cmd->bufflen = READ_CAP16_LEN;
1969                         cmd->data_direction = SCST_DATA_READ;
1970                 }
1971                 break;
1972         case VERIFY_6:
1973         case VERIFY:
1974         case VERIFY_12:
1975         case VERIFY_16:
1976                 if ((cmd->cdb[1] & BYTCHK) == 0) {
1977                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
1978                         cmd->bufflen = 0;
1979                         goto out;
1980                 } else
1981                         cmd->data_len = 0;
1982                 break;
1983         default:
1984                 /* It's all good */
1985                 break;
1986         }
1987
1988         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
1989                 /* 
1990                  * No need for locks here, since *_detach() can not be
1991                  * called, when there are existing commands.
1992                  */
1993                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
1994         }
1995
1996 out:
1997         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
1998               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
1999
2000         TRACE_EXIT_RES(res);
2001         return res;
2002 }
2003
2004 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2005         int (*get_block_shift)(struct scst_cmd *cmd))
2006 {
2007         int res = 0;
2008
2009         TRACE_ENTRY();
2010
2011         /*
2012          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2013          * therefore change them only if necessary
2014          */
2015
2016         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2017               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2018
2019         cmd->cdb[1] &= 0x1f;
2020
2021         switch (cmd->cdb[0]) {
2022         case VERIFY_6:
2023         case VERIFY:
2024         case VERIFY_12:
2025         case VERIFY_16:
2026                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2027                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2028                         cmd->bufflen = 0;
2029                         goto out;
2030                 }
2031                 break;
2032         default:
2033                 /* It's all good */
2034                 break;
2035         }
2036
2037         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2038                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2039
2040 out:
2041         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2042                 cmd->data_direction);
2043
2044         TRACE_EXIT();
2045         return res;
2046 }
2047
2048 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2049         int (*get_block_shift)(struct scst_cmd *cmd))
2050 {
2051         int res = 0;
2052
2053         TRACE_ENTRY();
2054
2055         /*
2056          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2057          * therefore change them only if necessary
2058          */
2059
2060         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2061               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2062
2063         cmd->cdb[1] &= 0x1f;
2064
2065         switch (cmd->cdb[0]) {
2066         case VERIFY_6:
2067         case VERIFY:
2068         case VERIFY_12:
2069         case VERIFY_16:
2070                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2071                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2072                         cmd->bufflen = 0;
2073                         goto out;
2074                 }
2075                 break;
2076         default:
2077                 /* It's all good */
2078                 break;
2079         }
2080
2081         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2082                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2083
2084 out:
2085         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2086                 cmd->data_direction);
2087
2088         TRACE_EXIT_RES(res);
2089         return res;
2090 }
2091
2092 int scst_tape_generic_parse(struct scst_cmd *cmd,
2093         int (*get_block_size)(struct scst_cmd *cmd))
2094 {
2095         int res = 0;
2096
2097         TRACE_ENTRY();
2098
2099         /*
2100          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2101          * therefore change them only if necessary
2102          */
2103
2104         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2105               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2106
2107         if (cmd->cdb[0] == READ_POSITION) {
2108                 int tclp = cmd->cdb[1] & TCLP_BIT;
2109                 int long_bit = cmd->cdb[1] & LONG_BIT;
2110                 int bt = cmd->cdb[1] & BT_BIT;
2111
2112                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2113                         cmd->bufflen =
2114                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2115                         cmd->data_direction = SCST_DATA_READ;
2116                 } else {
2117                         cmd->bufflen = 0;
2118                         cmd->data_direction = SCST_DATA_NONE;
2119                 }
2120         }
2121
2122         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2123                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2124
2125         TRACE_EXIT_RES(res);
2126         return res;
2127 }
2128
2129 static int scst_null_parse(struct scst_cmd *cmd)
2130 {
2131         int res = 0;
2132
2133         TRACE_ENTRY();
2134
2135         /*
2136          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2137          * therefore change them only if necessary
2138          */
2139
2140         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2141               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2142 #if 0
2143         switch (cmd->cdb[0]) {
2144         default:
2145                 /* It's all good */
2146                 break;
2147         }
2148 #endif
2149         TRACE_DBG("res %d bufflen %d direct %d",
2150               res, cmd->bufflen, cmd->data_direction);
2151
2152         TRACE_EXIT();
2153         return res;
2154 }
2155
2156 int scst_changer_generic_parse(struct scst_cmd *cmd,
2157         int (*nothing)(struct scst_cmd *cmd))
2158 {
2159         return scst_null_parse(cmd);
2160 }
2161
2162 int scst_processor_generic_parse(struct scst_cmd *cmd,
2163         int (*nothing)(struct scst_cmd *cmd))
2164 {
2165         return scst_null_parse(cmd);
2166 }
2167
2168 int scst_raid_generic_parse(struct scst_cmd *cmd,
2169         int (*nothing)(struct scst_cmd *cmd))
2170 {
2171         return scst_null_parse(cmd);
2172 }
2173
2174 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2175         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2176 {
2177         int opcode = cmd->cdb[0];
2178         int status = cmd->status;
2179         int res = SCST_CMD_STATE_DEFAULT;
2180
2181         TRACE_ENTRY();
2182
2183         /*
2184          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2185          * based on cmd->status and cmd->data_direction, therefore change
2186          * them only if necessary
2187          */
2188
2189         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2190                 switch (opcode) {
2191                 case READ_CAPACITY:
2192                 {
2193                         /* Always keep track of disk capacity */
2194                         int buffer_size, sector_size, sh;
2195                         uint8_t *buffer;
2196
2197                         buffer_size = scst_get_buf_first(cmd, &buffer);
2198                         if (unlikely(buffer_size <= 0)) {
2199                                 PRINT_ERROR("%s: Unable to get the buffer "
2200                                         "(%d)", __FUNCTION__, buffer_size);
2201                                 goto out;
2202                         }
2203
2204                         sector_size =
2205                             ((buffer[4] << 24) | (buffer[5] << 16) |
2206                              (buffer[6] << 8) | (buffer[7] << 0));
2207                         scst_put_buf(cmd, buffer);
2208                         if (sector_size != 0)
2209                                 sh = scst_calc_block_shift(sector_size);
2210                         else
2211                                 sh = 0;
2212                         set_block_shift(cmd, sh);
2213                         TRACE_DBG("block_shift %d", sh);
2214                         break;
2215                 }
2216                 default:
2217                         /* It's all good */
2218                         break;
2219                 }
2220         }
2221
2222         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2223               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2224
2225 out:
2226         TRACE_EXIT_RES(res);
2227         return res;
2228 }
2229
2230 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2231         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2232 {
2233         int opcode = cmd->cdb[0];
2234         int res = SCST_CMD_STATE_DEFAULT;
2235         int buffer_size, bs;
2236         uint8_t *buffer = NULL;
2237
2238         TRACE_ENTRY();
2239
2240         /*
2241          * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2242          * based on cmd->status and cmd->data_direction, therefore change
2243          * them only if necessary
2244          */
2245                 
2246         switch (opcode) {
2247         case MODE_SENSE:
2248         case MODE_SELECT:
2249                 buffer_size = scst_get_buf_first(cmd, &buffer);
2250                 if (unlikely(buffer_size <= 0)) {
2251                         PRINT_ERROR("%s: Unable to get the buffer (%d)",
2252                                 __FUNCTION__, buffer_size);
2253                         goto out;
2254                 }
2255                 break;
2256         }
2257
2258         switch (opcode) {
2259         case MODE_SENSE:
2260                 TRACE_DBG("%s", "MODE_SENSE");
2261                 if ((cmd->cdb[2] & 0xC0) == 0) {
2262                         if (buffer[3] == 8) {
2263                                 bs = (buffer[9] << 16) |
2264                                     (buffer[10] << 8) | buffer[11];
2265                                 set_block_size(cmd, bs);
2266                         }
2267                 }
2268                 break;
2269         case MODE_SELECT:
2270                 TRACE_DBG("%s", "MODE_SELECT");
2271                 if (buffer[3] == 8) {
2272                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
2273                             (buffer[11]);
2274                         set_block_size(cmd, bs);
2275                 }
2276                 break;
2277         default:
2278                 /* It's all good */
2279                 break;
2280         }
2281         
2282         switch (opcode) {
2283         case MODE_SENSE:
2284         case MODE_SELECT:
2285                 scst_put_buf(cmd, buffer);
2286                 break;
2287         }
2288
2289 out:
2290         TRACE_EXIT_RES(res);
2291         return res;
2292 }
2293
2294 static void scst_check_internal_sense(struct scst_device *dev, int result,
2295         uint8_t *sense, int sense_len)
2296 {
2297         TRACE_ENTRY();
2298
2299         if (host_byte(result) == DID_RESET) {
2300                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2301                         "reset UA");
2302                 scst_set_sense(sense, sense_len,
2303                         SCST_LOAD_SENSE(scst_sense_reset_UA));
2304                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2305         } else if ((status_byte(result) == CHECK_CONDITION) &&
2306                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2307                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2308
2309         TRACE_EXIT();
2310         return;
2311 }
2312
2313 int scst_obtain_device_parameters(struct scst_device *dev)
2314 {
2315         int res = 0, i;
2316         uint8_t cmd[16];
2317         uint8_t buffer[4+0x0A];
2318         uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2319
2320         TRACE_ENTRY();
2321
2322         sBUG_ON(in_interrupt() || in_atomic());
2323         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2324
2325         for(i = 0; i < 5; i++) {
2326                 /* Get control mode page */
2327                 memset(cmd, 0, sizeof(cmd));
2328                 cmd[0] = MODE_SENSE;
2329                 cmd[1] = 8; /* DBD */
2330                 cmd[2] = 0x0A;
2331                 cmd[4] = sizeof(buffer);
2332
2333                 memset(buffer, 0, sizeof(buffer));
2334                 memset(sense_buffer, 0, sizeof(sense_buffer));
2335
2336                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2337                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer, 
2338                            sizeof(buffer), sense_buffer, SCST_DEFAULT_TIMEOUT,
2339                             0, 0);
2340
2341                 TRACE_DBG("MODE_SENSE done: %x", res);
2342
2343                 if (scsi_status_is_good(res)) {
2344                         int q;
2345
2346                         PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2347                                 buffer, sizeof(buffer));
2348
2349                         dev->tst = buffer[4+2] >> 5;
2350                         q = buffer[4+3] >> 4;
2351                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2352                                 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2353                                         "%d:%d:%d:%d", dev->queue_alg,
2354                                         dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2355                                         dev->scsi_dev->id, dev->scsi_dev->lun);
2356                         }
2357                         dev->queue_alg = q;
2358                         dev->swp = (buffer[4+4] & 0x8) >> 3;
2359                         dev->tas = (buffer[4+5] & 0x40) >> 6;
2360
2361                         /*
2362                          * Unfortunately, SCSI ML doesn't provide a way to
2363                          * specify commands task attribute, so we can rely on
2364                          * device's restricted reordering only.
2365                          */
2366                         dev->has_own_order_mgmt = !dev->queue_alg;
2367
2368                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2369                                 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2370                                 "%d", dev->scsi_dev->host->host_no,
2371                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2372                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2373                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2374
2375                         goto out;
2376                 } else {
2377 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2378                         if ((status_byte(res) == CHECK_CONDITION) &&
2379 #else
2380                         if (
2381 #endif
2382                             SCST_SENSE_VALID(sense_buffer)) {
2383                                 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2384                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2385                                                 "%d:%d:%d:%d doesn't support control "
2386                                                 "mode page, using defaults: TST "
2387                                                 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2388                                                 "has_own_order_mgmt %d",
2389                                                 dev->scsi_dev->host->host_no,
2390                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2391                                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2392                                                 dev->swp, dev->tas, dev->has_own_order_mgmt);
2393                                         res = 0;
2394                                         goto out;
2395                                 } else if (sense_buffer[2] == NOT_READY) {
2396                                         TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2397                                                 dev->scsi_dev->host->host_no,
2398                                                 dev->scsi_dev->channel, dev->scsi_dev->id,
2399                                                 dev->scsi_dev->lun);
2400                                         res = 0;
2401                                         goto out;
2402                                 }
2403                         } else {
2404                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2405                                         "device %d:%d:%d:%d failed: %x",
2406                                         dev->scsi_dev->host->host_no,
2407                                         dev->scsi_dev->channel, dev->scsi_dev->id,
2408                                         dev->scsi_dev->lun, res);
2409                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2410                                         "sense", sense_buffer, sizeof(sense_buffer));
2411                         }
2412                         scst_check_internal_sense(dev, res, sense_buffer,
2413                                         sizeof(sense_buffer));
2414                 }
2415         }
2416         res = -ENODEV;
2417
2418 out:
2419         TRACE_EXIT_RES(res);
2420         return res;
2421 }
2422
2423 /* Called under dev_lock and BH off */
2424 void scst_process_reset(struct scst_device *dev,
2425         struct scst_session *originator, struct scst_cmd *exclude_cmd,
2426         struct scst_mgmt_cmd *mcmd)
2427 {
2428         struct scst_tgt_dev *tgt_dev;
2429         struct scst_cmd *cmd, *tcmd;
2430
2431         TRACE_ENTRY();
2432
2433         /* Clear RESERVE'ation, if necessary */
2434         if (dev->dev_reserved) {
2435                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2436                                     dev_tgt_dev_list_entry) {
2437                         TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2438                                 "lun %Ld", tgt_dev->lun);
2439                         clear_bit(SCST_TGT_DEV_RESERVED,
2440                                   &tgt_dev->tgt_dev_flags);
2441                 }
2442                 dev->dev_reserved = 0;
2443                 /*
2444                  * There is no need to send RELEASE, since the device is going
2445                  * to be resetted. Actually, since we can be in RESET TM
2446                  * function, it might be dangerous.
2447                  */
2448         }
2449
2450         dev->dev_double_ua_possible = 1;
2451         dev->dev_serialized = 1;
2452
2453         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list, 
2454                 dev_tgt_dev_list_entry) {
2455                 struct scst_session *sess = tgt_dev->sess;
2456
2457                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2458                 scst_free_all_UA(tgt_dev);
2459                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2460
2461                 spin_lock_irq(&sess->sess_list_lock);
2462
2463                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2464                 list_for_each_entry(cmd, &sess->search_cmd_list, 
2465                                 search_cmd_list_entry) {
2466                         if (cmd == exclude_cmd)
2467                                 continue;
2468                         if ((cmd->tgt_dev == tgt_dev) ||
2469                             ((cmd->tgt_dev == NULL) && 
2470                              (cmd->lun == tgt_dev->lun))) {
2471                                 scst_abort_cmd(cmd, mcmd,
2472                                         (tgt_dev->sess != originator), 0);
2473                         }
2474                 }
2475                 spin_unlock_irq(&sess->sess_list_lock);
2476         }
2477
2478         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2479                                 blocked_cmd_list_entry) {
2480                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2481                         list_del(&cmd->blocked_cmd_list_entry);
2482                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2483                                 "to active cmd list", cmd);
2484                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2485                         list_add_tail(&cmd->cmd_list_entry,
2486                                 &cmd->cmd_lists->active_cmd_list);
2487                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2488                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2489                 }
2490         }
2491
2492         /* BH already off */
2493         spin_lock(&scst_temp_UA_lock);
2494         scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2495                 SCST_LOAD_SENSE(scst_sense_reset_UA));
2496         scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2497                 sizeof(scst_temp_UA));
2498         spin_unlock(&scst_temp_UA_lock);
2499
2500         TRACE_EXIT();
2501         return;
2502 }
2503
2504 int scst_set_pending_UA(struct scst_cmd *cmd)
2505 {
2506         int res = 0;
2507         struct scst_tgt_dev_UA *UA_entry;
2508
2509         TRACE_ENTRY();
2510
2511         TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2512
2513         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2514
2515         /* UA list could be cleared behind us, so retest */
2516         if (list_empty(&cmd->tgt_dev->UA_list)) {
2517                 TRACE_DBG("%s",
2518                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2519                 res = -1;
2520                 goto out_unlock;
2521         }
2522
2523         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2524                               UA_list_entry);
2525
2526         TRACE_DBG("next %p UA_entry %p",
2527               cmd->tgt_dev->UA_list.next, UA_entry);
2528
2529         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2530                 sizeof(UA_entry->UA_sense_buffer));
2531
2532         cmd->ua_ignore = 1;
2533
2534         list_del(&UA_entry->UA_list_entry);
2535
2536         mempool_free(UA_entry, scst_ua_mempool);
2537
2538         if (list_empty(&cmd->tgt_dev->UA_list)) {
2539                 clear_bit(SCST_TGT_DEV_UA_PENDING,
2540                           &cmd->tgt_dev->tgt_dev_flags);
2541         }
2542
2543         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2544
2545 out:
2546         TRACE_EXIT_RES(res);
2547         return res;
2548
2549 out_unlock:
2550         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2551         goto out;
2552 }
2553
2554 /* Called under tgt_dev_lock and BH off */
2555 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2556         const uint8_t *sense, int sense_len, int head)
2557 {
2558         struct scst_tgt_dev_UA *UA_entry = NULL;
2559
2560         TRACE_ENTRY();
2561
2562         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2563         if (UA_entry == NULL) {
2564                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2565                      "allocation failed. The UNIT ATTENTION "
2566                      "on some sessions will be missed");
2567                 PRINT_BUFFER("Lost UA", sense, sense_len);
2568                 goto out;
2569         }
2570         memset(UA_entry, 0, sizeof(*UA_entry));
2571
2572         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2573                 sense_len = sizeof(UA_entry->UA_sense_buffer);
2574         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2575
2576         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2577
2578         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2579
2580         if (head)
2581                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2582         else
2583                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2584
2585 out:
2586         TRACE_EXIT();
2587         return;
2588 }
2589
2590 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2591         const uint8_t *sense, int sense_len, int head)
2592 {
2593         int skip_UA = 0;
2594         struct scst_tgt_dev_UA *UA_entry_tmp;
2595
2596         TRACE_ENTRY();
2597
2598         spin_lock_bh(&tgt_dev->tgt_dev_lock);
2599
2600         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2601                             UA_list_entry) {
2602                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2603                         TRACE_MGMT_DBG("%s", "UA already exists");
2604                         skip_UA = 1;
2605                         break;
2606                 }
2607         }
2608
2609         if (skip_UA == 0)
2610                 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2611
2612         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2613
2614         TRACE_EXIT();
2615         return;
2616 }
2617
2618 /* Called under dev_lock and BH off */
2619 void scst_dev_check_set_local_UA(struct scst_device *dev,
2620         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2621 {
2622         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2623
2624         TRACE_ENTRY();
2625
2626         if (exclude != NULL)
2627                 exclude_tgt_dev = exclude->tgt_dev;
2628
2629         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list, 
2630                         dev_tgt_dev_list_entry) {
2631                 if (tgt_dev != exclude_tgt_dev)
2632                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2633         }
2634
2635         TRACE_EXIT();
2636         return;
2637 }
2638
2639 /* Called under dev_lock and BH off */
2640 void __scst_dev_check_set_UA(struct scst_device *dev,
2641         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2642 {
2643         TRACE_ENTRY();
2644
2645         TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2646
2647         /* Check for reset UA */
2648         if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2649                 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2650                         exclude, NULL);
2651
2652         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2653
2654         TRACE_EXIT();
2655         return;
2656 }
2657
2658 /* Called under tgt_dev_lock or when tgt_dev is unused */
2659 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2660 {
2661         struct scst_tgt_dev_UA *UA_entry, *t;
2662
2663         TRACE_ENTRY();
2664
2665         list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2666                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %Ld", 
2667                         tgt_dev->lun);
2668                 list_del(&UA_entry->UA_list_entry);
2669                 kfree(UA_entry);
2670         }
2671         INIT_LIST_HEAD(&tgt_dev->UA_list);
2672         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2673
2674         TRACE_EXIT();
2675         return;
2676 }
2677
2678 /* No locks */
2679 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2680 {
2681         struct scst_cmd *res = NULL, *cmd, *t;
2682         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2683
2684         spin_lock_irq(&tgt_dev->sn_lock);
2685
2686         if (unlikely(tgt_dev->hq_cmd_count != 0))
2687                 goto out_unlock;
2688
2689 restart:
2690         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2691                                 sn_cmd_list_entry) {
2692                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2693                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2694                 if (cmd->sn == expected_sn) {
2695                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2696                                 cmd, cmd->sn, cmd->sn_set);
2697                         tgt_dev->def_cmd_count--;
2698                         list_del(&cmd->sn_cmd_list_entry);
2699                         if (res == NULL)
2700                                 res = cmd;
2701                         else {
2702                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2703                                 TRACE_SN("Adding cmd %p to active cmd list",
2704                                         cmd);
2705                                 list_add_tail(&cmd->cmd_list_entry,
2706                                         &cmd->cmd_lists->active_cmd_list);
2707                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2708                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2709                         }
2710                 }
2711         }
2712         if (res != NULL)
2713                 goto out_unlock;
2714
2715         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2716                                 sn_cmd_list_entry) {
2717                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2718                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2719                 if (cmd->sn == expected_sn) {
2720                         atomic_t *slot = cmd->sn_slot;
2721                         /* 
2722                          * !! At this point any pointer in cmd, except !!
2723                          * !! sn_slot and sn_cmd_list_entry, could be   !!
2724                          * !! already destroyed                         !!
2725                          */
2726                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2727                                 cmd, cmd->tag, cmd->sn);
2728                         tgt_dev->def_cmd_count--;
2729                         list_del(&cmd->sn_cmd_list_entry);
2730                         spin_unlock_irq(&tgt_dev->sn_lock);
2731                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED, 
2732                                         &cmd->cmd_flags)) {
2733                                 scst_destroy_put_cmd(cmd);
2734                         }
2735                         scst_inc_expected_sn(tgt_dev, slot);
2736                         expected_sn = tgt_dev->expected_sn;
2737                         spin_lock_irq(&tgt_dev->sn_lock);
2738                         goto restart;
2739                 }
2740         }
2741
2742 out_unlock:
2743         spin_unlock_irq(&tgt_dev->sn_lock);
2744         return res;
2745 }
2746
2747 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2748         struct scst_thr_data_hdr *data,
2749         void (*free_fn) (struct scst_thr_data_hdr *data))
2750 {
2751         data->pid = current->pid;
2752         atomic_set(&data->ref, 1);
2753         EXTRACHECKS_BUG_ON(free_fn == NULL);
2754         data->free_fn = free_fn;
2755         spin_lock(&tgt_dev->thr_data_lock);
2756         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2757         spin_unlock(&tgt_dev->thr_data_lock);
2758 }
2759
2760 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2761 {
2762         spin_lock(&tgt_dev->thr_data_lock);
2763         while (!list_empty(&tgt_dev->thr_data_list)) {
2764                 struct scst_thr_data_hdr *d = list_entry(
2765                                 tgt_dev->thr_data_list.next, typeof(*d),
2766                                 thr_data_list_entry);
2767                 list_del(&d->thr_data_list_entry);
2768                 spin_unlock(&tgt_dev->thr_data_lock);
2769                 scst_thr_data_put(d);
2770                 spin_lock(&tgt_dev->thr_data_lock);
2771         }
2772         spin_unlock(&tgt_dev->thr_data_lock);
2773         return;
2774 }
2775
2776 void scst_dev_del_all_thr_data(struct scst_device *dev)
2777 {
2778         struct scst_tgt_dev *tgt_dev;
2779
2780         TRACE_ENTRY();
2781
2782         mutex_lock(&scst_mutex);
2783
2784         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2785                                 dev_tgt_dev_list_entry) {
2786                 scst_del_all_thr_data(tgt_dev);
2787         }
2788
2789         mutex_unlock(&scst_mutex);
2790
2791         TRACE_EXIT();
2792         return;
2793 }
2794
2795 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2796 {
2797         struct scst_thr_data_hdr *res = NULL, *d;
2798
2799         spin_lock(&tgt_dev->thr_data_lock);
2800         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2801                 if (d->pid == current->pid) {
2802                         res = d;
2803                         scst_thr_data_get(res);
2804                         break;
2805                 }
2806         }
2807         spin_unlock(&tgt_dev->thr_data_lock);
2808         return res;
2809 }
2810
2811 /* dev_lock supposed to be held and BH disabled */
2812 void __scst_block_dev(struct scst_device *dev)
2813 {
2814         dev->block_count++;
2815         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2816 }
2817
2818 /* No locks */
2819 void scst_block_dev(struct scst_device *dev, int outstanding)
2820 {
2821         spin_lock_bh(&dev->dev_lock);
2822         __scst_block_dev(dev);
2823         spin_unlock_bh(&dev->dev_lock);
2824
2825         /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2826         smp_mb();
2827
2828         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2829                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2830         wait_event(dev->on_dev_waitQ, 
2831                 atomic_read(&dev->on_dev_count) <= outstanding);
2832         TRACE_MGMT_DBG("%s", "wait_event() returned");
2833 }
2834
2835 /* No locks */
2836 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
2837 {
2838         sBUG_ON(cmd->needs_unblocking);
2839
2840         cmd->needs_unblocking = 1;
2841         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)", cmd, cmd->tag);
2842
2843         scst_block_dev(cmd->dev, outstanding);
2844 }
2845
2846 /* No locks */
2847 void scst_unblock_dev(struct scst_device *dev)
2848 {
2849         spin_lock_bh(&dev->dev_lock);
2850         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
2851                 dev->block_count-1, dev);
2852         if (--dev->block_count == 0)
2853                 scst_unblock_cmds(dev);
2854         spin_unlock_bh(&dev->dev_lock);
2855         sBUG_ON(dev->block_count < 0);
2856 }
2857
2858 /* No locks */
2859 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
2860 {
2861         scst_unblock_dev(cmd->dev);
2862         cmd->needs_unblocking = 0;
2863 }
2864
2865 /* No locks */
2866 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
2867 {
2868         int res = 0;
2869         struct scst_device *dev = cmd->dev;
2870
2871         TRACE_ENTRY();
2872
2873         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
2874
2875         atomic_inc(&dev->on_dev_count);
2876         cmd->dec_on_dev_needed = 1;
2877         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
2878
2879 #ifdef STRICT_SERIALIZING
2880         spin_lock_bh(&dev->dev_lock);
2881         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2882                 goto out_unlock;
2883         if (dev->block_count > 0) {
2884                 scst_dec_on_dev_cmd(cmd);
2885                 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
2886                         "serializing (tag %llu, dev %p)", cmd, cmd->tag, dev);
2887                 list_add_tail(&cmd->blocked_cmd_list_entry,
2888                               &dev->blocked_cmd_list);
2889                 res = 1;
2890         } else {
2891                 __scst_block_dev(dev);
2892                 cmd->inc_blocking = 1;
2893         }
2894         spin_unlock_bh(&dev->dev_lock);
2895         goto out;
2896 #else
2897 repeat:
2898         if (unlikely(dev->block_count > 0)) {
2899                 spin_lock_bh(&dev->dev_lock);
2900                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2901                         goto out_unlock;
2902                 barrier(); /* to reread block_count */
2903                 if (dev->block_count > 0) {
2904                         scst_dec_on_dev_cmd(cmd);
2905                         TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
2906                                 "serializing (tag %llu, dev %p)", cmd,
2907                                 cmd->tag, dev);
2908                         list_add_tail(&cmd->blocked_cmd_list_entry,
2909                                       &dev->blocked_cmd_list);
2910                         res = 1;
2911                         spin_unlock_bh(&dev->dev_lock);
2912                         goto out;
2913                 } else {
2914                         TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
2915                                 "continuing");
2916                 }
2917                 spin_unlock_bh(&dev->dev_lock);
2918         }
2919         if (unlikely(dev->dev_serialized)) {
2920                 spin_lock_bh(&dev->dev_lock);
2921                 barrier(); /* to reread block_count */
2922                 if (dev->block_count == 0) {
2923                         TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
2924                                 "cmds due to serializing (dev %p)", cmd,
2925                                 cmd->tag, dev);
2926                         __scst_block_dev(dev);
2927                         cmd->inc_blocking = 1;
2928                 } else {
2929                         spin_unlock_bh(&dev->dev_lock);
2930                         TRACE_MGMT_DBG("Somebody blocked the device, "
2931                                 "repeating (count %d)", dev->block_count);
2932                         goto repeat;
2933                 }
2934                 spin_unlock_bh(&dev->dev_lock);
2935         }
2936 #endif
2937
2938 out:
2939         TRACE_EXIT_RES(res);
2940         return res;
2941
2942 out_unlock:
2943         spin_unlock_bh(&dev->dev_lock);
2944         goto out;
2945 }
2946
2947 /* Called under dev_lock */
2948 void scst_unblock_cmds(struct scst_device *dev)
2949 {
2950 #ifdef STRICT_SERIALIZING
2951         struct scst_cmd *cmd, *t;
2952         unsigned long flags;
2953
2954         TRACE_ENTRY();
2955
2956         local_irq_save(flags);
2957         list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
2958                                  blocked_cmd_list_entry) {
2959                 int brk = 0;
2960                 /* 
2961                  * Since only one cmd per time is being executed, expected_sn
2962                  * can't change behind us, if the corresponding cmd is in
2963                  * blocked_cmd_list, but we could be called before
2964                  * scst_inc_expected_sn().
2965                  */
2966                 if (likely(!cmd->internal && !cmd->retry)) {
2967                         typeof(cmd->tgt_dev->expected_sn) expected_sn;
2968                         if (cmd->tgt_dev == NULL)
2969                                 sBUG();
2970                         expected_sn = cmd->tgt_dev->expected_sn;
2971                         if (cmd->sn == expected_sn)
2972                                 brk = 1;
2973                         else if (cmd->sn != (expected_sn+1))
2974                                 continue;
2975                 }
2976                         
2977                 list_del(&cmd->blocked_cmd_list_entry);
2978                 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
2979                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2980                 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
2981                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2982                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2983                 if (brk)
2984                         break;
2985         }
2986         local_irq_restore(flags);
2987 #else /* STRICT_SERIALIZING */
2988         struct scst_cmd *cmd, *tcmd;
2989         unsigned long flags;
2990
2991         TRACE_ENTRY();
2992
2993         local_irq_save(flags);
2994         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2995                                  blocked_cmd_list_entry) {
2996                 list_del(&cmd->blocked_cmd_list_entry);
2997                 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
2998                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2999                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3000                         list_add(&cmd->cmd_list_entry,
3001                                 &cmd->cmd_lists->active_cmd_list);
3002                 else
3003                         list_add_tail(&cmd->cmd_list_entry,
3004                                 &cmd->cmd_lists->active_cmd_list);
3005                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3006                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3007         }
3008         local_irq_restore(flags);
3009 #endif /* STRICT_SERIALIZING */
3010
3011         TRACE_EXIT();
3012         return;
3013 }
3014
3015 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3016         struct scst_cmd *out_of_sn_cmd)
3017 {
3018         EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3019
3020         if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3021                 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3022                 scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
3023         } else {
3024                 out_of_sn_cmd->out_of_sn = 1;
3025                 spin_lock_irq(&tgt_dev->sn_lock);
3026                 tgt_dev->def_cmd_count++;
3027                 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3028                               &tgt_dev->skipped_sn_list);
3029                 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3030                         "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3031                         tgt_dev->expected_sn);
3032                 spin_unlock_irq(&tgt_dev->sn_lock);
3033         }
3034
3035         return;
3036 }
3037
3038 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3039         struct scst_cmd *out_of_sn_cmd)
3040 {
3041         TRACE_ENTRY();
3042
3043         if (!out_of_sn_cmd->sn_set) {
3044                 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3045                 goto out;
3046         }
3047
3048         __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3049
3050 out:
3051         TRACE_EXIT();
3052         return;
3053 }
3054
3055 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3056 {
3057         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3058
3059         TRACE_ENTRY();
3060
3061         if (!cmd->hq_cmd_inced)
3062                 goto out;
3063
3064         spin_lock_irq(&tgt_dev->sn_lock);
3065         tgt_dev->hq_cmd_count--;
3066         spin_unlock_irq(&tgt_dev->sn_lock);
3067
3068         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3069
3070         /*
3071          * There is no problem in checking hq_cmd_count in the
3072          * non-locked state. In the worst case we will only have
3073          * unneeded run of the deferred commands.
3074          */
3075         if (tgt_dev->hq_cmd_count == 0)
3076                 scst_make_deferred_commands_active(tgt_dev, cmd);
3077
3078 out:
3079         TRACE_EXIT();
3080         return;
3081 }
3082
3083 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3084 {
3085         TRACE_ENTRY();
3086
3087         TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3088                 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3089                 atomic_read(&scst_cmd_count));
3090
3091         scst_done_cmd_mgmt(cmd);
3092
3093         smp_rmb();
3094         if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3095                 if (cmd->completed) {
3096                         /* It's completed and it's OK to return its result */
3097                         goto out;
3098                 }
3099                 
3100                 if (cmd->dev->tas) {
3101                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3102                                 "(tag %llu), returning TASK ABORTED ", cmd,
3103                                 cmd->tag);
3104                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3105                 } else {
3106                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3107                                 "(tag %llu), aborting without delivery or "
3108                                 "notification", cmd, cmd->tag);
3109                         /*
3110                          * There is no need to check/requeue possible UA,
3111                          * because, if it exists, it will be delivered
3112                          * by the "completed" branch above.
3113                          */
3114                         clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3115                 }
3116         }
3117
3118 out:
3119         TRACE_EXIT();
3120         return;
3121 }
3122
3123 void __init scst_scsi_op_list_init(void)
3124 {
3125         int i;
3126         uint8_t op = 0xff;
3127
3128         TRACE_ENTRY();
3129
3130         for (i = 0; i < 256; i++)
3131                 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3132
3133         for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3134                 if (scst_scsi_op_table[i].ops != op) {
3135                         op = scst_scsi_op_table[i].ops;
3136                         scst_scsi_op_list[op] = i;
3137                 }
3138         }
3139
3140         TRACE_EXIT();
3141         return;
3142 }
3143
3144 #ifdef DEBUG
3145 /* Original taken from the XFS code */
3146 unsigned long scst_random(void)
3147 {
3148         static int Inited;
3149         static unsigned long RandomValue;
3150         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
3151         /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3152         register long rv;
3153         register long lo;
3154         register long hi;
3155         unsigned long flags;
3156
3157         spin_lock_irqsave(&lock, flags);
3158         if (!Inited) {
3159                 RandomValue = jiffies;
3160                 Inited = 1;
3161         }
3162         rv = RandomValue;
3163         hi = rv / 127773;
3164         lo = rv % 127773;
3165         rv = 16807 * lo - 2836 * hi;
3166         if (rv <= 0) rv += 2147483647;
3167         RandomValue = rv;
3168         spin_unlock_irqrestore(&lock, flags);
3169         return rv;
3170 }
3171 #endif
3172
3173 #ifdef DEBUG_TM
3174
3175 #define TM_DBG_STATE_ABORT              0
3176 #define TM_DBG_STATE_RESET              1
3177 #define TM_DBG_STATE_OFFLINE            2
3178
3179 #define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT
3180
3181 static void tm_dbg_timer_fn(unsigned long arg);
3182
3183 static spinlock_t scst_tm_dbg_lock = SPIN_LOCK_UNLOCKED;
3184 /* All serialized by scst_tm_dbg_lock */
3185 struct
3186 {
3187         unsigned int tm_dbg_release:1;
3188         unsigned int tm_dbg_blocked:1;
3189 } tm_dbg_flags;
3190 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3191 static int tm_dbg_delayed_cmds_count;
3192 static int tm_dbg_passed_cmds_count;
3193 static int tm_dbg_state;
3194 static int tm_dbg_on_state_passes;
3195 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3196 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3197
3198 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3199
3200 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3201         struct scst_acg_dev *acg_dev)
3202 {
3203         if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3204                 unsigned long flags;
3205                 /* Do TM debugging only for LUN 0 */
3206                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3207                 tm_dbg_p_cmd_list_waitQ = 
3208                         &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3209                 tm_dbg_state = INIT_TM_DBG_STATE;
3210                 tm_dbg_on_state_passes =
3211                         tm_dbg_on_state_num_passes[tm_dbg_state];
3212                 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3213                 PRINT_INFO("LUN 0 connected from initiator %s is under "
3214                         "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3215                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3216         }
3217 }
3218
3219 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3220 {
3221         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3222                 unsigned long flags;
3223                 del_timer_sync(&tm_dbg_timer);
3224                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3225                 tm_dbg_p_cmd_list_waitQ = NULL;
3226                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3227         }
3228 }
3229
3230 static void tm_dbg_timer_fn(unsigned long arg)
3231 {
3232         TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3233         tm_dbg_flags.tm_dbg_release = 1;
3234         smp_wmb();
3235         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3236 }
3237
3238 /* Called under scst_tm_dbg_lock and IRQs off */
3239 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3240 {
3241         switch(tm_dbg_state) {
3242         case TM_DBG_STATE_ABORT:
3243                 if (tm_dbg_delayed_cmds_count == 0) {
3244                         unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3245                         TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3246                                 "for %ld.%ld seconds (%ld HZ), "
3247                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3248                                 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3249                         mod_timer(&tm_dbg_timer, jiffies + d);
3250 #if 0
3251                         tm_dbg_flags.tm_dbg_blocked = 1;
3252 #endif
3253                 } else {
3254                         TRACE_MGMT_DBG("Delaying another timed cmd %p "
3255                                 "(tag %llu), delayed_cmds_count=%d, "
3256                                 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3257                                 tm_dbg_delayed_cmds_count,
3258                                 tm_dbg_on_state_passes);
3259                         if (tm_dbg_delayed_cmds_count == 2)
3260                                 tm_dbg_flags.tm_dbg_blocked = 0;
3261                 }
3262                 break;
3263
3264         case TM_DBG_STATE_RESET:
3265         case TM_DBG_STATE_OFFLINE:
3266                 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3267                         "(tag %llu), delayed_cmds_count=%d, "
3268                         "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3269                         tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3270                 tm_dbg_flags.tm_dbg_blocked = 1;
3271                 break;
3272
3273         default:
3274                 sBUG();
3275         }
3276         /* IRQs already off */
3277         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3278         list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3279         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3280         cmd->tm_dbg_delayed = 1;
3281         tm_dbg_delayed_cmds_count++;
3282         return;
3283 }
3284
3285 /* No locks */
3286 void tm_dbg_check_released_cmds(void)
3287 {
3288         if (tm_dbg_flags.tm_dbg_release) {
3289                 struct scst_cmd *cmd, *tc;
3290                 spin_lock_irq(&scst_tm_dbg_lock);
3291                 list_for_each_entry_safe_reverse(cmd, tc, 
3292                                 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3293                         TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3294                                 "delayed_cmds_count=%d", cmd, cmd->tag,
3295                                 tm_dbg_delayed_cmds_count);
3296                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3297                         list_move(&cmd->cmd_list_entry,
3298                                 &cmd->cmd_lists->active_cmd_list);
3299                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3300                 }
3301                 tm_dbg_flags.tm_dbg_release = 0;
3302                 spin_unlock_irq(&scst_tm_dbg_lock);
3303         }
3304 }
3305
3306 /* Called under scst_tm_dbg_lock */
3307 static void tm_dbg_change_state(void)
3308 {
3309         tm_dbg_flags.tm_dbg_blocked = 0;
3310         if (--tm_dbg_on_state_passes == 0) {
3311                 switch(tm_dbg_state) {
3312                 case TM_DBG_STATE_ABORT:
3313                         TRACE_MGMT_DBG("%s", "Changing "
3314                             "tm_dbg_state to RESET");
3315                         tm_dbg_state =
3316                                 TM_DBG_STATE_RESET;
3317                         tm_dbg_flags.tm_dbg_blocked = 0;
3318                         break;
3319                 case TM_DBG_STATE_RESET:
3320                 case TM_DBG_STATE_OFFLINE:
3321                         if (TM_DBG_GO_OFFLINE) {
3322                             TRACE_MGMT_DBG("%s", "Changing "
3323                                     "tm_dbg_state to OFFLINE");
3324                             tm_dbg_state =
3325                                 TM_DBG_STATE_OFFLINE;
3326                         } else {
3327                             TRACE_MGMT_DBG("%s", "Changing "
3328                                     "tm_dbg_state to ABORT");
3329                             tm_dbg_state =
3330                                 TM_DBG_STATE_ABORT;
3331                         }
3332                         break;
3333                 default:
3334                         sBUG();
3335                 }
3336                 tm_dbg_on_state_passes =
3337                     tm_dbg_on_state_num_passes[tm_dbg_state];
3338         }
3339                 
3340         TRACE_MGMT_DBG("%s", "Deleting timer");
3341         del_timer(&tm_dbg_timer);
3342 }
3343
3344 /* No locks */
3345 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3346 {
3347         int res = 0;
3348         unsigned long flags;
3349
3350         if (cmd->tm_dbg_immut)
3351                 goto out;
3352
3353         if (cmd->tm_dbg_delayed) {
3354                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3355                 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3356                         "delayed_cmds_count=%d", cmd, cmd->tag,
3357                         tm_dbg_delayed_cmds_count);
3358
3359                 cmd->tm_dbg_immut = 1;
3360                 tm_dbg_delayed_cmds_count--;
3361                 if ((tm_dbg_delayed_cmds_count == 0) &&
3362                     (tm_dbg_state == TM_DBG_STATE_ABORT))
3363                         tm_dbg_change_state();
3364                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3365         } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3366                                         &cmd->tgt_dev->tgt_dev_flags)) {
3367                 /* Delay 50th command */
3368                 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3369                 if (tm_dbg_flags.tm_dbg_blocked ||
3370                     (++tm_dbg_passed_cmds_count % 50) == 0) {
3371                         tm_dbg_delay_cmd(cmd);
3372                         res = 1;
3373                 } else
3374                         cmd->tm_dbg_immut = 1;
3375                 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3376         }
3377
3378 out:
3379         return res;
3380 }
3381
3382 /* No locks */
3383 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3384 {
3385         struct scst_cmd *c;
3386         unsigned long flags;
3387
3388         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3389         list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3390                                 cmd_list_entry) {
3391                 if (c == cmd) {
3392                         TRACE_MGMT_DBG("Abort request for "
3393                                 "delayed cmd %p (tag=%llu), moving it to "
3394                                 "active cmd list (delayed_cmds_count=%d)",
3395                                 c, c->tag, tm_dbg_delayed_cmds_count);
3396
3397                         if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3398                                 /* Test how completed commands handled */
3399                                 if (((scst_random() % 10) == 5)) {
3400                                         scst_set_cmd_error(cmd,
3401                                            SCST_LOAD_SENSE(scst_sense_hardw_error));
3402                                         /* It's completed now */
3403                                 }
3404                         }
3405
3406                         spin_lock(&cmd->cmd_lists->cmd_list_lock);
3407                         list_move(&c->cmd_list_entry, 
3408                                 &c->cmd_lists->active_cmd_list);
3409                         wake_up(&c->cmd_lists->cmd_list_waitQ);
3410                         spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3411                         break;
3412                 }
3413         }
3414         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3415 }
3416
3417 /* Might be called under scst_mutex */
3418 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3419 {
3420         unsigned long flags;
3421
3422         if (dev != NULL) {
3423                 struct scst_tgt_dev *tgt_dev;
3424                 bool found = 0;
3425
3426                 spin_lock_bh(&dev->dev_lock);
3427                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3428                                             dev_tgt_dev_list_entry) {
3429                         if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3430                                         &tgt_dev->tgt_dev_flags)) {
3431                                 found = 1;
3432                                 break;
3433                         }
3434                 }
3435                 spin_unlock_bh(&dev->dev_lock);
3436
3437                 if (!found)
3438                         goto out;
3439         }
3440
3441         spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3442         if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3443                 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3444                         tm_dbg_delayed_cmds_count);
3445                 tm_dbg_change_state();
3446                 tm_dbg_flags.tm_dbg_release = 1;
3447                 smp_wmb();
3448                 if (tm_dbg_p_cmd_list_waitQ != NULL)
3449                         wake_up_all(tm_dbg_p_cmd_list_waitQ);
3450         } else {
3451                 TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
3452         }
3453         spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3454
3455 out:
3456         return;
3457 }
3458
3459 int tm_dbg_is_release(void)
3460 {
3461         return tm_dbg_flags.tm_dbg_release;
3462 }
3463 #endif /* DEBUG_TM */
3464
3465 #ifdef DEBUG_SN
3466 void scst_check_debug_sn(struct scst_cmd *cmd)
3467 {
3468         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
3469         static int type;
3470         static int cnt;
3471         unsigned long flags;
3472         int old = cmd->queue_type;
3473
3474         spin_lock_irqsave(&lock, flags);
3475
3476         if (cnt == 0) {
3477                 if ((scst_random() % 1000) == 500) {
3478                         if ((scst_random() % 3) == 1)
3479                                 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3480                         else
3481                                 type = SCST_CMD_QUEUE_ORDERED;
3482                         do {
3483                                 cnt = scst_random() % 10;
3484                         } while(cnt == 0);
3485                 } else
3486                         goto out_unlock;
3487         }
3488
3489         cmd->queue_type = type;
3490         cnt--;
3491
3492         if (((scst_random() % 1000) == 750))
3493                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3494         else if (((scst_random() % 1000) == 751))
3495                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3496         else if (((scst_random() % 1000) == 752))
3497                 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3498
3499         TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3500                 cmd->queue_type, cnt);
3501
3502 out_unlock:
3503         spin_unlock_irqrestore(&lock, flags);
3504         return;
3505 }
3506 #endif /* DEBUG_SN */