/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
 *                 and Leonid Stoljar
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/unistd.h>
#include <asm/string.h>

#ifdef SCST_HIGHMEM
#include <linux/highmem.h>
#endif

#include "scst_debug.h"
#include "scsi_tgt.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);

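/*
 * Sets the SAM status byte of cmd and prepares the command to be
 * completed with status only: the data direction is cleared and the
 * response data length is zeroed. All the error-reporting helpers
 * below build on this.
 */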
void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
	TRACE_ENTRY();

	cmd->status = status;
	cmd->masked_status = status >> 1;
	cmd->host_status = DID_OK;

	cmd->data_direction = SCST_DATA_NONE;
	cmd->tgt_resp_flags = SCST_TSC_FLAG_STATUS;
	cmd->resp_data_len = 0;

	TRACE_EXIT();
	return;
}

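/*
 * Builds a complete CHECK CONDITION response with the given sense
 * key/ASC/ASCQ triple. Callers typically expand a predefined sense
 * code with the SCST_LOAD_SENSE() macro, e.g. (as done in
 * scst_complete_request_sense() below):
 *
 *	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 */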
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
	scst_set_sense(cmd->sense_buffer, sizeof(cmd->sense_buffer),
		key, asc, ascq);
	TRACE_BUFFER("Sense set", cmd->sense_buffer, sizeof(cmd->sense_buffer));

	TRACE_EXIT();
	return;
}

void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
	unsigned int len)
{
	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
	memcpy(cmd->sense_buffer, sense, min((unsigned long)len,
		(unsigned long)sizeof(cmd->sense_buffer)));
	TRACE_BUFFER("Sense set", cmd->sense_buffer, sizeof(cmd->sense_buffer));

	TRACE_EXIT();
	return;
}

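/*
 * Reports a "device busy" condition. TASK SET FULL is returned when the
 * session is fully initialized and already has other commands queued;
 * otherwise plain BUSY is used, matching the distinction SAM draws
 * between the two statuses.
 */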
void scst_set_busy(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	if ((cmd->sess->sess_cmd_count <= 1) ||
	    (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
		scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
		TRACE_MGMT_DBG("Sending BUSY status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, cmd->sess->sess_cmd_count,
			cmd->queue_type, cmd->sess->init_phase);
	} else {
		scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
		TRACE_MGMT_DBG("Sending QUEUE_FULL status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, cmd->sess->sess_cmd_count,
			cmd->queue_type, cmd->sess->init_phase);
	}

	TRACE_EXIT();
	return;
}

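/*
 * Shrinks the response to resp_data_len bytes by truncating cmd's SG
 * vector in place. The original entry count and entry length are saved
 * in cmd so that scst_check_restore_sg_buff() can restore the vector
 * before the buffer is freed or reused.
 */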
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
	int i, l;

	TRACE_ENTRY();

	scst_check_restore_sg_buff(cmd);
	cmd->resp_data_len = resp_data_len;

	if (resp_data_len == cmd->bufflen)
		goto out;

	l = 0;
	for (i = 0; i < cmd->sg_cnt; i++) {
		l += cmd->sg[i].length;
		if (l >= resp_data_len) {
			int left = resp_data_len - (l - cmd->sg[i].length);
			TRACE(TRACE_SG, "cmd %p (tag %d), "
				"resp_data_len %d, i %d, cmd->sg[i].length %d, "
				"left %d", cmd, cmd->tag, resp_data_len, i,
				cmd->sg[i].length, left);
			cmd->orig_sg_cnt = cmd->sg_cnt;
			cmd->orig_sg_entry = i;
			cmd->orig_entry_len = cmd->sg[i].length;
			cmd->sg_cnt = i + 1;
			cmd->sg[i].length = left;
			cmd->sg_buff_modified = 1;
			break;
		}
	}

out:
	TRACE_EXIT();
	return;
}

struct scst_device *scst_alloc_device(int gfp_mask)
{
	struct scst_device *dev;

	TRACE_ENTRY();

	dev = kzalloc(sizeof(*dev), gfp_mask);
	if (dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_device failed");
		goto out;
	}

	spin_lock_init(&dev->dev_lock);
	atomic_set(&dev->on_dev_count, 0);
	INIT_LIST_HEAD(&dev->blocked_cmd_list);
	INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
	INIT_LIST_HEAD(&dev->dev_acg_dev_list);
	init_waitqueue_head(&dev->on_dev_waitQ);
	dev->dev_double_ua_possible = 1;
	dev->dev_serialized = 1;

out:
	TRACE_EXIT_HRES(dev);
	return dev;
}

void scst_free_device(struct scst_device *dev)
{
	TRACE_ENTRY();

#ifdef EXTRACHECKS
	if (!list_empty(&dev->dev_tgt_dev_list) ||
	    !list_empty(&dev->dev_acg_dev_list)) {
		PRINT_ERROR_PR("%s: dev_tgt_dev_list or dev_acg_dev_list "
			"is not empty!", __FUNCTION__);
		sBUG();
	}
#endif

	kfree(dev);

	TRACE_EXIT();
	return;
}

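/*
 * An access control group (ACG) ties devices, sessions and initiator
 * names together: each scst_acg_dev entry exports one device to the
 * group under the given LUN. The helpers below manipulate these
 * mappings, in most cases under scst_mutex (see the per-function
 * comments).
 */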
struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
	struct scst_device *dev, lun_t lun)
{
	struct scst_acg_dev *res;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
	res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
	if (res == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	memset(res, 0, sizeof(*res));
#endif

	res->dev = dev;
	res->acg = acg;
	res->lun = lun;

out:
	TRACE_EXIT_HRES(res);
	return res;
}

/* scst_mutex supposed to be held */
void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
	TRACE_ENTRY();

	TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_del(&acg_dev->acg_dev_list_entry);
	list_del(&acg_dev->dev_acg_dev_list_entry);

	kmem_cache_free(scst_acgd_cachep, acg_dev);

	TRACE_EXIT();
	return;
}

/* scst_mutex supposed to be held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
	struct scst_acg *acg;

	TRACE_ENTRY();

	acg = kzalloc(sizeof(*acg), GFP_KERNEL);
	if (acg == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
		goto out;
	}

	INIT_LIST_HEAD(&acg->acg_dev_list);
	INIT_LIST_HEAD(&acg->acg_sess_list);
	INIT_LIST_HEAD(&acg->acn_list);
	acg->acg_name = acg_name;

	TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
	list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
	TRACE_EXIT_HRES(acg);
	return acg;
}

/* scst_mutex supposed to be held */
int scst_destroy_acg(struct scst_acg *acg)
{
	struct scst_acn *n, *nn;
	struct scst_acg_dev *acg_dev, *acg_dev_tmp;
	int res = 0;

	TRACE_ENTRY();

	if (!list_empty(&acg->acg_sess_list)) {
		PRINT_ERROR_PR("%s: acg_sess_list is not empty!", __FUNCTION__);
		res = -EBUSY;
		goto out;
	}

	__scst_suspend_activity();

	TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
	list_del(&acg->scst_acg_list_entry);

	/* Freeing acg_devs */
	list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
		acg_dev_list_entry) {
		struct scst_tgt_dev *tgt_dev, *tt;
		list_for_each_entry_safe(tgt_dev, tt,
			 &acg_dev->dev->dev_tgt_dev_list,
			 dev_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev == acg_dev)
				scst_free_tgt_dev(tgt_dev);
		}
		scst_free_acg_dev(acg_dev);
	}

	__scst_resume_activity();

	/* Freeing names */
	list_for_each_entry_safe(n, nn, &acg->acn_list,
		acn_list_entry) {
		list_del(&n->acn_list_entry);
		kfree(n->name);
		kfree(n);
	}
	INIT_LIST_HEAD(&acg->acn_list);

	kfree(acg);
out:
	TRACE_EXIT_RES(res);
	return res;
}

/*
 * No spin locks supposed to be held, scst_mutex - held.
 * The activity is suspended.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev)
{
	struct scst_tgt_dev *tgt_dev;
	struct scst_device *dev = acg_dev->dev;
	int res;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
	tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
	if (tgt_dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_tgt_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

	tgt_dev->acg_dev = acg_dev;
	tgt_dev->sess = sess;
	tgt_dev->cmd_count = 0;

	if (dev->scsi_dev != NULL) {
		TRACE(TRACE_DEBUG, "host=%d, channel=%d, id=%d, lun=%d, "
		      "SCST lun=%Ld", dev->scsi_dev->host->host_no,
		      dev->scsi_dev->channel, dev->scsi_dev->id,
		      dev->scsi_dev->lun, (uint64_t)tgt_dev->acg_dev->lun);
	} else {
		TRACE_MGMT_DBG("Virtual device SCST lun=%Ld",
		      (uint64_t)tgt_dev->acg_dev->lun);
	}

	spin_lock_init(&tgt_dev->tgt_dev_lock);
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	spin_lock_init(&tgt_dev->sn_lock);
	INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
	INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);

	spin_lock_bh(&scst_temp_UA_lock);
	scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
		SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA));
	spin_unlock_bh(&scst_temp_UA_lock);

	tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

	if (dev->handler && dev->handler->attach_tgt) {
		TRACE_DBG("Calling dev handler's attach_tgt(%p)",
		      tgt_dev);
		res = dev->handler->attach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
		if (res != 0) {
			PRINT_ERROR_PR("Device handler's %s attach_tgt() "
			    "failed: %d", dev->handler->name, res);
			goto out_free;
		}
	}

	list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
	if (dev->dev_reserved)
		__set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);

	list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
		&sess->sess_tgt_dev_list);

out:
	TRACE_EXIT();
	return tgt_dev;

out_free:
	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
	tgt_dev = NULL;
	goto out;
}

static void scst_send_release(struct scst_tgt_dev *tgt_dev);

/*
 * No locks supposed to be held, scst_mutex - held.
 * The activity is suspended.
 */
void scst_reset_tgt_dev(struct scst_tgt_dev *tgt_dev, int nexus_loss)
{
	struct scst_device *dev = tgt_dev->acg_dev->dev;

	if (dev->dev_reserved &&
	    !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
		/* This is the one who holds the reservation */
		struct scst_tgt_dev *tgt_dev_tmp;
		list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
				    dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				    &tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;

		scst_send_release(tgt_dev);
	}

	spin_lock_bh(&scst_temp_UA_lock);
	if (nexus_loss) {
		scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
			SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
	} else {
		scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
			SCST_LOAD_SENSE(scst_sense_reset_UA));
	}
	scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA));
	spin_unlock_bh(&scst_temp_UA_lock);
}

/*
 * No locks supposed to be held, scst_mutex - held.
 * The activity is suspended.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->acg_dev->dev;

	TRACE_ENTRY();

	tm_dbg_deinit_tgt_dev(tgt_dev);

	list_del(&tgt_dev->dev_tgt_dev_list_entry);
	list_del(&tgt_dev->sess_tgt_dev_list_entry);

	scst_reset_tgt_dev(tgt_dev, 0);
	scst_free_all_UA(tgt_dev);

	if (dev->handler && dev->handler->detach_tgt) {
		TRACE_DBG("Calling dev handler's detach_tgt(%p)",
		      tgt_dev);
		dev->handler->detach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
	}

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);

	TRACE_EXIT();
	return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	__scst_suspend_activity();

	INIT_LIST_HEAD(&sess->sess_tgt_dev_list);
	list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
		acg_dev_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
	}

out_resume:
	__scst_resume_activity();

	TRACE_EXIT();
	return res;

out_free:
	scst_sess_free_tgt_devs(sess);
	goto out_resume;
}

/* scst_mutex supposed to be held and activity suspended */
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
	struct scst_tgt_dev *tgt_dev, *t;

	TRACE_ENTRY();

	/* The session is going down, no users, so no locks */
	list_for_each_entry_safe(tgt_dev, t, &sess->sess_tgt_dev_list,
				 sess_tgt_dev_list_entry) {
		scst_free_tgt_dev(tgt_dev);
	}
	INIT_LIST_HEAD(&sess->sess_tgt_dev_list);

	TRACE_EXIT();
	return;
}

/* scst_mutex supposed to be held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
	int read_only)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;
	struct scst_session *sess;
	LIST_HEAD(tmp_tgt_dev_list);

	TRACE_ENTRY();

#ifdef EXTRACHECKS
	list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
		if (acg_dev->dev == dev) {
			PRINT_ERROR_PR("Device is already in group %s",
				acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}
#endif

	acg_dev = scst_alloc_acg_dev(acg, dev, lun);
	if (acg_dev == NULL) {
		res = -ENOMEM;
		goto out;
	}
	acg_dev->rd_only_flag = read_only;

	__scst_suspend_activity();

	TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
	list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
			      &tmp_tgt_dev_list);
	}

out_resume:
	__scst_resume_activity();

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
			 extra_tgt_dev_list_entry) {
		scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);
	goto out_resume;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
	int res = 0;
	struct scst_acg_dev *acg_dev = NULL, *a;
	struct scst_tgt_dev *tgt_dev, *tt;

	TRACE_ENTRY();

	list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
		if (a->dev == dev) {
			acg_dev = a;
			break;
		}
	}

	if (acg_dev == NULL) {
		PRINT_ERROR_PR("Device is not found in group %s", acg->acg_name);
		res = -EINVAL;
		goto out;
	}

	__scst_suspend_activity();

	list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
		 dev_tgt_dev_list_entry) {
		if (tgt_dev->acg_dev == acg_dev)
			scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);

	__scst_resume_activity();

out:
	TRACE_EXIT_RES(res);
	return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
	int res = 0;
	struct scst_acn *n;
	int len;
	char *nm;

	TRACE_ENTRY();

	list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
		if (strcmp(n->name, name) == 0) {
			PRINT_ERROR_PR("Name %s already exists in access "
				"control group %s", name, acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (n == NULL) {
		PRINT_ERROR_PR("%s", "Unable to allocate scst_acn");
		res = -ENOMEM;
		goto out;
	}

	len = strlen(name);
	nm = kmalloc(len + 1, GFP_KERNEL);
	if (nm == NULL) {
		PRINT_ERROR_PR("%s", "Unable to allocate scst_acn->name");
		res = -ENOMEM;
		goto out_free;
	}

	strcpy(nm, name);
	n->name = nm;

	list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	kfree(n);
	goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
	int res = -EINVAL;
	struct scst_acn *n;

	TRACE_ENTRY();

	list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
		if (strcmp(n->name, name) == 0) {
			list_del(&n->acn_list_entry);
			kfree(n->name);
			kfree(n);
			res = 0;
			break;
		}
	}

	if (res != 0) {
		PRINT_ERROR_PR("Unable to find name %s in access control "
			"group %s", name, acg->acg_name);
	}

	TRACE_EXIT_RES(res);
	return res;
}

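/*
 * Internal commands are created by SCST itself on behalf of another
 * command (in this file only for REQUEST SENSE, see below). They are
 * queued HEAD OF QUEUE, so they are processed ahead of the other
 * commands pending in the same task set.
 */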
struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)
{
	struct scst_cmd *res;
	int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	TRACE_ENTRY();

	res = scst_alloc_cmd(gfp_mask);
	if (unlikely(res == NULL)) {
		goto out;
	}

	res->sess = orig_cmd->sess;
	res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->internal = 1;
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;

	res->bufflen = bufsize;
	if (bufsize > 0) {
		if (scst_alloc_space(res) != 0) {
			PRINT_ERROR_PR("Unable to create buffer (size %d) for "
				"internal cmd", bufsize);
			goto out_free_res;
		}
	}

out:
	TRACE_EXIT_HRES((unsigned long)res);
	return res;

out_free_res:
	scst_destroy_cmd(res);
	res = NULL;
	goto out;
}

void scst_free_internal_cmd(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	if (cmd->bufflen > 0)
		scst_release_space(cmd);
	scst_destroy_cmd(cmd);

	TRACE_EXIT();
	return;
}

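/*
 * Prepares and queues an internal REQUEST SENSE for orig_cmd;
 * scst_complete_request_sense() below later copies the returned sense
 * data into orig_cmd. Returns SCST_CMD_STATE_RES_RESTART on success
 * and -1 on failure.
 */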
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
	int res = SCST_CMD_STATE_RES_RESTART;
#define sbuf_size 252
	static const unsigned char request_sense[6] =
	    { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
	struct scst_cmd *rs_cmd;

	TRACE_ENTRY();

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
	if (rs_cmd == NULL)
		goto out_error;

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;

	spin_lock_irq(&scst_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
	spin_unlock_irq(&scst_list_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_error:
	res = -1;
	goto out;
#undef sbuf_size
}

struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
{
	struct scst_cmd *orig_cmd = cmd->orig_cmd;
	uint8_t *buf;
	int len;

	TRACE_ENTRY();

	sBUG_ON(orig_cmd == NULL);

	len = scst_get_buf_first(cmd, &buf);

	if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
	    (!SCST_NO_SENSE(buf))) {
		TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
			buf, len);
		memcpy(orig_cmd->sense_buffer, buf,
			(sizeof(orig_cmd->sense_buffer) > len) ?
				len : sizeof(orig_cmd->sense_buffer));
	} else {
		PRINT_ERROR_PR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	}

	scst_put_buf(cmd, buf);

	scst_free_internal_cmd(cmd);

	TRACE_EXIT_HRES((unsigned long)orig_cmd);
	return orig_cmd;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req;

	TRACE_ENTRY();

	if (scsi_cmd && (req = scsi_cmd->sc_request) != NULL) {
		if (req->sr_bufflen)
			kfree(req->sr_buffer);
		scsi_release_request(req);
	}

	TRACE_EXIT();
	return;
}

static void scst_send_release(struct scst_tgt_dev *tgt_dev)
{
	struct scsi_request *req;
	struct scsi_device *scsi_dev;
	uint8_t cdb[6];

	TRACE_ENTRY();

	if (tgt_dev->acg_dev->dev->scsi_dev == NULL)
		goto out;

	scsi_dev = tgt_dev->acg_dev->dev->scsi_dev;

	req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
	if (req == NULL) {
		PRINT_ERROR_PR("Allocation of scsi_request failed: unable "
			    "to RELEASE device %d:%d:%d:%d",
			    scsi_dev->host->host_no, scsi_dev->channel,
			    scsi_dev->id, scsi_dev->lun);
		goto out;
	}

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = RELEASE;
	cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
	    ((scsi_dev->lun << 5) & 0xe0) : 0;
	memcpy(req->sr_cmnd, cdb, sizeof(cdb));
	req->sr_cmd_len = sizeof(cdb);
	req->sr_data_direction = SCST_DATA_NONE;
	req->sr_use_sg = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_request->rq_disk = tgt_dev->acg_dev->dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
		"mid-level", req);
	scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
		    scst_req_done, SCST_DEFAULT_TIMEOUT, 3);

out:
	TRACE_EXIT();
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
static void scst_send_release(struct scst_tgt_dev *tgt_dev)
{
	struct scsi_device *scsi_dev;
	unsigned char cdb[6];
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	int rc;

	TRACE_ENTRY();

	if (tgt_dev->acg_dev->dev->scsi_dev == NULL)
		goto out;

	scsi_dev = tgt_dev->acg_dev->dev->scsi_dev;

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = RELEASE;
	cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
	    ((scsi_dev->lun << 5) & 0xe0) : 0;

	TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to SCSI "
		"mid-level");
	rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
			sense, SCST_DEFAULT_TIMEOUT,
			3, GFP_KERNEL);
	if (rc) {
		PRINT_INFO_PR("scsi_execute() failed: %d", rc);
		goto out;
	}

out:
	TRACE_EXIT();
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */

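/*
 * Allocates a session and initializes the fields SCST itself needs.
 * The session starts in the SCST_SESS_IPH_INITING phase; the caller
 * drives the rest of the registration. gfp_mask is passed in because
 * target drivers may need to create sessions from atomic context.
 */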
struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
	const char *initiator_name)
{
	struct scst_session *sess;
	int len;
	char *nm;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
	sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
	if (sess == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_session failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	memset(sess, 0, sizeof(*sess));
#endif

	sess->init_phase = SCST_SESS_IPH_INITING;
	atomic_set(&sess->refcnt, 0);
	INIT_LIST_HEAD(&sess->sess_tgt_dev_list);
	INIT_LIST_HEAD(&sess->search_cmd_list);
	sess->tgt = tgt;
	INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
	INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

	len = strlen(initiator_name);
	nm = kmalloc(len + 1, gfp_mask);
	if (nm == NULL) {
		PRINT_ERROR_PR("%s", "Unable to allocate sess->initiator_name");
		goto out_free;
	}

	strcpy(nm, initiator_name);
	sess->initiator_name = nm;

out:
	TRACE_EXIT();
	return sess;

out_free:
	kmem_cache_free(scst_sess_cachep, sess);
	sess = NULL;
	goto out;
}

void scst_free_session(struct scst_session *sess)
{
	TRACE_ENTRY();

	down(&scst_mutex);
	TRACE_DBG("Removing sess %p from the list", sess);
	list_del(&sess->sess_list_entry);
	TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
	list_del(&sess->acg_sess_list_entry);

	__scst_suspend_activity();
	scst_sess_free_tgt_devs(sess);
	__scst_resume_activity();

	wake_up_all(&sess->tgt->unreg_waitQ);

	up(&scst_mutex);

	kfree(sess->initiator_name);
	kmem_cache_free(scst_sess_cachep, sess);

	TRACE_EXIT();
	return;
}

void scst_free_session_callback(struct scst_session *sess)
{
	struct semaphore *shm;

	TRACE_ENTRY();

	TRACE_DBG("Freeing session %p", sess);

	shm = sess->shutdown_mutex;

	if (sess->unreg_done_fn) {
		TRACE_DBG("Calling unreg_done_fn(%p)", sess);
		sess->unreg_done_fn(sess);
		TRACE_DBG("%s", "unreg_done_fn() returned");
	}
	scst_free_session(sess);

	if (shm)
		up(shm);

	TRACE_EXIT();
	return;
}

void scst_sched_session_free(struct scst_session *sess)
{
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&scst_mgmt_lock, flags);
	TRACE_DBG("Adding sess %p to scst_sess_mgmt_list", sess);
	list_add_tail(&sess->sess_mgmt_list_entry, &scst_sess_mgmt_list);
	spin_unlock_irqrestore(&scst_mgmt_lock, flags);

	wake_up(&scst_mgmt_waitQ);

	TRACE_EXIT();
	return;
}

struct scst_cmd *scst_alloc_cmd(int gfp_mask)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
	cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
	if (cmd == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	memset(cmd, 0, sizeof(*cmd));
#endif

	cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
	cmd->timeout = SCST_DEFAULT_TIMEOUT;
	cmd->retries = 1;
	cmd->data_len = -1;
	cmd->tgt_resp_flags = SCST_TSC_FLAG_STATUS;
	cmd->resp_data_len = -1;

out:
	TRACE_EXIT();
	return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
	scst_sess_put(cmd->sess);

	/* At this point tgt_dev can be dead, but the pointer remains non-NULL */
	if (likely(cmd->tgt_dev != NULL))
		scst_dec_cmd_count();

	scst_destroy_cmd(cmd);
	return;
}

/* No locks supposed to be held. Must be called only from scst_finish_cmd()! */
void scst_free_cmd(struct scst_cmd *cmd)
{
	int destroy = 1;

	TRACE_ENTRY();

	sBUG_ON(cmd->blocking);

#if defined(EXTRACHECKS) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
	if (cmd->scsi_req) {
		PRINT_ERROR_PR("%s: %s", __FUNCTION__, "Cmd with unfreed "
			"scsi_req!");
		scst_release_request(cmd);
	}
#endif

	if (cmd->tgtt->on_free_cmd != NULL) {
		TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
		cmd->tgtt->on_free_cmd(cmd);
		TRACE_DBG("%s", "Target's on_free_cmd() returned");
	}

	if (likely(cmd->dev != NULL)) {
		struct scst_dev_type *handler = cmd->dev->handler;
		if (handler->on_free_cmd != NULL) {
			TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
			      handler->name, cmd);
			handler->on_free_cmd(cmd);
			TRACE_DBG("Dev handler %s on_free_cmd() returned",
				handler->name);
		}
	}

	scst_release_space(cmd);

	if (likely(cmd->tgt_dev != NULL)) {
#ifdef EXTRACHECKS
		if (cmd->sent_to_midlev == 0) {
			PRINT_ERROR_PR("Finishing not executed cmd (opcode %d, "
			     "target %s, lun %Ld, sn %d, expected_sn %d)",
			     cmd->cdb[0], cmd->tgtt->name, (uint64_t)cmd->lun,
			     cmd->sn, cmd->tgt_dev->expected_sn);
			scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
		}
#endif
		if (unlikely(test_bit(SCST_CMD_OUT_OF_SN,
				&cmd->cmd_flags))) {
			spin_lock_bh(&cmd->tgt_dev->sn_lock);
			set_bit(SCST_CMD_CAN_BE_DESTROYED,
				&cmd->cmd_flags);
			barrier(); /* to reread SCST_CMD_OUT_OF_SN */
			destroy = !test_bit(SCST_CMD_OUT_OF_SN,
					&cmd->cmd_flags);
			TRACE(TRACE_SCSI_SERIALIZING, "Out of SN "
				"cmd %p (tag %d, sn %d), destroy=%d", cmd,
				cmd->tag, cmd->sn, destroy);
			spin_unlock_bh(&cmd->tgt_dev->sn_lock);
		}
	}

	if (likely(destroy))
		scst_destroy_put_cmd(cmd);

	TRACE_EXIT();
	return;
}

/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt, int processible_env)
{
	int need_wake_up = 0;

	TRACE_ENTRY();

	/*
	 * We don't worry about overflow of finished_cmds, because we check
	 * only for its change
	 */
	atomic_inc(&tgt->finished_cmds);
	smp_mb__after_atomic_inc();
	if (unlikely(tgt->retry_cmds > 0)) {
		struct scst_cmd *c, *tc;
		unsigned long flags;

		TRACE(TRACE_RETRY, "Checking retry cmd list (retry_cmds %d)",
		      tgt->retry_cmds);

		spin_lock_irqsave(&tgt->tgt_lock, flags);
		spin_lock(&scst_list_lock);

		list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
				cmd_list_entry) {
			tgt->retry_cmds--;

			TRACE(TRACE_RETRY, "Moving retry cmd %p to active cmd "
			    "list (retry_cmds left %d)", c, tgt->retry_cmds);
			list_move(&c->cmd_list_entry, &scst_active_cmd_list);

			need_wake_up++;
			if (need_wake_up >= 2) /* "slow start" */
				break;
		}

		spin_unlock(&scst_list_lock);
		spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	}

	if (need_wake_up && !processible_env)
		wake_up(&scst_list_waitQ);

	TRACE_EXIT();
	return;
}

void scst_tgt_retry_timer_fn(unsigned long arg)
{
	struct scst_tgt *tgt = (struct scst_tgt *)arg;
	unsigned long flags;

	TRACE(TRACE_RETRY, "Retry timer expired (retry_cmds %d)",
		tgt->retry_cmds);

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_timer_active = 0;
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	scst_check_retries(tgt, 0);

	TRACE_EXIT();
	return;
}

struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
{
	struct scst_mgmt_cmd *mcmd;

	TRACE_ENTRY();

	mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
	if (mcmd == NULL) {
		PRINT_ERROR_PR("%s", "Allocation of management command "
			"failed, some commands and their data could leak");
		goto out;
	}
	memset(mcmd, 0, sizeof(*mcmd));

out:
	TRACE_EXIT();
	return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int del)
{
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&scst_list_lock, flags);
	if (del)
		list_del(&mcmd->mgmt_cmd_list_entry);
	mcmd->sess->sess_cmd_count--;
	spin_unlock_irqrestore(&scst_list_lock, flags);

	scst_sess_put(mcmd->sess);

	if (mcmd->mcmd_tgt_dev != NULL)
		scst_dec_cmd_count();

	mempool_free(mcmd, scst_mgmt_mempool);

	TRACE_EXIT();
	return;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
int scst_alloc_request(struct scst_cmd *cmd)
{
	int res = 0;
	struct scsi_request *req;
	int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

	TRACE_ENTRY();

	/* cmd->dev->scsi_dev must be non-NULL here */
	req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
	if (req == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scsi_request failed");
		res = -ENOMEM;
		goto out;
	}

	cmd->scsi_req = req;

	memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
	req->sr_cmd_len = cmd->cdb_len;
	req->sr_data_direction = cmd->data_direction;
	req->sr_use_sg = cmd->sg_cnt;
	req->sr_bufflen = cmd->bufflen;
	req->sr_buffer = cmd->sg;
	req->sr_request->rq_disk = cmd->dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	cmd->scsi_req->upper_private_data = cmd;

out:
	TRACE_EXIT();
	return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
	scsi_release_request(cmd->scsi_req);
	cmd->scsi_req = NULL;
}
#endif

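/*
 * Allocates cmd's data buffer as an SG vector. The sgv_pool to use is
 * derived from the constraints of both the target driver and the
 * underlying SCSI host: page clustering, ISA DMA and (when
 * SCST_HIGHMEM is set) highmem. The resulting SG entry count is then
 * validated against the SG table size limits of both sides.
 */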
int scst_alloc_space(struct scst_cmd *cmd)
{
	int tgt_sg = cmd->tgt->sg_tablesize;
	int ini_sg;
	int gfp_mask;
	int res = -ENOMEM;
	int ini_unchecked_isa_dma, ini_use_clustering;
	int use_clustering = 0;
	struct sgv_pool *pool;

	TRACE_ENTRY();

	if (cmd->data_buf_alloced) {
		TRACE_MEM("%s", "data_buf_alloced set, returning");
		sBUG_ON(cmd->sg == NULL);
		res = 0;
		goto out;
	}

	gfp_mask = __GFP_NOWARN;
	gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
	pool = &scst_sgv.norm;

	if (cmd->dev->scsi_dev != NULL) {
		ini_sg = cmd->dev->scsi_dev->host->sg_tablesize;
		ini_unchecked_isa_dma =
			cmd->dev->scsi_dev->host->unchecked_isa_dma;
		ini_use_clustering =
			(cmd->dev->scsi_dev->host->use_clustering ==
				ENABLE_CLUSTERING);
	} else {
		ini_sg = (1 << 15) /* infinite */;
		ini_unchecked_isa_dma = 0;
		ini_use_clustering = 0;
	}

	if ((cmd->tgtt->use_clustering || ini_use_clustering) &&
	    !cmd->tgtt->no_clustering) {
		TRACE_MEM("%s", "Use clustering");
		pool = &scst_sgv.norm_clust;
		use_clustering = 1;
	}

	if (cmd->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
		TRACE_MEM("%s", "Use ISA DMA memory");
		gfp_mask |= GFP_DMA;
		pool = &scst_sgv.dma;
	} else {
#ifdef SCST_HIGHMEM
		gfp_mask |= __GFP_HIGHMEM;
		pool = &scst_sgv.highmem;
#endif
	}

	if (cmd->no_sgv) {
		cmd->sg = scst_alloc(cmd->bufflen, gfp_mask, use_clustering,
			&cmd->sg_cnt);
		if (cmd->sg == NULL)
			goto out;
	} else {
		cmd->sg = sgv_pool_alloc(pool, cmd->bufflen, gfp_mask,
				&cmd->sg_cnt, &cmd->sgv);
		if (cmd->sg == NULL)
			goto out;
	}

	if (unlikely(cmd->sg_cnt > ini_sg)) {
		static int ll;
		if (ll < 10) {
			PRINT_INFO("Unable to complete command due to "
				"underlying device SG IO count limitation "
				"(requested %d, available %d)", cmd->sg_cnt,
				ini_sg);
			ll++;
		}
		goto out_sg_free;
	}

	if (unlikely(cmd->sg_cnt > tgt_sg)) {
		static int ll;
		if (ll < 10) {
			PRINT_INFO("Unable to complete command due to "
				"target device %s SG IO count limitation "
				"(requested %d, available %d)", cmd->tgtt->name,
				cmd->sg_cnt, tgt_sg);
			ll++;
		}
		goto out_sg_free;
	}

	res = 0;

out:
	TRACE_EXIT();
	return res;

out_sg_free:
	if (cmd->no_sgv)
		scst_free(cmd->sg, cmd->sg_cnt);
	else
		sgv_pool_free(cmd->sgv);
	cmd->sgv = NULL;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	goto out;
}

void scst_release_space(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	if (cmd->data_buf_alloced) {
		TRACE_MEM("%s", "data_buf_alloced set, returning");
		goto out;
	}

	if (cmd->sgv) {
		scst_check_restore_sg_buff(cmd);
		sgv_pool_free(cmd->sgv);
	} else if (cmd->sg)
		scst_free(cmd->sg, cmd->sg_cnt);

	cmd->sgv = NULL;
	cmd->sg_cnt = 0;
	cmd->sg = NULL;
	cmd->bufflen = 0;
	cmd->data_len = 0;

out:
	TRACE_EXIT();
	return;
}

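/*
 * Returns a mapped pointer to the next SG entry of cmd's data buffer
 * and its length, or 0 when the vector is exhausted. It is normally
 * called through wrappers such as scst_get_buf_first(), used elsewhere
 * in this file; a sketch of the intended iteration, assuming the
 * matching scst_get_buf_next()/scst_put_buf() helpers from the SCST
 * headers:
 *
 *	len = scst_get_buf_first(cmd, &buf);
 *	while (len > 0) {
 *		... process len bytes at buf ...
 *		scst_put_buf(cmd, buf);
 *		len = scst_get_buf_next(cmd, &buf);
 *	}
 */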
int __scst_get_buf(struct scst_cmd *cmd, uint8_t **buf)
{
	int res = 0;
	struct scatterlist *sg = cmd->sg;
	int i = cmd->get_sg_buf_entry_num;

	TRACE_ENTRY();

	*buf = NULL;

	if (i >= cmd->sg_cnt)
		goto out;
#ifdef SCST_HIGHMEM
	/*
	 * HIGHMEM pages are not merged (clustered), so if it's
	 * not a HIGHMEM page, kmap() is the same as page_address()
	 */
	if (scst_cmd_atomic(cmd)) {
		enum km_type km;
		if (in_softirq())
			km = KM_SOFTIRQ0;
		else
			km = KM_USER0;
		*buf = kmap_atomic(sg[i].page, km);
	} else
		*buf = kmap(sg[i].page);
#else
	*buf = page_address(sg[i].page);
#endif
	*buf += sg[i].offset;
	res = sg[i].length;
	cmd->get_sg_buf_entry_num++;

out:
	TRACE_EXIT_RES(res);
	return res;
}

static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };

#define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
#define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]

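/*
 * The CDB length is encoded in the opcode's top three bits (the group
 * code): group 0 is 6 bytes, groups 1 and 2 are 10 bytes, group 4 is
 * 16 bytes and group 5 is 12 bytes; -1 marks reserved or
 * vendor-specific groups. For example, READ(10) (opcode 0x28) falls in
 * group 1, giving a 10-byte CDB.
 */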
int scst_get_cdb_len(const uint8_t *cdb)
{
	return SCST_GET_CDB_LEN(cdb[0]);
}

/* get_trans_len_x extracts x bytes from cdb as the length, starting from off */

static uint32_t get_trans_len_1(const uint8_t *cdb, uint8_t off)
{
	return (*(cdb + off));
}

static uint32_t get_trans_len_2(const uint8_t *cdb, uint8_t off)
{
	return be16_to_cpu(*((uint16_t *)(cdb + off)));
}

static uint32_t get_trans_len_3(const uint8_t *cdb, uint8_t off)
{
	const uint8_t *p = cdb + off;

	return ((*p) << 16) + (*(p + 1) << 8) + *(p + 2);
}

static uint32_t get_trans_len_4(const uint8_t *cdb, uint8_t off)
{
	return be32_to_cpu(*((uint32_t *)(cdb + off)));
}

/* for special commands */
static uint32_t get_trans_len_block_limit(const uint8_t *cdb, uint8_t off)
{
	return 6;
}

static uint32_t get_trans_len_read_capacity(const uint8_t *cdb, uint8_t off)
{
	return 8;
}

static uint32_t get_trans_len_single(const uint8_t *cdb, uint8_t off)
{
	return 1;
}

static uint32_t get_trans_len_none(const uint8_t *cdb, uint8_t off)
{
	return 0;
}

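/*
 * Table-driven CDB decoding: scst_scsi_op_list[] (from scst_cdbprobe.h)
 * maps an opcode to its first row in scst_scsi_op_table[], and each
 * row's devkey[] tells whether the opcode applies to the given device
 * type. On a match, the data direction, flags and transfer length (via
 * the row's get_trans_len callback) are copied into *info_p.
 */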
int scst_get_cdb_info(const uint8_t *cdb_p, int dev_type,
		      struct scst_info_cdb *info_p)
{
	int i, res = 0;
	uint8_t op;
	const struct scst_sdbops *ptr = NULL;

	TRACE_ENTRY();

	op = *cdb_p;	/* get the opcode */

	TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%zd, "
		"dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
		dev_type);

	i = scst_scsi_op_list[op];
	while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
		if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
			ptr = &scst_scsi_op_table[i];
			TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
			      ptr->ops, ptr->devkey[0],	/* disk     */
			      ptr->devkey[1],	/* tape     */
			      ptr->devkey[2],	/* printer  */
			      ptr->devkey[3],	/* cpu      */
			      ptr->devkey[4],	/* cdr      */
			      ptr->devkey[5],	/* cdrom    */
			      ptr->devkey[6],	/* scanner  */
			      ptr->devkey[7],	/* worm     */
			      ptr->devkey[8],	/* changer  */
			      ptr->devkey[9],	/* commdev  */
			      ptr->op_name);
			TRACE_DBG("direction=%d flags=%d off=%d",
			      ptr->direction,
			      ptr->flags,
			      ptr->off);
			break;
		}
		i++;
	}

	if (ptr == NULL) {
		/* opcode not found or not supported for this device type */
		TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
		      dev_type);
		res = -1;
		goto out;
	}

	info_p->cdb_len = SCST_GET_CDB_LEN(op);
	info_p->op_name = ptr->op_name;
	info_p->direction = ptr->direction;
	info_p->flags = ptr->flags;
	info_p->transfer_len = (*ptr->get_trans_len)(cdb_p, ptr->off);

#ifdef EXTRACHECKS
	if (unlikely((info_p->transfer_len == 0) &&
		     (info_p->direction != SCST_DATA_NONE))) {
		TRACE_DBG("Warning! transfer_len 0, changing direction %d "
			"to %d", info_p->direction, SCST_DATA_NONE);
		info_p->direction = SCST_DATA_NONE;
	}
#endif

out:
	TRACE_EXIT();
	return res;
}

/*
 * Routine to extract a lun number from an 8-byte LUN structure
 * in network byte order (BE).
 * (see SAM-2, Section 4.12.3 page 40)
 * Supports the peripheral, flat space and logical unit addressing methods.
 */
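/*
 * For example, the two-byte LUN 0x40 0x05 selects the flat space
 * method (top bits 01) and decodes to LUN 5; 0x00 0x05 uses the
 * peripheral method, which this implementation treats the same way.
 */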
lun_t scst_unpack_lun(const uint8_t *lun, int len)
{
	lun_t res = (lun_t)-1;
	int address_method;

	TRACE_ENTRY();

	TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);

	if (len < 2) {
		PRINT_ERROR_PR("Illegal lun length %d, expected 2 bytes or "
			"more", len);
		goto out;
	}

	if (len > 2) {
		switch (len) {
		case 8:
		{
			if ((*((uint64_t *)lun) &
			  __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
				goto out_err;
			break;
		}
		case 4:
			if (*((uint16_t *)&lun[2]) != 0)
				goto out_err;
			break;
		case 6:
			if (*((uint32_t *)&lun[2]) != 0)
				goto out_err;
			break;
		default:
			goto out_err;
		}
	}

	address_method = (*lun) >> 6;	/* high 2 bits of byte 0 */
	switch (address_method) {
	case 0:	/* peripheral device addressing method */
#if 0 /* Looks like it's legal to use it as flat space addressing method as well */
		if (*lun) {
			PRINT_ERROR_PR("Illegal BUS IDENTIFIER in LUN "
			     "peripheral device addressing method 0x%02x, "
			     "expected 0", *lun);
			break;
		}
		res = *(lun + 1);
		break;
#else
		/* fall through */
#endif

	case 1:	/* flat space addressing method */
		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
		break;

	case 2:	/* logical unit addressing method */
		if (*lun & 0x3f) {
			PRINT_ERROR_PR("Illegal BUS NUMBER in LUN logical unit "
				    "addressing method 0x%02x, expected 0",
				    *lun & 0x3f);
			break;
		}
		if (*(lun + 1) & 0xe0) {
			PRINT_ERROR_PR("Illegal TARGET in LUN logical unit "
				    "addressing method 0x%02x, expected 0",
				    (*(lun + 1) & 0xe0) >> 5);
			break;
		}
		res = *(lun + 1) & 0x1f;
		break;

	case 3:	/* extended logical unit addressing method */
	default:
		PRINT_ERROR_PR("Unimplemented LUN addressing method %u",
			    address_method);
		break;
	}

out:
	TRACE_EXIT_RES((int)res);
	return res;

out_err:
	PRINT_ERROR_PR("%s", "Multi-level LUN unimplemented");
	goto out;
}


/* Called under dev_lock and BH off */
void scst_process_reset(struct scst_device *dev,
        struct scst_session *originator, struct scst_cmd *exclude_cmd,
        struct scst_mgmt_cmd *mcmd)
{
        struct scst_tgt_dev *tgt_dev;
        struct scst_cmd *cmd, *tcmd;
        int wake = 0;

        TRACE_ENTRY();

        /* Clear the RESERVE reservation, if necessary */
        if (dev->dev_reserved) {
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) {
                        TRACE(TRACE_MGMT, "Clearing reservation for tgt_dev "
                                "lun %d", tgt_dev->acg_dev->lun);
                        clear_bit(SCST_TGT_DEV_RESERVED,
                                  &tgt_dev->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
                /*
                 * There is no need to send RELEASE, since the device is
                 * going to be reset
                 */
        }

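        /*
         * After a reset a "double" UNIT ATTENTION is possible, so mark the
         * device accordingly and force serialized command processing on it.
         */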
        dev->dev_double_ua_possible = 1;
        dev->dev_serialized = 1;

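        /* Queue a RESET UA on all tgt_devs of this device (except exclude_cmd's) */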
        /* BH already off */
        spin_lock(&scst_temp_UA_lock);
        scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
                SCST_LOAD_SENSE(scst_sense_reset_UA));
        __scst_process_UA(dev, exclude_cmd, scst_temp_UA, sizeof(scst_temp_UA),
                1);
        spin_unlock(&scst_temp_UA_lock);

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                dev_tgt_dev_list_entry) {
                struct scst_session *sess = tgt_dev->sess;

                local_bh_disable();
                spin_lock_irq(&scst_list_lock);

                TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
                list_for_each_entry(cmd, &sess->search_cmd_list,
                                search_cmd_list_entry) {
                        if (cmd == exclude_cmd)
                                continue;
                        if ((cmd->tgt_dev == tgt_dev) ||
                            ((cmd->tgt_dev == NULL) &&
                             (cmd->lun == tgt_dev->acg_dev->lun))) {
                                scst_abort_cmd(cmd, mcmd,
                                        (tgt_dev->sess != originator), 0);
                        }
                }
                spin_unlock_irq(&scst_list_lock);
                local_bh_enable();
        }

        list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                blocked_cmd_list_entry) {
                if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
                        list_del(&cmd->blocked_cmd_list_entry);
                        TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
                                "to active cmd list", cmd);
                        spin_lock_irq(&scst_list_lock);
                        list_move_tail(&cmd->cmd_list_entry,
                                &scst_active_cmd_list);
                        spin_unlock_irq(&scst_list_lock);
                        wake = 1;
                }
        }

        if (wake)
                wake_up(&scst_list_waitQ);

        TRACE_EXIT();
        return;
}

int scst_set_pending_UA(struct scst_cmd *cmd)
{
        int res = 0;
        struct scst_tgt_dev_UA *UA_entry;

        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);

        spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);

        /* UA list could be cleared behind us, so retest */
        if (list_empty(&cmd->tgt_dev->UA_list)) {
                TRACE_DBG("%s",
                      "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
                res = -1;
                goto out_unlock;
        }

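        /* Report the oldest queued UA as this command's sense and consume it */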
        UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
                              UA_list_entry);

        TRACE_DBG("next %p UA_entry %p",
              cmd->tgt_dev->UA_list.next, UA_entry);

        scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
                sizeof(UA_entry->UA_sense_buffer));

        cmd->ua_ignore = 1;

        list_del(&UA_entry->UA_list_entry);

        mempool_free(UA_entry, scst_ua_mempool);

        if (list_empty(&cmd->tgt_dev->UA_list)) {
                clear_bit(SCST_TGT_DEV_UA_PENDING,
                          &cmd->tgt_dev->tgt_dev_flags);
        }

        spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);

out:
        TRACE_EXIT_RES(res);
        return res;

out_unlock:
        spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
        goto out;
}

/* Called under dev_lock, tgt_dev_lock and BH off */
void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len)
{
        struct scst_tgt_dev_UA *UA_entry = NULL;

        TRACE_ENTRY();

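        /* Atomic allocation, since we are called with locks held and BH off */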
        UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
        if (UA_entry == NULL) {
                PRINT_ERROR_PR("%s", "UNIT ATTENTION memory "
                     "allocation failed. The UNIT ATTENTION "
                     "on some sessions will be missed");
                goto out;
        }
        memset(UA_entry, 0, sizeof(*UA_entry));

        if (sense_len > sizeof(UA_entry->UA_sense_buffer))
                sense_len = sizeof(UA_entry->UA_sense_buffer);
        memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
        set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
        smp_mb__after_set_bit();
        list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);

out:
        TRACE_EXIT();
        return;
}

/* Called under dev_lock and BH off */
void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len)
{
        int skip_UA = 0;
        struct scst_tgt_dev_UA *UA_entry_tmp;

        TRACE_ENTRY();

        spin_lock(&tgt_dev->tgt_dev_lock);

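        /*
         * Byte 12 of the sense data is the ASC; don't queue a new UA if one
         * with the same ASC is already pending.
         */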
        list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
                            UA_list_entry) {
                if (sense[12] == UA_entry_tmp->UA_sense_buffer[12]) {
                        skip_UA = 1;
                        break;
                }
        }

        if (skip_UA == 0)
                scst_alloc_set_UA(tgt_dev, sense, sense_len);

        spin_unlock(&tgt_dev->tgt_dev_lock);

        TRACE_EXIT();
        return;
}

/* Called under dev_lock and BH off */
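/*
 * 'internal' is nonzero when we are called from scst_process_reset(), to
 * avoid recursing on a reset UA.
 */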
void __scst_process_UA(struct scst_device *dev,
        struct scst_cmd *exclude, const uint8_t *sense, int sense_len,
        int internal)
{
        struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;

        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Processing UA dev %p", dev);

        if (exclude != NULL)
                exclude_tgt_dev = exclude->tgt_dev;

        /* Check for a reset UA */
        if (!internal && (sense[12] == SCST_SENSE_ASC_UA_RESET)) {
                scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
                        exclude, NULL);
        }

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                                dev_tgt_dev_list_entry) {
                if (tgt_dev != exclude_tgt_dev)
                        scst_check_set_UA(tgt_dev, sense, sense_len);
        }

        TRACE_EXIT();
        return;
}

/* Called under tgt_dev_lock or when tgt_dev is unused */
void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
{
        struct scst_tgt_dev_UA *UA_entry, *t;

        TRACE_ENTRY();

        list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
                TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %d",
                        tgt_dev->acg_dev->lun);
                list_del(&UA_entry->UA_list_entry);
                /* UA entries come from scst_ua_mempool, so don't kfree() them */
                mempool_free(UA_entry, scst_ua_mempool);
        }
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);

        TRACE_EXIT();
        return;
}

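/*
 * Look for a deferred command with SN == expected_sn and return it, or NULL.
 * Commands found on the skipped_sn_list were already executed out of order,
 * so they only advance expected_sn (and may be destroyed here).
 */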
struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev,
        int expected_sn)
{
        struct scst_cmd *cmd = NULL, *tcmd;

        if (tgt_dev->def_cmd_count == 0)
                goto out;

        spin_lock_bh(&tgt_dev->sn_lock);

restart:
        list_for_each_entry(tcmd, &tgt_dev->deferred_cmd_list,
                                sn_cmd_list_entry) {
                if (tcmd->sn == expected_sn) {
                        TRACE(TRACE_SCSI_SERIALIZING,
                              "Deferred command sn %d found", tcmd->sn);
                        tgt_dev->def_cmd_count--;
                        list_del(&tcmd->sn_cmd_list_entry);
                        cmd = tcmd;
                        goto out_unlock;
                }
        }

        list_for_each_entry(tcmd, &tgt_dev->skipped_sn_list,
                                sn_cmd_list_entry) {
                if (tcmd->sn == expected_sn) {
                        /*
                         * !! At this point any pointer in tcmd, except      !!
                         * !! sn_cmd_list_entry, could be already destroyed  !!
                         */
                        TRACE(TRACE_SCSI_SERIALIZING,
                              "cmd %p (tag %d) with skipped sn %d found", tcmd,
                              tcmd->tag, tcmd->sn);
                        tgt_dev->def_cmd_count--;
                        list_del(&tcmd->sn_cmd_list_entry);
                        if (test_bit(SCST_CMD_CAN_BE_DESTROYED,
                                        &tcmd->cmd_flags)) {
                                scst_destroy_put_cmd(tcmd);
                        } else {
                                smp_mb__before_clear_bit();
                                clear_bit(SCST_CMD_OUT_OF_SN, &tcmd->cmd_flags);
                        }
                        expected_sn = __scst_inc_expected_sn(tgt_dev);
                        goto restart;
                }
        }

out_unlock:
        spin_unlock_bh(&tgt_dev->sn_lock);

out:
        return cmd;
}

/* No locks */
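/*
 * Returns 1 if the command was put on the device's blocked_cmd_list and must
 * wait, 0 if it may proceed on the device now.
 */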
int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
{
        int res = 0;
        struct scst_device *dev = cmd->dev;

        sBUG_ON(cmd->blocking);

        atomic_inc(&dev->on_dev_count);

#ifdef STRICT_SERIALIZING
        spin_lock_bh(&dev->dev_lock);
        if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
                goto out_unlock;
        if (dev->block_count > 0) {
                scst_dec_on_dev_cmd(cmd);
                TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
                        "serializing (tag %d, dev %p)", cmd, cmd->tag, dev);
                list_add_tail(&cmd->blocked_cmd_list_entry,
                              &dev->blocked_cmd_list);
                res = 1;
        } else {
                __scst_block_dev(cmd->dev);
                cmd->blocking = 1;
        }
        spin_unlock_bh(&dev->dev_lock);
        goto out;
#else
repeat:
        if (unlikely(dev->block_count > 0)) {
                spin_lock_bh(&dev->dev_lock);
                if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
                        goto out_unlock;
                barrier(); /* to reread block_count */
                if (dev->block_count > 0) {
                        scst_dec_on_dev_cmd(cmd);
                        TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
                                "serializing (tag %d, dev %p)", cmd,
                                cmd->tag, dev);
                        list_add_tail(&cmd->blocked_cmd_list_entry,
                                      &dev->blocked_cmd_list);
                        res = 1;
                        spin_unlock_bh(&dev->dev_lock);
                        goto out;
                } else {
                        TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
                                "continuing");
                }
                spin_unlock_bh(&dev->dev_lock);
        }
        if (unlikely(cmd->dev->dev_serialized)) {
                spin_lock_bh(&dev->dev_lock);
                barrier(); /* to reread block_count */
                if (cmd->dev->block_count == 0) {
                        TRACE_MGMT_DBG("cmd %p (tag %d), blocking further "
                                "cmds due to serializing (dev %p)", cmd,
                                cmd->tag, dev);
                        __scst_block_dev(cmd->dev);
                        cmd->blocking = 1;
                } else {
                        spin_unlock_bh(&dev->dev_lock);
                        goto repeat;
                }
                spin_unlock_bh(&dev->dev_lock);
        }
#endif

out:
        return res;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        goto out;
}

/* Called under dev_lock */
void scst_unblock_cmds(struct scst_device *dev)
{
#ifdef STRICT_SERIALIZING
        struct scst_cmd *cmd, *t;
        int found = 0;

        TRACE_ENTRY();

        list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
                                 blocked_cmd_list_entry) {
                unsigned long flags;
                int brk = 0;
                /*
                 * Since only one cmd at a time is being executed, expected_sn
                 * can't change behind us while the corresponding cmd is on
                 * blocked_cmd_list, but we could be called before
                 * __scst_inc_expected_sn().
                 */
                if (likely(!cmd->internal) && likely(!cmd->retry)) {
                        int expected_sn;
                        sBUG_ON(cmd->tgt_dev == NULL);
                        expected_sn = cmd->tgt_dev->expected_sn;
                        if (cmd->sn == expected_sn)
                                brk = 1;
                        else if (cmd->sn != (expected_sn+1))
                                continue;
                }

                list_del(&cmd->blocked_cmd_list_entry);
                TRACE_MGMT_DBG("Moving cmd %p to active cmd list", cmd);
                spin_lock_irqsave(&scst_list_lock, flags);
                list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
                spin_unlock_irqrestore(&scst_list_lock, flags);
                found = 1;
                if (brk)
                        break;
        }
        if (found)
                wake_up(&scst_list_waitQ);
#else /* STRICT_SERIALIZING */
        struct scst_cmd *cmd, *tcmd;
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&scst_list_lock, flags);
        list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                 blocked_cmd_list_entry) {
                list_del(&cmd->blocked_cmd_list_entry);
                TRACE_MGMT_DBG("Moving blocked cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                wake_up(&scst_list_waitQ);
        }
        spin_unlock_irqrestore(&scst_list_lock, flags);
#endif /* STRICT_SERIALIZING */

        TRACE_EXIT();
        return;
}

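/*
 * If out_of_sn_cmd carries the expected SN, advance expected_sn; otherwise
 * park it on skipped_sn_list so a later SN match can account for it. Either
 * way, return a deferred command that has become ready, if there is one.
 */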
static struct scst_cmd *scst_inc_expected_sn(
        struct scst_tgt_dev *tgt_dev, struct scst_cmd *out_of_sn_cmd)
{
        struct scst_cmd *res = NULL;

        if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
                __scst_inc_expected_sn(tgt_dev);
        } else {
                spin_lock_bh(&tgt_dev->sn_lock);
                tgt_dev->def_cmd_count++;
                set_bit(SCST_CMD_OUT_OF_SN, &out_of_sn_cmd->cmd_flags);
                list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
                              &tgt_dev->skipped_sn_list);
                TRACE(TRACE_SCSI_SERIALIZING, "out_of_sn_cmd %p with sn %d "
                        "added to skipped_sn_list (expected_sn %d)",
                        out_of_sn_cmd, out_of_sn_cmd->sn, tgt_dev->expected_sn);
                spin_unlock_bh(&tgt_dev->sn_lock);
                smp_mb(); /* make sure we read the new tgt_dev->expected_sn */
        }
        res = scst_check_deferred_commands(tgt_dev, tgt_dev->expected_sn);
        return res;
}

void scst_inc_expected_sn_unblock(struct scst_tgt_dev *tgt_dev,
        struct scst_cmd *out_of_sn_cmd, int locked)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

        if (out_of_sn_cmd->no_sn) {
                TRACE(TRACE_SCSI_SERIALIZING, "cmd %p with no_sn", out_of_sn_cmd);
                goto out;
        }

        cmd = scst_inc_expected_sn(tgt_dev, out_of_sn_cmd);
        if (cmd != NULL) {
                unsigned long flags = 0;
                if (!locked)
                        spin_lock_irqsave(&scst_list_lock, flags);
                TRACE(TRACE_SCSI_SERIALIZING, "cmd %p with sn %d "
                        "moved to active cmd list", cmd, cmd->sn);
                list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
                if (!locked)
                        spin_unlock_irqrestore(&scst_list_lock, flags);
                if (!out_of_sn_cmd->processible_env)
                        wake_up(&scst_list_waitQ);
        }

out:
        TRACE_EXIT();
        return;
}

/*
 * Build scst_scsi_op_list: for each SCSI opcode it holds the index of the
 * first scst_scsi_op_table entry with that opcode, or SCST_CDB_TBL_SIZE if
 * there is none.
 */
void __init scst_scsi_op_list_init(void)
{
        int i;
        uint8_t op = 0xff;

        TRACE_ENTRY();

        for (i = 0; i < 256; i++)
                scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;

        for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
                if (scst_scsi_op_table[i].ops != op) {
                        op = scst_scsi_op_table[i].ops;
                        scst_scsi_op_list[op] = i;
                }
        }

        TRACE_EXIT();
        return;
}

#ifdef DEBUG
/* Original taken from the XFS code */
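/*
 * This is the Park-Miller "minimal standard" generator (x = 16807 x mod
 * 2^31 - 1), evaluated with Schrage's method (q = 127773, r = 2836) so the
 * multiplication never overflows 32 bits.
 */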
unsigned long scst_random(void)
{
        static int Inited;
        static unsigned long RandomValue;
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;
        /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
        register long rv;
        register long lo;
        register long hi;
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        if (!Inited) {
                RandomValue = jiffies;
                Inited = 1;
        }
        rv = RandomValue;
        hi = rv / 127773;
        lo = rv % 127773;
        rv = 16807 * lo - 2836 * hi;
        if (rv <= 0)
                rv += 2147483647;
        RandomValue = rv;
        spin_unlock_irqrestore(&lock, flags);
        return rv;
}
#endif

#ifdef DEBUG_TM

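/*
 * Task management (TM) debugging: commands on LUN 0 of the default ACG are
 * artificially delayed so that the initiator's ABORT/RESET handling can be
 * exercised. The state machine cycles ABORT -> RESET -> OFFLINE (or back to
 * ABORT, depending on TM_DBG_GO_OFFLINE).
 */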
#define TM_DBG_STATE_ABORT              0
#define TM_DBG_STATE_RESET              1
#define TM_DBG_STATE_OFFLINE            2

#define INIT_TM_DBG_STATE               TM_DBG_STATE_ABORT

static void tm_dbg_timer_fn(unsigned long arg);

/* All serialized by scst_list_lock */
static int tm_dbg_release;
static int tm_dbg_blocked;
static LIST_HEAD(tm_dbg_delayed_cmd_list);
static int tm_dbg_delayed_cmds_count;
static int tm_dbg_passed_cmds_count;
static int tm_dbg_state;
static int tm_dbg_on_state_passes;
static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);

static const int tm_dbg_on_state_num_passes[] = { 10, 1, 0x7ffffff };

void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev)
{
        if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
                /* Do TM debugging only for LUN 0 */
                tm_dbg_state = INIT_TM_DBG_STATE;
                tm_dbg_on_state_passes =
                        tm_dbg_on_state_num_passes[tm_dbg_state];
                __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
                PRINT_INFO("LUN 0 connected from initiator %s is under "
                        "TM debugging", tgt_dev->sess->tgt->tgtt->name);
        }
}

void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags))
                del_timer_sync(&tm_dbg_timer);
}

static void tm_dbg_timer_fn(unsigned long arg)
{
        TRACE_MGMT_DBG("%s: delayed cmd timer expired", __func__);
        tm_dbg_release = 1;
        smp_mb();
        wake_up_all(&scst_list_waitQ);
}

/* Called under scst_list_lock */
static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
{
        switch (tm_dbg_state) {
        case TM_DBG_STATE_ABORT:
                if (tm_dbg_delayed_cmds_count == 0) {
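                        /* Pick a pseudo-random delay in the 58..62 second range */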
                        unsigned long d = 58*HZ + (scst_random() % (4*HZ));
                        TRACE_MGMT_DBG("%s: delaying timed cmd %p (tag %d) "
                                "for %ld.%02ld seconds (%ld HZ)", __func__, cmd,
                                cmd->tag, d/HZ, (d%HZ)*100/HZ, d);
                        mod_timer(&tm_dbg_timer, jiffies + d);
#if 0
                        tm_dbg_blocked = 1;
#endif
                } else {
                        TRACE_MGMT_DBG("%s: delaying another timed cmd %p "
                                "(tag %d), delayed_cmds_count=%d", __func__, cmd,
                                cmd->tag, tm_dbg_delayed_cmds_count);
                        if (tm_dbg_delayed_cmds_count == 2)
                                tm_dbg_blocked = 0;
                }
                break;

        case TM_DBG_STATE_RESET:
        case TM_DBG_STATE_OFFLINE:
                TRACE_MGMT_DBG("%s: delaying cmd %p "
                        "(tag %d), delayed_cmds_count=%d", __func__, cmd,
                        cmd->tag, tm_dbg_delayed_cmds_count);
                tm_dbg_blocked = 1;
                break;

        default:
                sBUG();
        }
        list_move_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
        cmd->tm_dbg_delayed = 1;
        tm_dbg_delayed_cmds_count++;
        return;
}

/* Called under scst_list_lock */
void tm_dbg_check_released_cmds(void)
{
        if (tm_dbg_release) {
                struct scst_cmd *cmd, *tc;
                list_for_each_entry_safe_reverse(cmd, tc,
                                &tm_dbg_delayed_cmd_list, cmd_list_entry) {
                        TRACE_MGMT_DBG("%s: Releasing timed cmd %p "
                                "(tag %d), delayed_cmds_count=%d", __func__,
                                cmd, cmd->tag, tm_dbg_delayed_cmds_count);
                        list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
                }
                tm_dbg_release = 0;
        }
}

static void tm_dbg_change_state(void)
{
        tm_dbg_blocked = 0;
        if (--tm_dbg_on_state_passes == 0) {
                switch (tm_dbg_state) {
                case TM_DBG_STATE_ABORT:
                        TRACE_MGMT_DBG("%s", "Changing "
                            "tm_dbg_state to RESET");
                        tm_dbg_state = TM_DBG_STATE_RESET;
                        tm_dbg_blocked = 0;
                        break;
                case TM_DBG_STATE_RESET:
                case TM_DBG_STATE_OFFLINE:
                        if (TM_DBG_GO_OFFLINE) {
                                TRACE_MGMT_DBG("%s", "Changing "
                                        "tm_dbg_state to OFFLINE");
                                tm_dbg_state = TM_DBG_STATE_OFFLINE;
                        } else {
                                TRACE_MGMT_DBG("%s", "Changing "
                                        "tm_dbg_state to ABORT");
                                tm_dbg_state = TM_DBG_STATE_ABORT;
                        }
                        break;
                default:
                        sBUG();
                }
                tm_dbg_on_state_passes =
                    tm_dbg_on_state_num_passes[tm_dbg_state];
        }

        TRACE_MGMT_DBG("%s", "Deleting timer");
        del_timer(&tm_dbg_timer);
}

/* Called under scst_list_lock */
int tm_dbg_check_cmd(struct scst_cmd *cmd)
{
        int res = 0;

        if (cmd->tm_dbg_immut)
                goto out;

        if (cmd->tm_dbg_delayed) {
                TRACE_MGMT_DBG("Processing delayed cmd %p (tag %d), "
                        "delayed_cmds_count=%d", cmd, cmd->tag,
                        tm_dbg_delayed_cmds_count);

                cmd->tm_dbg_immut = 1;
                tm_dbg_delayed_cmds_count--;
                if ((tm_dbg_delayed_cmds_count == 0) &&
                    (tm_dbg_state == TM_DBG_STATE_ABORT))
                        tm_dbg_change_state();

        } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
                                        &cmd->tgt_dev->tgt_dev_flags)) {
                /* Delay every 50th command */
                if (tm_dbg_blocked || (++tm_dbg_passed_cmds_count % 50) == 0) {
                        tm_dbg_delay_cmd(cmd);
                        res = 1;
                } else
                        cmd->tm_dbg_immut = 1;
        }

out:
        return res;
}

/* Called under scst_list_lock */
void tm_dbg_release_cmd(struct scst_cmd *cmd)
{
        struct scst_cmd *c;
        list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
                                cmd_list_entry) {
                if (c == cmd) {
                        TRACE_MGMT_DBG("Abort request for "
                                "delayed cmd %p (tag=%d), moving it to "
                                "active cmd list (delayed_cmds_count=%d)",
                                c, c->tag, tm_dbg_delayed_cmds_count);
                        list_move(&c->cmd_list_entry, &scst_active_cmd_list);
                        wake_up_all(&scst_list_waitQ);
                        break;
                }
        }
}

/* Called under scst_list_lock */
void tm_dbg_task_mgmt(const char *fn)
{
        if (tm_dbg_state != TM_DBG_STATE_OFFLINE) {
                TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
                        tm_dbg_delayed_cmds_count);
                tm_dbg_change_state();
                tm_dbg_release = 1;
                smp_mb();
                wake_up_all(&scst_list_waitQ);
        } else {
                TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
        }
}

int tm_dbg_is_release(void)
{
        return tm_dbg_release;
}
#endif /* DEBUG_TM */