/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <asm/kmap_types.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
struct scsi_io_context {
	unsigned int full_cdb_used:1;
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCST_SENSE_BUFFERSIZE];
	unsigned char full_cdb[0];
};
static struct kmem_cache *scsi_io_context_cache;
#endif

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
	uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
	int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

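/*
 * scst_alloc_sense() - allocate a sense buffer for @cmd.
 *
 * Allocates cmd->sense from scst_sense_mempool (with GFP_ATOMIC if @atomic
 * is set) and zeroes it. If a buffer is already attached to the command,
 * only zeroes it. Returns 0 on success or -ENOMEM.
 */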
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
	int res = 0;
	gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

	TRACE_ENTRY();

	if (cmd->sense != NULL)
		goto memzero;

	cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
	if (cmd->sense == NULL) {
		PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
			"The sense data will be lost!!", cmd->cdb[0]);
		res = -ENOMEM;
		goto out;
	}

memzero:
	cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
	memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
	const uint8_t *sense, unsigned int len)
{
	int res;

	TRACE_ENTRY();

	res = scst_alloc_sense(cmd, atomic);
	if (res != 0) {
		PRINT_BUFFER("Lost sense", sense, len);
		goto out;
	}

	memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
	TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
	TRACE_ENTRY();

	cmd->status = status;
	cmd->host_status = DID_OK;

	cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
	cmd->dbl_ua_orig_data_direction = cmd->data_direction;

	cmd->data_direction = SCST_DATA_NONE;
	cmd->resp_data_len = 0;
	cmd->is_send_status = 1;

	cmd->completed = 1;

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

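/*
 * scst_set_cmd_error() - set CHECK CONDITION on @cmd and fill in its sense
 * buffer from the (key, asc, ascq) triple, in fixed or descriptor format
 * depending on the device's D_SENSE setting.
 *
 * A minimal usage sketch for a dev handler's exec path, assuming the
 * scst_sense_invalid_field_in_cdb descriptor defined in scst.h:
 *
 *	scst_set_cmd_error(cmd,
 *		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
 *	scst_set_cmd_abnormal_done_state(cmd);
 */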
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
	int rc;

	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

	rc = scst_alloc_sense(cmd, 1);
	if (rc != 0) {
		PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
			key, asc, ascq);
		goto out;
	}

	scst_set_sense(cmd->sense, cmd->sense_bufflen,
		scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
	TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

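/*
 * scst_set_sense() - format a sense buffer.
 *
 * With @d_sense set, descriptor format (response code 0x72) is built: key
 * in byte 1, ASC in byte 2, ASCQ in byte 3. Otherwise fixed format
 * (response code 0x70) is built: key in byte 2, additional sense length in
 * byte 7, ASC in byte 12, ASCQ in byte 13. Too-short buffers are reported
 * and filled only as far as they go.
 */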
void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
	int key, int asc, int ascq)
{
	sBUG_ON(len == 0);

	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			PRINT_ERROR("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x72;		/* Response Code	*/
		if (len > 1)
			buffer[1] = key;	/* Sense Key		*/
		if (len > 2)
			buffer[2] = asc;	/* ASC			*/
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ			*/
	} else {
		/* Fixed format */
		if (len < 14) {
			PRINT_ERROR("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x70;		/* Response Code	*/
		if (len > 2)
			buffer[2] = key;	/* Sense Key		*/
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC			*/
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ			*/
	}

	TRACE_BUFFER("Sense set", buffer, len);
	return;
}
EXPORT_SYMBOL(scst_set_sense);

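/*
 * scst_analyze_sense() - check whether @sense matches the (key, asc, ascq)
 * triple; @valid_mask selects which of the three fields must match. Both
 * fixed and descriptor sense formats are understood.
 *
 * Example (as used below for reset UAs):
 *
 *	if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
 *			SCST_LOAD_SENSE(scst_sense_reset_UA)))
 *		...
 */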
bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
	int key, int asc, int ascq)
{
	bool res = false;

	/* Response Code */
	if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
		/* Fixed format */

		if (len < 14) {
			PRINT_ERROR("Sense too small to analyze (%d, "
				"type fixed)", len);
			goto out;
		}

		/* Sense Key */
		if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
			goto out;

		/* ASC */
		if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
			goto out;

		/* ASCQ */
		if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
			goto out;
	} else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
		/* Descriptor format */

		if (len < 4) {
			PRINT_ERROR("Sense too small to analyze (%d, "
				"type descriptor)", len);
			goto out;
		}

		/* Sense Key */
		if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
			goto out;

		/* ASC */
		if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
			goto out;

		/* ASCQ */
		if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
			goto out;
	} else
		goto out;

	res = true;

out:
	TRACE_EXIT_RES((int)res);
	return res;
}
EXPORT_SYMBOL(scst_analyze_sense);

bool scst_is_ua_sense(const uint8_t *sense, int len)
{
	if (SCST_SENSE_VALID(sense))
		return scst_analyze_sense(sense, len,
			SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
	else
		return false;
}
EXPORT_SYMBOL(scst_is_ua_sense);

bool scst_is_ua_global(const uint8_t *sense, int len)
{
	bool res;

	/* If you change this, don't forget to change scst_requeue_ua() as well! */

	if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
			SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed)))
		res = true;
	else
		res = false;

	return res;
}

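/*
 * scst_check_convert_sense() - convert the sense of a CHECK CONDITION
 * command between fixed and descriptor format, if its current format
 * doesn't match the device's D_SENSE setting.
 */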
void scst_check_convert_sense(struct scst_cmd *cmd)
{
	bool d_sense;

	TRACE_ENTRY();

	if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
		goto out;

	d_sense = scst_get_cmd_dev_d_sense(cmd);
	if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
		TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
			cmd);
		if (cmd->sense_bufflen < 14) {
			PRINT_ERROR("Sense too small to convert (%d, "
				"type fixed)", cmd->sense_bufflen);
			goto out;
		}
		scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
			cmd->sense[2], cmd->sense[12], cmd->sense[13]);
	} else if (!d_sense && ((cmd->sense[0] == 0x72) ||
				(cmd->sense[0] == 0x73))) {
		TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
			cmd);
		if (cmd->sense_bufflen < 4) {
			PRINT_ERROR("Sense too small to convert (%d, "
				"type descriptor)", cmd->sense_bufflen);
			goto out;
		}
		scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
			cmd->sense[1], cmd->sense[2], cmd->sense[3]);
	}

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_check_convert_sense);

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
	unsigned int len)
{
	TRACE_ENTRY();

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
	scst_alloc_set_sense(cmd, 1, sense, len);

	TRACE_EXIT();
	return;
}

void scst_set_busy(struct scst_cmd *cmd)
{
	int c = atomic_read(&cmd->sess->sess_cmd_count);

	TRACE_ENTRY();

	if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
		scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
		TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	} else {
		scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
		TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
			"initiator %s (cmds count %d, queue_type %x, "
			"sess->init_phase %d)", cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	}

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_busy);

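/*
 * scst_set_initial_UA() - replace the initial RESET UA, queued at tgt_dev
 * creation, with the given (key, asc, ascq) on every LUN of @sess.
 */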
void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
	int i;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
		asc, ascq);

	/* Protect sess_tgt_dev_list_hash */
	mutex_lock(&scst_mutex);

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		struct scst_tgt_dev *tgt_dev;

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			if (!list_empty(&tgt_dev->UA_list)) {
				struct scst_tgt_dev_UA *ua;

				ua = list_entry(tgt_dev->UA_list.next,
					typeof(*ua), UA_list_entry);
				if (scst_analyze_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						SCST_SENSE_ALL_VALID,
						SCST_LOAD_SENSE(scst_sense_reset_UA))) {
					scst_set_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						tgt_dev->dev->d_sense,
						key, asc, ascq);
				} else
					PRINT_ERROR("%s",
						"The first UA isn't RESET UA");
			} else
				PRINT_ERROR("%s", "There's no RESET UA to "
					"replace");
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}

	mutex_unlock(&scst_mutex);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

static struct scst_aen *scst_alloc_aen(struct scst_session *sess,
	uint64_t unpacked_lun)
{
	struct scst_aen *aen;

	TRACE_ENTRY();

	aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
	if (aen == NULL) {
		PRINT_ERROR("AEN memory allocation failed. Corresponding "
			"event notification will not be performed (initiator "
			"%s)", sess->initiator_name);
		goto out;
	}
	memset(aen, 0, sizeof(*aen));

	aen->sess = sess;
	scst_sess_get(sess);

	aen->lun = scst_pack_lun(unpacked_lun);

out:
	TRACE_EXIT_HRES((unsigned long)aen);
	return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
	TRACE_ENTRY();

	scst_sess_put(aen->sess);
	mempool_free(aen, scst_aen_mempool);

	TRACE_EXIT();
	return;
}

/* Must be called under scst_mutex */
void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
	int key, int asc, int ascq)
{
	struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

	TRACE_ENTRY();

	if (tgtt->report_aen != NULL) {
		struct scst_aen *aen;
		int rc;

		aen = scst_alloc_aen(tgt_dev->sess, tgt_dev->lun);
		if (aen == NULL)
			goto queue_ua;

		aen->event_fn = SCST_AEN_SCSI;
		aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
		scst_set_sense(aen->aen_sense, aen->aen_sense_len,
			tgt_dev->dev->d_sense, key, asc, ascq);

		TRACE_DBG("Calling target's %s report_aen(%p)",
			tgtt->name, aen);
		rc = tgtt->report_aen(aen);
		TRACE_DBG("Target's %s report_aen(%p) returned %d",
			tgtt->name, aen, rc);
		if (rc == SCST_AEN_RES_SUCCESS)
			goto out;

		scst_free_aen(aen);
	}

queue_ua:
	TRACE_MGMT_DBG("AEN not supported, queuing plain UA (tgt_dev %p)",
		tgt_dev);
	scst_set_sense(sense_buffer, sizeof(sense_buffer),
		tgt_dev->dev->d_sense, key, asc, ascq);
	scst_check_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

out:
	TRACE_EXIT();
	return;
}

/* No locks */
void scst_capacity_data_changed(struct scst_device *dev)
{
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	if (dev->type != TYPE_DISK) {
		TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
			"CHANGED UA", dev->type);
		goto out;
	}

	TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

	mutex_lock(&scst_mutex);

	list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
			    dev_tgt_dev_list_entry) {
		scst_gen_aen_or_ua(tgt_dev,
			SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
	}

	mutex_unlock(&scst_mutex);

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);

static inline bool scst_is_report_luns_changed_type(int type)
{
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	case TYPE_PRINTER:
	case TYPE_PROCESSOR:
	case TYPE_WORM:
	case TYPE_ROM:
	case TYPE_SCANNER:
	case TYPE_MOD:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_RAID:
	case TYPE_ENCLOSURE:
		return true;
	default:
		return false;
	}
}

/* scst_mutex is supposed to be held */
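/*
 * All tgt_dev locks of the session are taken below before the UA is set,
 * so the UA is queued on all affected LUNs atomically and an initiator
 * can't observe it on one LUN but not yet on another.
 */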
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
					      int flags)
{
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
	struct list_head *shead;
	struct scst_tgt_dev *tgt_dev;
	int i;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
		"(sess %p)", sess);

	local_bh_disable();

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			/* Lockdep triggers a false positive here */
			spin_lock(&tgt_dev->tgt_dev_lock);
		}
	}

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (!scst_is_report_luns_changed_type(
					tgt_dev->dev->type))
				continue;

			scst_set_sense(sense_buffer, sizeof(sense_buffer),
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

			__scst_check_set_UA(tgt_dev, sense_buffer,
				sizeof(sense_buffer),
				flags | SCST_SET_UA_FLAG_GLOBAL);
		}
	}

	for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry_reverse(tgt_dev,
				shead, sess_tgt_dev_list_entry) {
			spin_unlock(&tgt_dev->tgt_dev_lock);
		}
	}

	local_bh_enable();

	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_report_luns_changed_sess(struct scst_session *sess)
{
	int i;
	struct scst_tgt_template *tgtt = sess->tgt->tgtt;
	int d_sense = 0;
	uint64_t lun = 0;

	TRACE_ENTRY();

	TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *shead;
		struct scst_tgt_dev *tgt_dev;

		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (scst_is_report_luns_changed_type(
					tgt_dev->dev->type)) {
				lun = tgt_dev->lun;
				d_sense = tgt_dev->dev->d_sense;
				goto found;
			}
		}
	}

found:
	if (tgtt->report_aen != NULL) {
		struct scst_aen *aen;
		int rc;

		aen = scst_alloc_aen(sess, lun);
		if (aen == NULL)
			goto queue_ua;

		aen->event_fn = SCST_AEN_SCSI;
		aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
		scst_set_sense(aen->aen_sense, aen->aen_sense_len, d_sense,
			SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

		TRACE_DBG("Calling target's %s report_aen(%p)",
			tgtt->name, aen);
		rc = tgtt->report_aen(aen);
		TRACE_DBG("Target's %s report_aen(%p) returned %d",
			tgtt->name, aen, rc);
		if (rc == SCST_AEN_RES_SUCCESS)
			goto out;

		scst_free_aen(aen);
	}

queue_ua:
	scst_queue_report_luns_changed_UA(sess, 0);

out:
	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
	struct scst_session *sess;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		scst_report_luns_changed_sess(sess);
	}

	TRACE_EXIT();
	return;
}

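/*
 * scst_aen_done() - called by the target driver when delivery of @aen has
 * finished. If delivery of a SCSI AEN failed, the event is requeued as a
 * unit attention on the corresponding tgt_dev(s), so it is not lost.
 */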
void scst_aen_done(struct scst_aen *aen)
{
	TRACE_ENTRY();

	TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
		aen->event_fn, aen->sess->initiator_name);

	if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
		goto out_free;

	if (aen->event_fn != SCST_AEN_SCSI)
		goto out_free;

	TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
		aen->sess->initiator_name);

	if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
			SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
				scst_sense_reported_luns_data_changed))) {
		mutex_lock(&scst_mutex);
		scst_queue_report_luns_changed_UA(aen->sess,
			SCST_SET_UA_FLAG_AT_HEAD);
		mutex_unlock(&scst_mutex);
	} else {
		struct list_head *shead;
		struct scst_tgt_dev *tgt_dev;
		uint64_t lun;

		lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

		mutex_lock(&scst_mutex);

		/*
		 * The tgt_dev might have been freed meanwhile, so we need
		 * to look it up again.
		 */
		shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == lun) {
				TRACE_MGMT_DBG("Requeuing failed AEN UA for "
					"tgt_dev %p", tgt_dev);
				scst_check_set_UA(tgt_dev, aen->aen_sense,
					aen->aen_sense_len,
					SCST_SET_UA_FLAG_AT_HEAD);
				break;
			}
		}

		mutex_unlock(&scst_mutex);
	}

out_free:
	scst_free_aen(aen);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_aen_done);

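/*
 * scst_requeue_ua() - requeue the UA carried by a cmd whose delivery
 * failed, at the head of the UA list so it is reported first.
 */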
void scst_requeue_ua(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
			SCST_SENSE_ALL_VALID,
			SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
		TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
			"for delivery failed cmd %p", cmd);
		mutex_lock(&scst_mutex);
		scst_queue_report_luns_changed_UA(cmd->sess,
			SCST_SET_UA_FLAG_AT_HEAD);
		mutex_unlock(&scst_mutex);
	} else {
		TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
		scst_check_set_UA(cmd->tgt_dev, cmd->sense,
			cmd->sense_bufflen, SCST_SET_UA_FLAG_AT_HEAD);
	}

	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_check_reassign_sess(struct scst_session *sess)
{
	struct scst_acg *acg, *old_acg;
	struct scst_acg_dev *acg_dev;
	int i;
	struct list_head *shead;
	struct scst_tgt_dev *tgt_dev;
	bool luns_changed = false;
	bool add_failed, something_freed, not_needed_freed = false;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
		sess, sess->initiator_name);

	acg = scst_find_acg(sess);
	if (acg == sess->acg) {
		TRACE_MGMT_DBG("No reassignment for sess %p", sess);
		goto out;
	}

	TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
		sess, sess->acg->acg_name, acg->acg_name);

	old_acg = sess->acg;
	sess->acg = NULL; /* to catch implicit dependencies earlier */

retry_add:
	add_failed = false;
	list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
		unsigned int inq_changed_ua_needed = 0;

		for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
			shead = &sess->sess_tgt_dev_list_hash[i];

			list_for_each_entry(tgt_dev, shead,
					sess_tgt_dev_list_entry) {
				if ((tgt_dev->dev == acg_dev->dev) &&
				    (tgt_dev->lun == acg_dev->lun) &&
				    (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
					TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
						"LUN %lld stays the same",
						sess, tgt_dev,
						(unsigned long long)tgt_dev->lun);
					tgt_dev->acg_dev = acg_dev;
					goto next;
				} else if (tgt_dev->lun == acg_dev->lun)
					inq_changed_ua_needed = 1;
			}
		}

		luns_changed = true;

		TRACE_MGMT_DBG("sess %p: Allocating new tgt_dev for LUN %lld",
			sess, (unsigned long long)acg_dev->lun);

		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			add_failed = true;
			break;
		}

		tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
						 not_needed_freed;
next:
		continue;
	}

	something_freed = false;
	not_needed_freed = true;
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct scst_tgt_dev *t;
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry_safe(tgt_dev, t, shead,
					sess_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev->acg != acg) {
				TRACE_MGMT_DBG("sess %p: Deleting not used "
					"tgt_dev %p for LUN %lld",
					sess, tgt_dev,
					(unsigned long long)tgt_dev->lun);
				luns_changed = true;
				something_freed = true;
				scst_free_tgt_dev(tgt_dev);
			}
		}
	}

	if (add_failed && something_freed) {
		TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
		goto retry_add;
	}

	sess->acg = acg;

	TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
		old_acg->acg_name, acg->acg_name);
	list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);

	if (luns_changed) {
		scst_report_luns_changed_sess(sess);

		for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
			shead = &sess->sess_tgt_dev_list_hash[i];

			list_for_each_entry(tgt_dev, shead,
					sess_tgt_dev_list_entry) {
				if (tgt_dev->inq_changed_ua_needed) {
					TRACE_MGMT_DBG("sess %p: Setting "
						"INQUIRY DATA HAS CHANGED UA "
						"(tgt_dev %p)", sess, tgt_dev);

					tgt_dev->inq_changed_ua_needed = 0;

					scst_gen_aen_or_ua(tgt_dev,
						SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
				}
			}
		}
	}

out:
	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_check_reassign_sessions(void)
{
	struct scst_tgt_template *tgtt;

	TRACE_ENTRY();

	list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
		struct scst_tgt *tgt;
		list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
			struct scst_session *sess;
			list_for_each_entry(sess, &tgt->sess_list,
						sess_list_entry) {
				scst_check_reassign_sess(sess);
			}
		}
	}

	TRACE_EXIT();
	return;
}

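/*
 * scst_get_cmd_abnormal_done_state() - map the current state of @cmd to
 * the state from which its processing should continue when it is being
 * completed abnormally (e.g., on error), skipping the remaining normal
 * processing stages.
 */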
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	switch (cmd->state) {
	case SCST_CMD_STATE_INIT_WAIT:
	case SCST_CMD_STATE_INIT:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_DEV_DONE:
		if (cmd->internal)
			res = SCST_CMD_STATE_FINISHED_INTERNAL;
		else
			res = SCST_CMD_STATE_PRE_XMIT_RESP;
		break;

	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		res = SCST_CMD_STATE_DEV_DONE;
		break;

	case SCST_CMD_STATE_PRE_XMIT_RESP:
		res = SCST_CMD_STATE_XMIT_RESP;
		break;

	case SCST_CMD_STATE_PREPROCESS_DONE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_DATA_WAIT:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_REAL_EXECUTING:
		res = SCST_CMD_STATE_PRE_DEV_DONE;
		break;

	default:
		PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
		/* Invalid state to suppress a compiler warning */
		res = SCST_CMD_STATE_LAST_ACTIVE;
	}

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	switch (cmd->state) {
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
	case SCST_CMD_STATE_XMIT_WAIT:
		PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif

	cmd->state = scst_get_cmd_abnormal_done_state(cmd);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
		   (cmd->tgt_dev == NULL) && !cmd->internal) {
		PRINT_CRIT_ERROR("Wrong not inited cmd state %d (cmd %p, "
			"op %x)", cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

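/*
 * scst_set_resp_data_len() - truncate the response to @resp_data_len bytes
 * by shortening the SG vector; the original entry count and length are
 * saved (orig_sg_*) so scst_check_restore_sg_buff() can restore them later.
 */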
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
	int i, l;

	TRACE_ENTRY();

	scst_check_restore_sg_buff(cmd);
	cmd->resp_data_len = resp_data_len;

	if (resp_data_len == cmd->bufflen)
		goto out;

	l = 0;
	for (i = 0; i < cmd->sg_cnt; i++) {
		l += cmd->sg[i].length;
		if (l >= resp_data_len) {
			int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
			TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
				"resp_data_len %d, i %d, cmd->sg[i].length %d, "
				"left %d",
				cmd, (long long unsigned int)cmd->tag,
				resp_data_len, i,
				cmd->sg[i].length, left);
#endif
			cmd->orig_sg_cnt = cmd->sg_cnt;
			cmd->orig_sg_entry = i;
			cmd->orig_entry_len = cmd->sg[i].length;
			cmd->sg_cnt = (left > 0) ? i+1 : i;
			cmd->sg[i].length = left;
			cmd->sg_buff_modified = 1;
			break;
		}
	}

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

/* No locks */
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->tgt;
	int res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_cmds++;
	/*
	 * A memory barrier is needed here to order the write to retry_cmds
	 * against the read of finished_cmds, so we don't miss a command
	 * that finished while we were queuing this one for retry after the
	 * finished_cmds check.
	 */
	smp_mb();
	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
	      tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE_RETRY("Some command(s) finished, direct retry "
		      "(finished_cmds=%d, tgt->finished_cmds=%d, "
		      "retry_cmds=%d)", finished_cmds,
		      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	TRACE_EXIT_RES(res);
	return res;
}

/* Returns 0 to continue, >0 to restart, <0 to break */
static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
	unsigned long cur_time, unsigned long max_time,
	struct scst_session *sess, unsigned long *flags,
	struct scst_tgt_template *tgtt)
{
	int res = -1; /* break */

	TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
		"pending time %ld", cmd, cmd->cmd_hw_pending,
		(long)(cur_time - cmd->start_time) / HZ,
		(long)(cur_time - cmd->hw_pending_start) / HZ);

	if (time_before_eq(cur_time, cmd->start_time + max_time)) {
		/* Cmds are ordered, so no need to check more */
		goto out;
	}

	if (!cmd->cmd_hw_pending) {
		res = 0; /* continue */
		goto out;
	}

	if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
		/* Cmds are ordered, so no need to check more */
		goto out;
	}

	TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
		cmd, (cur_time - cmd->hw_pending_start) / HZ,
		cmd->state);

	cmd->cmd_hw_pending = 0;

	spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
	tgtt->on_hw_pending_cmd_timeout(cmd);
	spin_lock_irqsave(&sess->sess_list_lock, *flags);

	res = 1; /* restart */

out:
	TRACE_EXIT_RES(res);
	return res;
}

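/*
 * Work function that scans the session's command lists for commands stuck
 * in the target hardware longer than tgtt->max_hw_pending_time and invokes
 * the driver's on_hw_pending_cmd_timeout() for them.
 */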
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void scst_hw_pending_work_fn(void *p)
#else
static void scst_hw_pending_work_fn(struct delayed_work *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct scst_session *sess = (struct scst_session *)p;
#else
	struct scst_session *sess = container_of(work, struct scst_session,
					hw_pending_work);
#endif
	struct scst_tgt_template *tgtt = sess->tgt->tgtt;
	struct scst_cmd *cmd;
	unsigned long cur_time = jiffies;
	unsigned long flags;
	unsigned long max_time = tgtt->max_hw_pending_time * HZ;

	TRACE_ENTRY();

	TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);

	clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

restart:
	list_for_each_entry(cmd, &sess->search_cmd_list,
				sess_cmd_list_entry) {
		int rc;

		rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
					&flags, tgtt);
		if (rc < 0)
			break;
		else if (rc == 0)
			continue;
		else
			goto restart;
	}

restart1:
	list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
				sess_cmd_list_entry) {
		int rc;

		rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
					&flags, tgtt);
		if (rc < 0)
			break;
		else if (rc == 0)
			continue;
		else
			goto restart1;
	}

	if (!list_empty(&sess->search_cmd_list) ||
	    !list_empty(&sess->after_pre_xmit_cmd_list)) {
		/*
		 * If there is no further activity, stuck cmds might need one
		 * more run to be released, so reschedule the work once again.
		 */
		TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
			sess, tgtt->max_hw_pending_time);
		set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
		schedule_delayed_work(&sess->hw_pending_work,
				tgtt->max_hw_pending_time * HZ);
	}

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);

	TRACE_EXIT();
	return;
}

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
	struct scst_device *dev;
	int res = 0;
	static int dev_num; /* protected by scst_mutex */

	TRACE_ENTRY();

	dev = kzalloc(sizeof(*dev), gfp_mask);
	if (dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_device failed");
		res = -ENOMEM;
		goto out;
	}

	dev->handler = &scst_null_devtype;
	dev->p_cmd_lists = &scst_main_cmd_lists;
	atomic_set(&dev->dev_cmd_count, 0);
	atomic_set(&dev->write_cmd_count, 0);
	scst_init_mem_lim(&dev->dev_mem_lim);
	spin_lock_init(&dev->dev_lock);
	atomic_set(&dev->on_dev_count, 0);
	INIT_LIST_HEAD(&dev->blocked_cmd_list);
	INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
	INIT_LIST_HEAD(&dev->dev_acg_dev_list);
	INIT_LIST_HEAD(&dev->threads_list);
	init_waitqueue_head(&dev->on_dev_waitQ);
	dev->dev_double_ua_possible = 1;
	dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
	dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
	dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
	if (dev->dev_io_ctx == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
		res = -ENOMEM;
		kfree(dev);
		goto out;
	}
#endif
#endif

	*out_dev = dev;

out:
	TRACE_EXIT_RES(res);
	return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	if (!list_empty(&dev->dev_tgt_dev_list) ||
	    !list_empty(&dev->dev_acg_dev_list)) {
		PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
			"is not empty!", __func__);
		sBUG();
	}
#endif

	__exit_io_context(dev->dev_io_ctx);

	kfree(dev);

	TRACE_EXIT();
	return;
}

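/*
 * scst_init_mem_lim() - init a per-device memory limit. The shift pair
 * converts scst_max_dev_cmd_mem (apparently in megabytes, per this
 * arithmetic) to pages: "<< 10" yields kilobytes, ">> (PAGE_SHIFT - 10)"
 * then yields pages.
 */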
void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
	atomic_set(&mem_lim->alloced_pages, 0);
	mem_lim->max_allowed_pages =
		((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
					struct scst_device *dev, uint64_t lun)
{
	struct scst_acg_dev *res;

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
	res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
	if (res == NULL) {
		TRACE(TRACE_OUT_OF_MEM,
		      "%s", "Allocation of scst_acg_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(res, 0, sizeof(*res));
#endif

	res->dev = dev;
	res->acg = acg;
	res->lun = lun;

out:
	TRACE_EXIT_HRES(res);
	return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
	TRACE_ENTRY();

	TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_del(&acg_dev->acg_dev_list_entry);
	list_del(&acg_dev->dev_acg_dev_list_entry);

	kmem_cache_free(scst_acgd_cachep, acg_dev);

	TRACE_EXIT();
	return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
	struct scst_acg *acg;

	TRACE_ENTRY();

	acg = kzalloc(sizeof(*acg), GFP_KERNEL);
	if (acg == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
		goto out;
	}

	INIT_LIST_HEAD(&acg->acg_dev_list);
	INIT_LIST_HEAD(&acg->acg_sess_list);
	INIT_LIST_HEAD(&acg->acn_list);
	acg->acg_name = acg_name;

	TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
	list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

	scst_check_reassign_sessions();

out:
	TRACE_EXIT_HRES(acg);
	return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
	struct scst_acn *n, *nn;
	struct scst_acg_dev *acg_dev, *acg_dev_tmp;
	int res = 0;

	TRACE_ENTRY();

	if (!list_empty(&acg->acg_sess_list)) {
		PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
		res = -EBUSY;
		goto out;
	}

	TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
	list_del(&acg->scst_acg_list_entry);

	/* Freeing acg_devs */
	list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
			acg_dev_list_entry) {
		struct scst_tgt_dev *tgt_dev, *tt;
		list_for_each_entry_safe(tgt_dev, tt,
				 &acg_dev->dev->dev_tgt_dev_list,
				 dev_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev == acg_dev)
				scst_free_tgt_dev(tgt_dev);
		}
		scst_free_acg_dev(acg_dev);
	}

	/* Freeing names */
	list_for_each_entry_safe(n, nn, &acg->acn_list,
			acn_list_entry) {
		list_del(&n->acn_list_entry);
		kfree(n->name);
		kfree(n);
	}
	INIT_LIST_HEAD(&acg->acn_list);

	kfree(acg);
out:
	TRACE_EXIT_RES(res);
	return res;
}

/*
 * scst_mutex is supposed to be held; there must be no parallel activity in
 * this session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev)
{
	int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
	struct scst_tgt_dev *tgt_dev, *t = NULL;
	struct scst_device *dev = acg_dev->dev;
	struct list_head *sess_tgt_dev_list_head;
	struct scst_tgt_template *vtt = sess->tgt->tgtt;
	int rc, i;
	bool share_io_ctx = false;
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

	TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
	tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
	if (tgt_dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
		      "Allocation of scst_tgt_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

	tgt_dev->dev = dev;
	tgt_dev->lun = acg_dev->lun;
	tgt_dev->acg_dev = acg_dev;
	tgt_dev->sess = sess;
	atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

	scst_sgv_pool_use_norm(tgt_dev);

	if (dev->scsi_dev != NULL) {
		ini_sg = dev->scsi_dev->host->sg_tablesize;
		ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
		ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
				ENABLE_CLUSTERING);
	} else {
		ini_sg = (1 << 15) /* infinite */;
		ini_unchecked_isa_dma = 0;
		ini_use_clustering = 0;
	}
	tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

	if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
	    !sess->tgt->tgtt->no_clustering)
		scst_sgv_pool_use_norm_clust(tgt_dev);

	if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
		scst_sgv_pool_use_dma(tgt_dev);

	if (dev->scsi_dev != NULL) {
		TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
		      "SCST lun=%lld", dev->scsi_dev->host->host_no,
		      dev->scsi_dev->channel, dev->scsi_dev->id,
		      dev->scsi_dev->lun,
		      (long long unsigned int)tgt_dev->lun);
	} else {
		TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
		       dev->virt_name, (long long unsigned int)tgt_dev->lun);
	}

	spin_lock_init(&tgt_dev->tgt_dev_lock);
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	spin_lock_init(&tgt_dev->thr_data_lock);
	INIT_LIST_HEAD(&tgt_dev->thr_data_list);
	spin_lock_init(&tgt_dev->sn_lock);
	INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
	INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
	tgt_dev->expected_sn = 1;
	tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
	tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
	for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
		atomic_set(&tgt_dev->sn_slots[i], 0);

	if (dev->handler->parse_atomic &&
	    (sess->tgt->tgtt->preprocessing_done == NULL)) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		if (dev->handler->exec_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->exec_atomic) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->dev_done_atomic &&
	    sess->tgt->tgtt->xmit_response_atomic) {
		__set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}

	scst_set_sense(sense_buffer, sizeof(sense_buffer),
		dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

	tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

	if (tgt_dev->sess->initiator_name != NULL) {
		spin_lock_bh(&dev->dev_lock);
		list_for_each_entry(t, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			TRACE_DBG("t name %s (tgt_dev name %s)",
				t->sess->initiator_name,
				tgt_dev->sess->initiator_name);
			if (t->sess->initiator_name == NULL)
				continue;
			if (strcmp(t->sess->initiator_name,
					tgt_dev->sess->initiator_name) == 0) {
				share_io_ctx = true;
				break;
			}
		}
		spin_unlock_bh(&dev->dev_lock);
	}

	if (share_io_ctx) {
		TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
			t->tgt_dev_io_ctx, tgt_dev,
			tgt_dev->sess->initiator_name);
		tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
	} else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
		tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
		if (tgt_dev->tgt_dev_io_ctx == NULL) {
			TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
				"context for dev %s (initiator %s)",
				dev->virt_name, sess->initiator_name);
			goto out_free;
		}
#endif
#endif
	}

	if (vtt->threads_num > 0) {
		rc = 0;
		if (dev->handler->threads_num > 0)
			rc = scst_add_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			rc = scst_add_global_threads(vtt->threads_num);
		if (rc != 0)
			goto out_free;
	}

	if (dev->handler && dev->handler->attach_tgt) {
		TRACE_DBG("Calling dev handler's attach_tgt(%p)",
		      tgt_dev);
		rc = dev->handler->attach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1584                 if (rc != 0) {
1585                         PRINT_ERROR("Device handler's %s attach_tgt() "
1586                             "failed: %d", dev->handler->name, rc);
1587                         goto out_thr_free;
1588                 }
1589         }
1590
1591         spin_lock_bh(&dev->dev_lock);
1592         list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1593         if (dev->dev_reserved)
1594                 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1595         spin_unlock_bh(&dev->dev_lock);
1596
1597         sess_tgt_dev_list_head =
1598                 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1599         list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1600                       sess_tgt_dev_list_head);
1601
1602 out:
1603         TRACE_EXIT();
1604         return tgt_dev;
1605
1606 out_thr_free:
1607         if (vtt->threads_num > 0) {
1608                 if (dev->handler->threads_num > 0)
1609                         scst_del_dev_threads(dev, vtt->threads_num);
1610                 else if (dev->handler->threads_num == 0)
1611                         scst_del_global_threads(vtt->threads_num);
1612         }
1613
1614 out_free:
1615         scst_free_all_UA(tgt_dev);
1616         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1617
1618         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1619         tgt_dev = NULL;
1620         goto out;
1621 }
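
/*
 * Illustrative sketch (an assumed example, not part of the build): the
 * SCST_TGT_DEV_AFTER_*_ATOMIC flags set above are derived from the dev
 * handler's and target template's declared capabilities. A hypothetical
 * handler with atomic-capable callbacks could look like:
 *
 *	static struct scst_dev_type example_devtype = {
 *		.name = "example",
 *		.parse_atomic = 1,
 *		.exec_atomic = 1,
 *		.dev_done_atomic = 1,
 *	};
 *
 * Paired with a target template that sets rdy_to_xfer_atomic and
 * xmit_response_atomic (and has no preprocessing_done callback), all of
 * the flags above get set, letting the core continue command processing
 * in atomic context instead of rescheduling to a thread.
 */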
1622
1623 /* No locks supposed to be held except scst_mutex, which must be held */
1624 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1625 {
1626         TRACE_ENTRY();
1627
1628         scst_clear_reservation(tgt_dev);
1629
1630         /* With activity suspended the lock isn't needed, but let's be safe */
1631         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1632         scst_free_all_UA(tgt_dev);
1633         memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1634         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1635
1636         if (queue_UA) {
1637                 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1638                 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1639                         tgt_dev->dev->d_sense,
1640                         SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1641                 scst_check_set_UA(tgt_dev, sense_buffer,
1642                         sizeof(sense_buffer), 0);
1643         }
1644
1645         TRACE_EXIT();
1646         return;
1647 }
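
/*
 * Illustrative use (assumed caller context, not part of the build): a
 * target driver that detected an I_T nexus loss, e.g. on session
 * reinstatement, could, under scst_mutex with activity suspended, do:
 *
 *	for (i = 0; i < TGT_DEV_HASH_SIZE; i++)
 *		list_for_each_entry(tgt_dev,
 *				&sess->sess_tgt_dev_list_hash[i],
 *				sess_tgt_dev_list_entry)
 *			scst_nexus_loss(tgt_dev, true);
 *
 * With queue_UA true each affected LUN reports the nexus loss as a Unit
 * Attention on the next command from this initiator.
 */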
1648
1649 /*
1650  * scst_mutex supposed to be held, there must not be parallel activity in this
1651  * session.
1652  */
1653 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1654 {
1655         struct scst_device *dev = tgt_dev->dev;
1656         struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1657
1658         TRACE_ENTRY();
1659
1660         tm_dbg_deinit_tgt_dev(tgt_dev);
1661
1662         spin_lock_bh(&dev->dev_lock);
1663         list_del(&tgt_dev->dev_tgt_dev_list_entry);
1664         spin_unlock_bh(&dev->dev_lock);
1665
1666         list_del(&tgt_dev->sess_tgt_dev_list_entry);
1667
1668         scst_clear_reservation(tgt_dev);
1669         scst_free_all_UA(tgt_dev);
1670
1671         if (dev->handler && dev->handler->detach_tgt) {
1672                 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1673                       tgt_dev);
1674                 dev->handler->detach_tgt(tgt_dev);
1675                 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1676         }
1677
1678         if (vtt->threads_num > 0) {
1679                 if (dev->handler->threads_num > 0)
1680                         scst_del_dev_threads(dev, vtt->threads_num);
1681                 else if (dev->handler->threads_num == 0)
1682                         scst_del_global_threads(vtt->threads_num);
1683         }
1684
1685         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1686
1687         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1688
1689         TRACE_EXIT();
1690         return;
1691 }
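
/*
 * Note that the teardown above essentially mirrors scst_alloc_add_tgt_dev()
 * in reverse: unlink from the per-device and per-session lists, drop a
 * possibly held reservation and all pending UAs, detach the dev handler,
 * remove any threads added for this tgt_dev and, finally, release the IO
 * context and the tgt_dev itself.
 */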
1692
1693 /* scst_mutex supposed to be held */
1694 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1695 {
1696         int res = 0;
1697         struct scst_acg_dev *acg_dev;
1698         struct scst_tgt_dev *tgt_dev;
1699
1700         TRACE_ENTRY();
1701
1702         list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1703                         acg_dev_list_entry) {
1704                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1705                 if (tgt_dev == NULL) {
1706                         res = -ENOMEM;
1707                         goto out_free;
1708                 }
1709         }
1710
1711 out:
1712         TRACE_EXIT();
1713         return res;
1714
1715 out_free:
1716         scst_sess_free_tgt_devs(sess);
1717         goto out;
1718 }
1719
1720 /*
1721  * scst_mutex supposed to be held, there must not be parallel activity in this
1722  * session.
1723  */
1724 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1725 {
1726         int i;
1727         struct scst_tgt_dev *tgt_dev, *t;
1728
1729         TRACE_ENTRY();
1730
1731         /* The session is going down, no users, so no locks */
1732         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1733                 struct list_head *sess_tgt_dev_list_head =
1734                         &sess->sess_tgt_dev_list_hash[i];
1735                 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1736                                 sess_tgt_dev_list_entry) {
1737                         scst_free_tgt_dev(tgt_dev);
1738                 }
1739                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1740         }
1741
1742         TRACE_EXIT();
1743         return;
1744 }
1745
1746 /* The activity supposed to be suspended and scst_mutex held */
1747 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1748         uint64_t lun, int read_only, bool gen_scst_report_luns_changed)
1749 {
1750         int res = 0;
1751         struct scst_acg_dev *acg_dev;
1752         struct scst_tgt_dev *tgt_dev;
1753         struct scst_session *sess;
1754         LIST_HEAD(tmp_tgt_dev_list);
1755
1756         TRACE_ENTRY();
1757
1760         acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1761         if (acg_dev == NULL) {
1762                 res = -ENOMEM;
1763                 goto out;
1764         }
1765         acg_dev->rd_only = read_only;
1766
1767         TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1768                 acg_dev);
1769         list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1770         list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1771
1772         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1773                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1774                 if (tgt_dev == NULL) {
1775                         res = -ENOMEM;
1776                         goto out_free;
1777                 }
1778                 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1779                               &tmp_tgt_dev_list);
1780         }
1781
1782         if (gen_scst_report_luns_changed)
1783                 scst_report_luns_changed(acg);
1784
1785         if (dev->virt_name != NULL) {
1786                 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1787                         "rd_only %d)", dev->virt_name, acg->acg_name,
1788                         (long long unsigned int)lun,
1789                         read_only);
1790         } else {
1791                 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1792                         "%lld, rd_only %d)",
1793                         dev->scsi_dev->host->host_no,
1794                         dev->scsi_dev->channel, dev->scsi_dev->id,
1795                         dev->scsi_dev->lun, acg->acg_name,
1796                         (long long unsigned int)lun,
1797                         read_only);
1798         }
1799
1800 out:
1801         TRACE_EXIT_RES(res);
1802         return res;
1803
1804 out_free:
1805         list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1806                          extra_tgt_dev_list_entry) {
1807                 scst_free_tgt_dev(tgt_dev);
1808         }
1809         scst_free_acg_dev(acg_dev);
1810         goto out;
1811 }
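
/*
 * Illustrative call (assumed management-interface context, not part of
 * the build):
 *
 *	res = scst_acg_add_dev(acg, dev, 0, 0, true);
 *
 * maps the device read-write at LUN 0 into every session of the group
 * and, via scst_report_luns_changed(), queues a REPORTED LUNS DATA HAS
 * CHANGED Unit Attention so initiators rescan their LUN map.
 */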
1812
1813 /* The activity supposed to be suspended and scst_mutex held */
1814 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev,
1815         bool gen_scst_report_luns_changed)
1816 {
1817         int res = 0;
1818         struct scst_acg_dev *acg_dev = NULL, *a;
1819         struct scst_tgt_dev *tgt_dev, *tt;
1820
1821         TRACE_ENTRY();
1822
1823         list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1824                 if (a->dev == dev) {
1825                         acg_dev = a;
1826                         break;
1827                 }
1828         }
1829
1830         if (acg_dev == NULL) {
1831                 PRINT_ERROR("Device not found in group %s", acg->acg_name);
1832                 res = -EINVAL;
1833                 goto out;
1834         }
1835
1836         list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1837                          dev_tgt_dev_list_entry) {
1838                 if (tgt_dev->acg_dev == acg_dev)
1839                         scst_free_tgt_dev(tgt_dev);
1840         }
1841         scst_free_acg_dev(acg_dev);
1842
1843         if (gen_scst_report_luns_changed)
1844                 scst_report_luns_changed(acg);
1845
1846         if (dev->virt_name != NULL) {
1847                 PRINT_INFO("Removed device %s from group %s",
1848                         dev->virt_name, acg->acg_name);
1849         } else {
1850                 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1851                         dev->scsi_dev->host->host_no,
1852                         dev->scsi_dev->channel, dev->scsi_dev->id,
1853                         dev->scsi_dev->lun, acg->acg_name);
1854         }
1855
1856 out:
1857         TRACE_EXIT_RES(res);
1858         return res;
1859 }
1860
1861 /* The activity supposed to be suspended and scst_mutex held */
1862 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1863 {
1864         int res = 0;
1865         struct scst_acn *n;
1866         int len;
1867         char *nm;
1868
1869         TRACE_ENTRY();
1870
1871         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1872                 if (strcmp(n->name, name) == 0) {
1873                         PRINT_ERROR("Name %s already exists in group %s",
1874                                 name, acg->acg_name);
1875                         res = -EINVAL;
1876                         goto out;
1877                 }
1878         }
1879
1880         n = kmalloc(sizeof(*n), GFP_KERNEL);
1881         if (n == NULL) {
1882                 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1883                 res = -ENOMEM;
1884                 goto out;
1885         }
1886
1887         len = strlen(name);
1888         nm = kmalloc(len + 1, GFP_KERNEL);
1889         if (nm == NULL) {
1890                 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1891                 res = -ENOMEM;
1892                 goto out_free;
1893         }
1894
1895         strcpy(nm, name);
1896         n->name = nm;
1897
1898         list_add_tail(&n->acn_list_entry, &acg->acn_list);
1899
1900 out:
1901         if (res == 0) {
1902                 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1903                 scst_check_reassign_sessions();
1904         }
1905
1906         TRACE_EXIT_RES(res);
1907         return res;
1908
1909 out_free:
1910         kfree(n);
1911         goto out;
1912 }
1913
1914 /* scst_mutex supposed to be held */
1915 void __scst_acg_remove_acn(struct scst_acn *n)
1916 {
1917         TRACE_ENTRY();
1918
1919         list_del(&n->acn_list_entry);
1920         kfree(n->name);
1921         kfree(n);
1922
1923         TRACE_EXIT();
1924         return;
1925 }
1926
1927 /* The activity supposed to be suspended and scst_mutex held */
1928 int scst_acg_remove_name(struct scst_acg *acg, const char *name, bool reassign)
1929 {
1930         int res = -EINVAL;
1931         struct scst_acn *n;
1932
1933         TRACE_ENTRY();
1934
1935         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1936                 if (strcmp(n->name, name) == 0) {
1937                         __scst_acg_remove_acn(n);
1938                         res = 0;
1939                         break;
1940                 }
1941         }
1942
1943         if (res == 0) {
1944                 PRINT_INFO("Removed name %s from group %s", name,
1945                         acg->acg_name);
1946                 if (reassign)
1947                         scst_check_reassign_sessions();
1948         } else
1949                 PRINT_ERROR("Unable to find name %s in group %s", name,
1950                         acg->acg_name);
1951
1952         TRACE_EXIT_RES(res);
1953         return res;
1954 }
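
/*
 * Illustrative pairing (assumed management-interface context, not part
 * of the build):
 *
 *	scst_acg_add_name(acg, "iqn.1994-05.com.example:ini1");
 *	...
 *	scst_acg_remove_name(acg, "iqn.1994-05.com.example:ini1", true);
 *
 * Both expect suspended activity and scst_mutex held. With reassign true
 * the removal triggers scst_check_reassign_sessions() to re-evaluate
 * which group existing sessions belong to.
 */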
1955
1956 static struct scst_cmd *scst_create_prepare_internal_cmd(
1957         struct scst_cmd *orig_cmd, int bufsize)
1958 {
1959         struct scst_cmd *res;
1960         gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1961
1962         TRACE_ENTRY();
1963
1964         res = scst_alloc_cmd(gfp_mask);
1965         if (res == NULL)
1966                 goto out;
1967
1968         res->cmd_lists = orig_cmd->cmd_lists;
1969         res->sess = orig_cmd->sess;
1970         res->atomic = scst_cmd_atomic(orig_cmd);
1971         res->internal = 1;
1972         res->tgtt = orig_cmd->tgtt;
1973         res->tgt = orig_cmd->tgt;
1974         res->dev = orig_cmd->dev;
1975         res->tgt_dev = orig_cmd->tgt_dev;
1976         res->lun = orig_cmd->lun;
1977         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1978         res->data_direction = SCST_DATA_UNKNOWN;
1979         res->orig_cmd = orig_cmd;
1980         res->bufflen = bufsize;
1981
1982         scst_sess_get(res->sess);
1983         if (res->tgt_dev != NULL)
1984                 __scst_get(0);
1985
1986         res->state = SCST_CMD_STATE_PRE_PARSE;
1987
1988 out:
1989         TRACE_EXIT_HRES((unsigned long)res);
1990         return res;
1991 }
1992
1993 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1994 {
1995         int res = 0;
1996         static const uint8_t request_sense[6] =
1997             { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1998         struct scst_cmd *rs_cmd;
1999
2000         TRACE_ENTRY();
2001
2002         if (orig_cmd->sense != NULL) {
2003                 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
2004                         orig_cmd->sense, orig_cmd);
2005                 mempool_free(orig_cmd->sense, scst_sense_mempool);
2006                 orig_cmd->sense = NULL;
2007         }
2008
2009         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
2010                         SCST_SENSE_BUFFERSIZE);
2011         if (rs_cmd == NULL)
2012                 goto out_error;
2013
2014         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
2015         rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
2016         rs_cmd->cdb_len = sizeof(request_sense);
2017         rs_cmd->data_direction = SCST_DATA_READ;
2018         rs_cmd->expected_data_direction = rs_cmd->data_direction;
2019         rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
2020         rs_cmd->expected_values_set = 1;
2021
2022         TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
2023                 "cmd list", rs_cmd);
2024         spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2025         list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
2026         wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
2027         spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2028
2029 out:
2030         TRACE_EXIT_RES(res);
2031         return res;
2032
2033 out_error:
2034         res = -1;
2035         goto out;
2036 }
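
/*
 * The REQUEST SENSE command built above is processed as a regular SCST
 * command. On its completion scst_finish_internal_cmd() routes it to
 * scst_complete_request_sense() below, which copies the retrieved sense
 * into orig_cmd and requeues orig_cmd for further processing.
 */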
2037
2038 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
2039 {
2040         struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
2041         uint8_t *buf;
2042         int len;
2043
2044         TRACE_ENTRY();
2045
2046         sBUG_ON(orig_cmd == NULL);
2047
2048         len = scst_get_buf_first(req_cmd, &buf);
2049
2050         if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
2051             SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
2052                 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
2053                         buf, len);
2054                 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
2055                         len);
2056         } else {
2057                 PRINT_ERROR("%s", "Unable to get the sense via "
2058                         "REQUEST SENSE, returning HARDWARE ERROR");
2059                 scst_set_cmd_error(orig_cmd,
2060                         SCST_LOAD_SENSE(scst_sense_hardw_error));
2061         }
2062
2063         if (len > 0)
2064                 scst_put_buf(req_cmd, buf);
2065
2066         TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
2067                 "cmd list", orig_cmd);
2068         spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2069         list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
2070         wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
2071         spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2072
2073         TRACE_EXIT();
2074         return;
2075 }
2076
2077 int scst_finish_internal_cmd(struct scst_cmd *cmd)
2078 {
2079         int res;
2080
2081         TRACE_ENTRY();
2082
2083         sBUG_ON(!cmd->internal);
2084
2085         if (cmd->cdb[0] == REQUEST_SENSE)
2086                 scst_complete_request_sense(cmd);
2087
2088         __scst_cmd_put(cmd);
2089
2090         res = SCST_CMD_STATE_RES_CONT_NEXT;
2091
2092         TRACE_EXIT_HRES(res);
2093         return res;
2094 }
2095
2096 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2097 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
2098 {
2099         struct scsi_request *req;
2100
2101         TRACE_ENTRY();
2102
2103         if (scsi_cmd && (req = scsi_cmd->sc_request)) {
2104                 if (req->sr_bufflen)
2105                         kfree(req->sr_buffer);
2106                 scsi_release_request(req);
2107         }
2110
2111         TRACE_EXIT();
2112         return;
2113 }
2114
2115 static void scst_send_release(struct scst_device *dev)
2116 {
2117         struct scsi_request *req;
2118         struct scsi_device *scsi_dev;
2119         uint8_t cdb[6];
2120
2121         TRACE_ENTRY();
2122
2123         if (dev->scsi_dev == NULL)
2124                 goto out;
2125
2126         scsi_dev = dev->scsi_dev;
2127
2128         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
2129         if (req == NULL) {
2130                 PRINT_ERROR("Allocation of scsi_request failed: unable "
2131                             "to RELEASE device %d:%d:%d:%d",
2132                             scsi_dev->host->host_no, scsi_dev->channel,
2133                             scsi_dev->id, scsi_dev->lun);
2134                 goto out;
2135         }
2136
2137         memset(cdb, 0, sizeof(cdb));
2138         cdb[0] = RELEASE;
2139         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2140             ((scsi_dev->lun << 5) & 0xe0) : 0;
2141         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
2142         req->sr_cmd_len = sizeof(cdb);
2143         req->sr_data_direction = SCST_DATA_NONE;
2144         req->sr_use_sg = 0;
2145         req->sr_bufflen = 0;
2146         req->sr_buffer = NULL;
2147         req->sr_request->rq_disk = dev->rq_disk;
2148         req->sr_sense_buffer[0] = 0;
2149
2150         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
2151                 "mid-level", req);
2152         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
2153                     scst_req_done, 15, 3);
2154
2155 out:
2156         TRACE_EXIT();
2157         return;
2158 }
2159 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2160 static void scst_send_release(struct scst_device *dev)
2161 {
2162         struct scsi_device *scsi_dev;
2163         unsigned char cdb[6];
2164         uint8_t sense[SCSI_SENSE_BUFFERSIZE];
2165         int rc, i;
2166
2167         TRACE_ENTRY();
2168
2169         if (dev->scsi_dev == NULL)
2170                 goto out;
2171
2172         scsi_dev = dev->scsi_dev;
2173
2174         for (i = 0; i < 5; i++) {
2175                 memset(cdb, 0, sizeof(cdb));
2176                 cdb[0] = RELEASE;
2177                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2178                     ((scsi_dev->lun << 5) & 0xe0) : 0;
2179
2180                 memset(sense, 0, sizeof(sense));
2181
2182                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
2183                         "SCSI mid-level");
2184                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
2185                                 sense, 15, 0, 0
2186 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
2187                                 , NULL
2188 #endif
2189                                 );
2190                 TRACE_DBG("RELEASE done: %x", rc);
2191
2192                 if (scsi_status_is_good(rc)) {
2193                         break;
2194                 } else {
2195                         PRINT_ERROR("RELEASE failed: %d", rc);
2196                         PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
2197                         scst_check_internal_sense(dev, rc, sense,
2198                                 sizeof(sense));
2199                 }
2200         }
2201
2202 out:
2203         TRACE_EXIT();
2204         return;
2205 }
2206 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2207
2208 /* scst_mutex supposed to be held */
2209 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
2210 {
2211         struct scst_device *dev = tgt_dev->dev;
2212         int release = 0;
2213
2214         TRACE_ENTRY();
2215
2216         spin_lock_bh(&dev->dev_lock);
2217         if (dev->dev_reserved &&
2218             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
2219                 /* This tgt_dev is the one holding the reservation */
2220                 struct scst_tgt_dev *tgt_dev_tmp;
2221                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2222                                     dev_tgt_dev_list_entry) {
2223                         clear_bit(SCST_TGT_DEV_RESERVED,
2224                                     &tgt_dev_tmp->tgt_dev_flags);
2225                 }
2226                 dev->dev_reserved = 0;
2227                 release = 1;
2228         }
2229         spin_unlock_bh(&dev->dev_lock);
2230
2231         if (release)
2232                 scst_send_release(dev);
2233
2234         TRACE_EXIT();
2235         return;
2236 }
2237
2238 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
2239         const char *initiator_name)
2240 {
2241         struct scst_session *sess;
2242         int i;
2243         int len;
2244         char *nm;
2245
2246         TRACE_ENTRY();
2247
2248 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2249         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2250 #else
2251         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2252 #endif
2253         if (sess == NULL) {
2254                 TRACE(TRACE_OUT_OF_MEM, "%s",
2255                       "Allocation of scst_session failed");
2256                 goto out;
2257         }
2258 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2259         memset(sess, 0, sizeof(*sess));
2260 #endif
2261
2262         sess->init_phase = SCST_SESS_IPH_INITING;
2263         sess->shut_phase = SCST_SESS_SPH_READY;
2264         atomic_set(&sess->refcnt, 0);
2265         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2266                 struct list_head *sess_tgt_dev_list_head =
2267                          &sess->sess_tgt_dev_list_hash[i];
2268                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2269         }
2270         spin_lock_init(&sess->sess_list_lock);
2271         INIT_LIST_HEAD(&sess->search_cmd_list);
2272         INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2273         sess->tgt = tgt;
2274         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2275         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2276 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2277         INIT_DELAYED_WORK(&sess->hw_pending_work,
2278                 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2279 #else
2280         INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2281 #endif
2282
2283 #ifdef CONFIG_SCST_MEASURE_LATENCY
2284         spin_lock_init(&sess->meas_lock);
2285 #endif
2286
2287         len = strlen(initiator_name);
2288         nm = kmalloc(len + 1, gfp_mask);
2289         if (nm == NULL) {
2290                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2291                 goto out_free;
2292         }
2293
2294         strcpy(nm, initiator_name);
2295         sess->initiator_name = nm;
2296
2297 out:
2298         TRACE_EXIT();
2299         return sess;
2300
2301 out_free:
2302         kmem_cache_free(scst_sess_cachep, sess);
2303         sess = NULL;
2304         goto out;
2305 }
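
/*
 * Target drivers normally don't call scst_alloc_session() directly, but
 * go through scst_register_session(), which uses it internally. A sketch
 * (assumed driver login path, not part of the build):
 *
 *	sess = scst_register_session(tgt, 0, initiator_name, NULL, NULL);
 *	if (sess == NULL)
 *		goto reject_login;
 */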
2306
2307 void scst_free_session(struct scst_session *sess)
2308 {
2309         TRACE_ENTRY();
2310
2311         mutex_lock(&scst_mutex);
2312
2313         TRACE_DBG("Removing sess %p from the list", sess);
2314         list_del(&sess->sess_list_entry);
2315         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2316         list_del(&sess->acg_sess_list_entry);
2317
2318         scst_sess_free_tgt_devs(sess);
2319
2320         wake_up_all(&sess->tgt->unreg_waitQ);
2321
2322         mutex_unlock(&scst_mutex);
2323
2324         kfree(sess->initiator_name);
2325         kmem_cache_free(scst_sess_cachep, sess);
2326
2327         TRACE_EXIT();
2328         return;
2329 }
2330
2331 void scst_free_session_callback(struct scst_session *sess)
2332 {
2333         struct completion *c;
2334
2335         TRACE_ENTRY();
2336
2337         TRACE_DBG("Freeing session %p", sess);
2338
2339         cancel_delayed_work_sync(&sess->hw_pending_work);
2340
2341         c = sess->shutdown_compl;
2342
2343         if (sess->unreg_done_fn) {
2344                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2345                 sess->unreg_done_fn(sess);
2346                 TRACE_DBG("%s", "unreg_done_fn() returned");
2347         }
2348         scst_free_session(sess);
2349
2350         if (c)
2351                 complete_all(c);
2352
2353         TRACE_EXIT();
2354         return;
2355 }
2356
2357 void scst_sched_session_free(struct scst_session *sess)
2358 {
2359         unsigned long flags;
2360
2361         TRACE_ENTRY();
2362
2363         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2364                 PRINT_CRIT_ERROR("session %p is being shut down with unexpected "
2365                         "shut phase %lx", sess, sess->shut_phase);
2366                 sBUG();
2367         }
2368
2369         spin_lock_irqsave(&scst_mgmt_lock, flags);
2370         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2371         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2372         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2373
2374         wake_up(&scst_mgmt_waitQ);
2375
2376         TRACE_EXIT();
2377         return;
2378 }
2379
2380 void scst_cmd_get(struct scst_cmd *cmd)
2381 {
2382         __scst_cmd_get(cmd);
2383 }
2384 EXPORT_SYMBOL(scst_cmd_get);
2385
2386 void scst_cmd_put(struct scst_cmd *cmd)
2387 {
2388         __scst_cmd_put(cmd);
2389 }
2390 EXPORT_SYMBOL(scst_cmd_put);
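
/*
 * Illustrative get/put pairing (assumed target-driver context, not part
 * of the build): a driver keeping a command referenced across an
 * asynchronous hardware callback takes an extra reference:
 *
 *	scst_cmd_get(cmd);
 *	start_hw_transfer(cmd);	(a hypothetical driver helper)
 *	...
 *	scst_cmd_put(cmd);	(in the transfer-done callback)
 *
 * The command is freed once the last reference is dropped (see
 * scst_free_cmd() below).
 */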
2391
2392 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2393 {
2394         struct scst_cmd *cmd;
2395
2396         TRACE_ENTRY();
2397
2398 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2399         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2400 #else
2401         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2402 #endif
2403         if (cmd == NULL) {
2404                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2405                 goto out;
2406         }
2407 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2408         memset(cmd, 0, sizeof(*cmd));
2409 #endif
2410
2411         cmd->state = SCST_CMD_STATE_INIT_WAIT;
2412         cmd->start_time = jiffies;
2413         atomic_set(&cmd->cmd_ref, 1);
2414         cmd->cmd_lists = &scst_main_cmd_lists;
2415         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2416         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2417         cmd->timeout = SCST_DEFAULT_TIMEOUT;
2418         cmd->retries = 0;
2419         cmd->data_len = -1;
2420         cmd->is_send_status = 1;
2421         cmd->resp_data_len = -1;
2422
2423         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2424         cmd->dbl_ua_orig_resp_data_len = -1;
2425
2426 out:
2427         TRACE_EXIT();
2428         return cmd;
2429 }
2430
2431 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2432 {
2433         scst_sess_put(cmd->sess);
2434
2435         /*
2436          * At this point tgt_dev can be dead, but the pointer remains non-NULL
2437          */
2438         if (likely(cmd->tgt_dev != NULL))
2439                 __scst_put();
2440
2441         scst_destroy_cmd(cmd);
2442         return;
2443 }
2444
2445 /* No locks supposed to be held */
2446 void scst_free_cmd(struct scst_cmd *cmd)
2447 {
2448         int destroy = 1;
2449
2450         TRACE_ENTRY();
2451
2452         TRACE_DBG("Freeing cmd %p (tag %llu)",
2453                   cmd, (long long unsigned int)cmd->tag);
2454
2455         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2456                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2457                         cmd, atomic_read(&scst_cmd_count));
2458         }
2459
2460         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2461                 cmd->dec_on_dev_needed);
2462
2463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2464 #if defined(CONFIG_SCST_EXTRACHECKS)
2465         if (cmd->scsi_req) {
2466                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2467                         "scsi_req!");
2468                 scst_release_request(cmd);
2469         }
2470 #endif
2471 #endif
2472
2473         /*
2474          * The target driver can have already freed the sg buffer before
2475          * calling scst_tgt_cmd_done(). E.g., scst_local has to do that.
2476          */
2477         if (!cmd->tgt_data_buf_alloced)
2478                 scst_check_restore_sg_buff(cmd);
2479
2480         if (cmd->tgtt->on_free_cmd != NULL) {
2481                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2482                 cmd->tgtt->on_free_cmd(cmd);
2483                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2484         }
2485
2486         if (likely(cmd->dev != NULL)) {
2487                 struct scst_dev_type *handler = cmd->dev->handler;
2488                 if (handler->on_free_cmd != NULL) {
2489                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2490                               handler->name, cmd);
2491                         handler->on_free_cmd(cmd);
2492                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
2493                                 handler->name);
2494                 }
2495         }
2496
2497         scst_release_space(cmd);
2498
2499         if (unlikely(cmd->sense != NULL)) {
2500                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2501                 mempool_free(cmd->sense, scst_sense_mempool);
2502                 cmd->sense = NULL;
2503         }
2504
2505         if (likely(cmd->tgt_dev != NULL)) {
2506 #ifdef CONFIG_SCST_EXTRACHECKS
2507                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2508                         PRINT_ERROR("Finishing unexecuted cmd %p (opcode "
2509                             "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2510                             cmd, cmd->cdb[0], cmd->tgtt->name,
2511                             (long long unsigned int)cmd->lun,
2512                             cmd->sn, cmd->tgt_dev->expected_sn);
2513                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2514                 }
2515 #endif
2516
2517                 if (unlikely(cmd->out_of_sn)) {
2518                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2519                                 "destroy=%d", cmd,
2520                                 (long long unsigned int)cmd->tag,
2521                                 cmd->sn, destroy);
2522                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2523                                         &cmd->cmd_flags);
2524                 }
2525         }
2526
2527         if (likely(destroy))
2528                 scst_destroy_put_cmd(cmd);
2529
2530         TRACE_EXIT();
2531         return;
2532 }
2533
2534 /* No locks supposed to be held. */
2535 void scst_check_retries(struct scst_tgt *tgt)
2536 {
2537         int need_wake_up = 0;
2538
2539         TRACE_ENTRY();
2540
2541         /*
2542          * We don't worry about overflow of finished_cmds, because we check
2543          * only for its change.
2544          */
2545         atomic_inc(&tgt->finished_cmds);
2546         /* See comment in scst_queue_retry_cmd() */
2547         smp_mb__after_atomic_inc();
2548         if (unlikely(tgt->retry_cmds > 0)) {
2549                 struct scst_cmd *c, *tc;
2550                 unsigned long flags;
2551
2552                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2553                       tgt->retry_cmds);
2554
2555                 spin_lock_irqsave(&tgt->tgt_lock, flags);
2556                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2557                                 cmd_list_entry) {
2558                         tgt->retry_cmds--;
2559
2560                         TRACE_RETRY("Moving retry cmd %p to head of active "
2561                                 "cmd list (retry_cmds left %d)",
2562                                 c, tgt->retry_cmds);
2563                         spin_lock(&c->cmd_lists->cmd_list_lock);
2564                         list_move(&c->cmd_list_entry,
2565                                   &c->cmd_lists->active_cmd_list);
2566                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2567                         spin_unlock(&c->cmd_lists->cmd_list_lock);
2568
2569                         need_wake_up++;
2570                         if (need_wake_up >= 2) /* "slow start" */
2571                                 break;
2572                 }
2573                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2574         }
2575
2576         TRACE_EXIT();
2577         return;
2578 }
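
/*
 * Note the "slow start" above: at most two retried commands are requeued
 * per finished command, since releasing the whole retry list at once
 * could immediately exhaust the target's resources again.
 */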
2579
2580 void scst_tgt_retry_timer_fn(unsigned long arg)
2581 {
2582         struct scst_tgt *tgt = (struct scst_tgt *)arg;
2583         unsigned long flags;
2584
2585         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2586
2587         spin_lock_irqsave(&tgt->tgt_lock, flags);
2588         tgt->retry_timer_active = 0;
2589         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2590
2591         scst_check_retries(tgt);
2592
2593         TRACE_EXIT();
2594         return;
2595 }
2596
2597 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2598 {
2599         struct scst_mgmt_cmd *mcmd;
2600
2601         TRACE_ENTRY();
2602
2603         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2604         if (mcmd == NULL) {
2605                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2606                         "failed, some commands and their data could leak");
2607                 goto out;
2608         }
2609         memset(mcmd, 0, sizeof(*mcmd));
2610
2611 out:
2612         TRACE_EXIT();
2613         return mcmd;
2614 }
2615
2616 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2617 {
2618         unsigned long flags;
2619
2620         TRACE_ENTRY();
2621
2622         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2623         atomic_dec(&mcmd->sess->sess_cmd_count);
2624         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2625
2626         scst_sess_put(mcmd->sess);
2627
2628         if (mcmd->mcmd_tgt_dev != NULL)
2629                 __scst_put();
2630
2631         mempool_free(mcmd, scst_mgmt_mempool);
2632
2633         TRACE_EXIT();
2634         return;
2635 }
2636
2637 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2638 int scst_alloc_request(struct scst_cmd *cmd)
2639 {
2640         int res = 0;
2641         struct scsi_request *req;
2642         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2643
2644         TRACE_ENTRY();
2645
2646         /* cmd->dev->scsi_dev must be non-NULL here */
2647         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2648         if (req == NULL) {
2649                 TRACE(TRACE_OUT_OF_MEM, "%s",
2650                       "Allocation of scsi_request failed");
2651                 res = -ENOMEM;
2652                 goto out;
2653         }
2654
2655         cmd->scsi_req = req;
2656
2657         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2658         req->sr_cmd_len = cmd->cdb_len;
2659         req->sr_data_direction = cmd->data_direction;
2660         req->sr_use_sg = cmd->sg_cnt;
2661         req->sr_bufflen = cmd->bufflen;
2662         req->sr_buffer = cmd->sg;
2663         req->sr_request->rq_disk = cmd->dev->rq_disk;
2664         req->sr_sense_buffer[0] = 0;
2665
2666         cmd->scsi_req->upper_private_data = cmd;
2667
2668 out:
2669         TRACE_EXIT();
2670         return res;
2671 }
2672
2673 void scst_release_request(struct scst_cmd *cmd)
2674 {
2675         scsi_release_request(cmd->scsi_req);
2676         cmd->scsi_req = NULL;
2677 }
2678 #endif
2679
2680 static bool is_report_sg_limitation(void)
2681 {
2682 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2683         return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2684 #else
2685         return false;
2686 #endif
2687 }
2688
2689 int scst_alloc_space(struct scst_cmd *cmd)
2690 {
2691         gfp_t gfp_mask;
2692         int res = -ENOMEM;
2693         int atomic = scst_cmd_atomic(cmd);
2694         int flags;
2695         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2696         static int ll;
2697
2698         TRACE_ENTRY();
2699
2700         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2701
2702         flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2703         if (cmd->no_sgv)
2704                 flags |= SGV_POOL_ALLOC_NO_CACHED;
2705
2706         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2707                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2708         if (cmd->sg == NULL)
2709                 goto out;
2710
2711         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2712                 if ((ll < 10) || is_report_sg_limitation()) {
2713                         PRINT_INFO("Unable to complete command due to "
2714                                 "SG IO count limitation (requested %d, "
2715                                 "available %d, tgt lim %d)", cmd->sg_cnt,
2716                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2717                         ll++;
2718                 }
2719                 goto out_sg_free;
2720         }
2721
2722         if (cmd->data_direction != SCST_DATA_BIDI)
2723                 goto success;
2724
2725         cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2726                          flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2727                          &cmd->dev->dev_mem_lim, NULL);
2728         if (cmd->in_sg == NULL)
2729                 goto out_sg_free;
2730
2731         if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2732                 if ((ll < 10) || is_report_sg_limitation()) {
2733                         PRINT_INFO("Unable to complete command due to "
2734                                 "SG IO count limitation (IN buffer, requested "
2735                                 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2736                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2737                         ll++;
2738                 }
2739                 goto out_in_sg_free;
2740         }
2741
2742 success:
2743         res = 0;
2744
2745 out:
2746         TRACE_EXIT();
2747         return res;
2748
2749 out_in_sg_free:
2750         sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2751         cmd->in_sgv = NULL;
2752         cmd->in_sg = NULL;
2753         cmd->in_sg_cnt = 0;
2754
2755 out_sg_free:
2756         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2757         cmd->sgv = NULL;
2758         cmd->sg = NULL;
2759         cmd->sg_cnt = 0;
2760         goto out;
2761 }
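
/*
 * For bidirectional (SCST_DATA_BIDI) commands two vectors are allocated
 * above: cmd->sg/cmd->sg_cnt plus the second cmd->in_sg/cmd->in_sg_cnt
 * pair. Both come from the tgt_dev's SGV pool and are released together
 * in scst_release_space() below.
 */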
2762
2763 static void scst_release_space(struct scst_cmd *cmd)
2764 {
2765         TRACE_ENTRY();
2766
2767         if (cmd->sgv == NULL)
2768                 goto out;
2769
2770         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2771                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2772                 goto out;
2773         }
2774
2775         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2776         cmd->sgv = NULL;
2777         cmd->sg_cnt = 0;
2778         cmd->sg = NULL;
2779         cmd->bufflen = 0;
2780         cmd->data_len = 0;
2781
2782         if (cmd->in_sgv != NULL) {
2783                 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2784                 cmd->in_sgv = NULL;
2785                 cmd->in_sg_cnt = 0;
2786                 cmd->in_sg = NULL;
2787                 cmd->in_bufflen = 0;
2788         }
2789
2790 out:
2791         TRACE_EXIT();
2792         return;
2793 }
2794
2795 #if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED))
2796
2797 /*
2798  * Can switch to the next dst_sg element. So, to copy to strictly one
2799  * dst_sg element only, it must either be the last one in the chain, or
2800  * copy_len must be equal to dst_sg->length.
2801  */
2802 static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2803                         size_t *pdst_offs, struct scatterlist *src_sg,
2804                         size_t copy_len,
2805                         enum km_type d_km_type, enum km_type s_km_type)
2806 {
2807         int res = 0;
2808         struct scatterlist *dst_sg;
2809         size_t src_len, dst_len, src_offs, dst_offs;
2810         struct page *src_page, *dst_page;
2811
2812         dst_sg = *pdst_sg;
2813         dst_len = *pdst_len;
2814         dst_offs = *pdst_offs;
2815         dst_page = sg_page(dst_sg);
2816
2817         src_page = sg_page(src_sg);
2818         src_len = src_sg->length;
2819         src_offs = src_sg->offset;
2820
2821         do {
2822                 void *saddr, *daddr;
2823                 size_t n;
2824
2825                 saddr = kmap_atomic(src_page +
2826                                          (src_offs >> PAGE_SHIFT), s_km_type) +
2827                                     (src_offs & ~PAGE_MASK);
2828                 daddr = kmap_atomic(dst_page +
2829                                         (dst_offs >> PAGE_SHIFT), d_km_type) +
2830                                     (dst_offs & ~PAGE_MASK);
2831
2832                 if (((src_offs & ~PAGE_MASK) == 0) &&
2833                     ((dst_offs & ~PAGE_MASK) == 0) &&
2834                     (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2835                     (copy_len >= PAGE_SIZE)) {
2836                         copy_page(daddr, saddr);
2837                         n = PAGE_SIZE;
2838                 } else {
2839                         n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2840                                           PAGE_SIZE - (src_offs & ~PAGE_MASK));
2841                         n = min(n, src_len);
2842                         n = min(n, dst_len);
2843                         n = min_t(size_t, n, copy_len);
2844                         memcpy(daddr, saddr, n);
2845                 }
2846                 dst_offs += n;
2847                 src_offs += n;
2848
2849                 kunmap_atomic(saddr, s_km_type);
2850                 kunmap_atomic(daddr, d_km_type);
2851
2852                 res += n;
2853                 copy_len -= n;
2854                 if (copy_len == 0)
2855                         goto out;
2856
2857                 src_len -= n;
2858                 dst_len -= n;
2859                 if (dst_len == 0) {
2860                         dst_sg = sg_next(dst_sg);
2861                         if (dst_sg == NULL)
2862                                 goto out;
2863                         dst_page = sg_page(dst_sg);
2864                         dst_len = dst_sg->length;
2865                         dst_offs = dst_sg->offset;
2866                 }
2867         } while (src_len > 0);
2868
2869 out:
2870         *pdst_sg = dst_sg;
2871         *pdst_len = dst_len;
2872         *pdst_offs = dst_offs;
2873         return res;
2874 }
2875
2876 /**
2877  * sg_copy - copy one SG vector to another
2878  * @dst_sg:     destination SG
2879  * @src_sg:     source SG
2880  * @nents_to_copy: maximum number of entries to copy
2881  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2882  * @d_km_type:  kmap_atomic type for the destination SG
2883  * @s_km_type:  kmap_atomic type for the source SG
2884  *
2885  * Description:
2886  *    Data from the source SG vector will be copied to the destination SG
2887  *    vector. End of the vectors will be determined by sg_next() returning
2888  *    NULL. Returns number of bytes copied.
2889  */
2890 static int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2891             int nents_to_copy, size_t copy_len,
2892             enum km_type d_km_type, enum km_type s_km_type)
2893 {
2894         int res = 0;
2895         size_t dst_len, dst_offs;
2896
2897         if (copy_len == 0)
2898                 copy_len = 0x7FFFFFFF; /* copy all */
2899
2900         if (nents_to_copy == 0)
2901                 nents_to_copy = 0x7FFFFFFF; /* copy all */
2902
2903         dst_len = dst_sg->length;
2904         dst_offs = dst_sg->offset;
2905
2906         do {
2907                 int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2908                                 src_sg, copy_len, d_km_type, s_km_type);
2909                 copy_len -= copied;
2910                 res += copied;
2911                 if ((copy_len == 0) || (dst_sg == NULL))
2912                         goto out;
2913
2914                 nents_to_copy--;
2915                 if (nents_to_copy == 0)
2916                         goto out;
2917
2918                 src_sg = sg_next(src_sg);
2919         } while (src_sg != NULL);
2920
2921 out:
2922         return res;
2923 }
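
/*
 * Illustrative use of sg_copy() (not part of the build): copy everything
 * from src_sg to dst_sg with no entries limit, using the USER kmap slots:
 *
 *	copied = sg_copy(dst_sg, src_sg, 0, 0, KM_USER0, KM_USER1);
 *
 * Copying stops at whichever vector ends first, so "copied" can be less
 * than the total source length if the destination is shorter.
 */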
2924
2925 #endif /* !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
2926
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) && !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED))
2928
2929 #include <linux/pfn.h>
2930
2931 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
2932 static inline int object_is_on_stack(void *obj)
2933 {
2934         void *stack = task_stack_page(current);
2935
2936         return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2937 }
2938 #endif
2939
2940 struct blk_kern_sg_work {
2941         atomic_t bios_inflight;
2942         struct sg_table sg_table;
2943         struct scatterlist *src_sgl;
2944 };
2945
2946 static void blk_rq_unmap_kern_sg(struct request *rq, int err);
2947
2948 static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
2949 {
2950         TRACE_DBG("Freeing bw %p", bw);
2951         sg_free_table(&bw->sg_table);
2952         kfree(bw);
2953         return;
2954 }
2955
2956 static void blk_bio_map_kern_endio(struct bio *bio, int err)
2957 {
2958         struct blk_kern_sg_work *bw = bio->bi_private;
2959
2960         TRACE_DBG("bio %p finished", bio);
2961
2962         if (bw != NULL) {
2963                 /* Decrement the count of in-flight bios; free bw when it hits zero */
2964                 BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
2965                 if (atomic_dec_and_test(&bw->bios_inflight)) {
2966                         TRACE_DBG("sgl %p, new_sgl %p, new_sgl_nents %d",
2967                                 bw->src_sgl, bw->sg_table.sgl,
2968                                 bw->sg_table.nents);
2969                         if ((bio_data_dir(bio) == READ) && (err == 0)) {
2970                                 unsigned long flags;
2971
2972                                 TRACE_DBG("Copying sgl %p (nents %d) to "
2973                                         "orig_sgl %p", bw->sg_table.sgl,
2974                                         bw->sg_table.nents, bw->src_sgl);
2975
2976                                 local_irq_save(flags);  /* to protect KMs */
2977                                 sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
2978                                         KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2979                                 local_irq_restore(flags);
2980                         }
2981                         blk_free_kern_sg_work(bw);
2982                 }
2983         }
2984
2985         bio_put(bio);
2986         return;
2987 }
2988
2989 static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
2990                                int nents, struct blk_kern_sg_work **pbw,
2991                                gfp_t gfp, gfp_t page_gfp)
2992 {
2993         int res = 0, i;
2994         struct scatterlist *sg;
2995         struct scatterlist *new_sgl;
2996         int new_sgl_nents;
2997         size_t len = 0, to_copy;
2998         struct blk_kern_sg_work *bw;
2999
3000         bw = kzalloc(sizeof(*bw), gfp);
3001         if (bw == NULL) {
3002                 PRINT_ERROR("%s", "Unable to alloc blk_kern_sg_work");
3003                 goto out_free_bw;
3004         }
3005
3006         bw->src_sgl = sgl;
3007
3008         for_each_sg(sgl, sg, nents, i)
3009                 len += sg->length;
3010         to_copy = len;
3011
3012         new_sgl_nents = PFN_UP(len);
3013
3014         res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
3015         if (res != 0) {
3016                 PRINT_ERROR("Unable to alloc copy sg table (nents %d)",
3017                         new_sgl_nents);
3018                 goto out_free_bw;
3019         }
3020
3021         new_sgl = bw->sg_table.sgl;
3022
3023         TRACE_DBG("sgl %p, nents %d, to_copy %lld, new_sgl %p, new_sgl_nents %d",
3024                 sgl, nents, (long long)to_copy, new_sgl, new_sgl_nents);
3025
3026         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3027                 struct page *pg;
3028
3029                 pg = alloc_page(page_gfp);
3030                 if (pg == NULL) {
3031                         PRINT_ERROR("Unable to alloc copy page (left %lld)",
3032                                 (long long)len);
3033                         goto err_free_new_sgl;
3034                 }
3035
3036                 sg_assign_page(sg, pg);
3037                 sg->length = min_t(size_t, PAGE_SIZE, len);
3038
3039                 len -= PAGE_SIZE;
3040         }
3041
3042         if (rq_data_dir(rq) == WRITE) {
3043                 /*
3044                  * We need to limit the amount of copied data to to_copy,
3045                  * because the last element of sgl might not be marked as
3046                  * the last one in the SG chain.
3047                  */
3048                 TRACE_DBG("Copying sgl %p (nents %d) to new_sgl %p "
3049                         "(new_sgl_nents %d), to_copy %lld", sgl, nents,
3050                         new_sgl, new_sgl_nents, (long long)to_copy);
3051                 sg_copy(new_sgl, sgl, 0, to_copy,
3052                         KM_USER0, KM_USER1);
3053         }
3054
3055         *pbw = bw;
3056         /*
3057          * REQ_COPY_USER name is misleading. It should be something like
3058          * REQ_HAS_TAIL_SPACE_FOR_PADDING.
3059          */
3060         rq->cmd_flags |= REQ_COPY_USER;
3061
3062 out:
3063         return res;
3064
3065 err_free_new_sgl:
3066         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3067                 struct page *pg = sg_page(sg);
3068                 if (pg == NULL)
3069                         break;
3070                 __free_page(pg);
3071         }
3072         sg_free_table(&bw->sg_table);
3073
3074 out_free_bw:
3075         kfree(bw);
3076         res = -ENOMEM;
3077         goto out;
3078 }
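
#if 0
/*
 * A sizing sketch, for illustration only (the helper name is hypothetical):
 * how many bounce pages blk_rq_copy_kern_sg() allocates for a source sgl,
 * mirroring the PFN_UP() math above. E.g. elements of 1000, 2048 and 100
 * bytes total 3148 bytes, so one 4K page is allocated and its single sg
 * entry gets length min(PAGE_SIZE, 3148) = 3148.
 */
static unsigned long example_bounce_pages(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	size_t len = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		len += sg->length;
	return PFN_UP(len);
}
#endif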
3079
3080 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3081         int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
3082 {
3083         int res = 0;
3084         struct request_queue *q = rq->q;
3085         int rw = rq_data_dir(rq);
3086         int max_nr_vecs, i;
3087         size_t tot_len;
3088         bool need_new_bio;
3089         struct scatterlist *sg, *prev_sg = NULL;
3090         struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
3091         int bios;
3092
3093         if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
3094                 WARN_ON(1);
3095                 res = -EINVAL;
3096                 goto out;
3097         }
3098
3099         /*
3100          * Let's keep each bio allocation inside a single page to decrease
3101          * the probability of allocation failure.
3102          */
3103         max_nr_vecs = min_t(size_t,
3104                 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
3105                 BIO_MAX_PAGES);
3106
3107         TRACE_DBG("sgl %p, nents %d, bw %p, max_nr_vecs %d", sgl, nents, bw,
3108                 max_nr_vecs);
3109
3110         need_new_bio = true;
3111         tot_len = 0;
3112         bios = 0;
3113         for_each_sg(sgl, sg, nents, i) {
3114                 struct page *page = sg_page(sg);
3115                 void *page_addr = page_address(page);
3116                 size_t len = sg->length, l;
3117                 size_t offset = sg->offset;
3118
3119                 tot_len += len;
3120                 prev_sg = sg;
3121
3122                 /*
3123                  * Each segment must be DMA-aligned and must not live on
3124                  * the stack. Only the last one may have an unaligned
3125                  * length, provided the total length satisfies the queue's
3126                  * DMA padding alignment.
3127                  */
3128                 if (i == nents - 1)
3129                         l = 0;
3130                 else
3131                         l = len;
3132                 if (((sg->offset | l) & queue_dma_alignment(q)) ||
3133                     (page_addr && object_is_on_stack(page_addr + sg->offset))) {
3134                         TRACE_DBG("%s", "Bad DMA alignment, or buffer on stack");
3135                         res = -EINVAL;
3136                         goto out_free_bios;
3137                 }
3138
3139                 while (len > 0) {
3140                         size_t bytes;
3141                         int rc;
3142
3143                         if (need_new_bio) {
3144 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
3145                                 bio = bio_kmalloc(gfp, max_nr_vecs);
3146 #else
3147                                 bio = bio_alloc(gfp, max_nr_vecs);
3148 #endif
3149                                 if (bio == NULL) {
3150                                         PRINT_ERROR("%s", "Failed to alloc bio");
3151                                         res = -ENOMEM;
3152                                         goto out_free_bios;
3153                                 }
3154
3155                                 TRACE_DBG("bio %p alloced", bio);
3156
3157                                 if (rw == WRITE)
3158                                         bio->bi_rw |= 1 << BIO_RW;
3159
3160                                 bios++;
3161                                 bio->bi_private = bw;
3162                                 bio->bi_end_io = blk_bio_map_kern_endio;
3163
3164                                 if (hbio == NULL)
3165                                         hbio = tbio = bio;
3166                                 else
3167                                         tbio = tbio->bi_next = bio;
3168                         }
3169
3170                         bytes = min_t(size_t, len, PAGE_SIZE - offset);
3171
3172                         rc = bio_add_pc_page(q, bio, page, bytes, offset);
3173                         if (rc < bytes) {
3174                                 if (unlikely(need_new_bio || (rc < 0))) {
3175                                         if (rc < 0)
3176                                                 res = rc;
3177                                         else
3178                                                 res = -EIO;
3179                                         PRINT_ERROR("bio_add_pc_page() failed: "
3180                                                 "%d", rc);
3181                                         goto out_free_bios;
3182                                 } else {
3183                                         need_new_bio = true;
3184                                         len -= rc;
3185                                         offset += rc;
3186                                         continue;
3187                                 }
3188                         }
3189
3190                         need_new_bio = false;
3191                         offset = 0;
3192                         len -= bytes;
3193                         page = nth_page(page, 1);
3194                 }
3195         }
3196
3197         if (hbio == NULL) {
3198                 res = -EINVAL;
3199                 goto out_free_bios;
3200         }
3201
3202         /* The total length must satisfy the queue's DMA padding alignment */
3203         if ((tot_len & q->dma_pad_mask) &&
3204             !(rq->cmd_flags & REQ_COPY_USER)) {
3205                 TRACE_DBG("Total len %lld doesn't match DMA pad mask %x",
3206                         (long long)tot_len, q->dma_pad_mask);
3207                 res = -EINVAL;
3208                 goto out_free_bios;
3209         }
3210
3211         if (bw != NULL)
3212                 atomic_set(&bw->bios_inflight, bios);
3213
3214         while (hbio != NULL) {
3215                 bio = hbio;
3216                 hbio = hbio->bi_next;
3217                 bio->bi_next = NULL;
3218
3219                 blk_queue_bounce(q, &bio);
3220
3221                 res = blk_rq_append_bio(q, rq, bio);
3222                 if (unlikely(res != 0)) {
3223                         PRINT_ERROR("blk_rq_append_bio() failed: %d", res);
3224                         bio->bi_next = hbio;
3225                         hbio = bio;
3226                         /* Some bios may already be bounced and appended, so unmap them */
3227                         goto out_unmap_bios;
3228                 }
3229         }
3230
3231         rq->buffer = rq->data = NULL;
3232 out:
3233         return res;
3234
3235 out_free_bios:
3236         while (hbio != NULL) {
3237                 bio = hbio;
3238                 hbio = hbio->bi_next;
3239                 bio_put(bio);
3240         }
3241         goto out;
3242
3243 out_unmap_bios:
3244         blk_rq_unmap_kern_sg(rq, res);
3245         goto out;
3246 }
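
#if 0
/*
 * Illustration only, with a hypothetical helper name: the padding rule
 * enforced above. An unaligned total length is acceptable only when a
 * bounce copy with tail space for padding was set up, i.e. REQ_COPY_USER
 * is set on the request.
 */
static bool example_tot_len_acceptable(struct request_queue *q,
	struct request *rq, size_t tot_len)
{
	return !(tot_len & q->dma_pad_mask) ||
	       (rq->cmd_flags & REQ_COPY_USER);
}
#endif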
3247
3248 /**
3249  * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3250  * @rq:         request to fill
3251  * @sgl:        area to map
3252  * @nents:      number of elements in @sgl
3253  * @gfp:        memory allocation flags
3254  *
3255  * Description:
3256  *    Data will be mapped directly if possible. Otherwise a bounce
3257  *    buffer will be used.
3258  */
3259 static int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3260                        int nents, gfp_t gfp)
3261 {
3262         int res;
3263
3264         res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
3265         if (unlikely(res != 0)) {
3266                 struct blk_kern_sg_work *bw = NULL;
3267
3268                 TRACE_DBG("__blk_rq_map_kern_sg() failed: %d", res);
3269
3270                 res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
3271                                 gfp, rq->q->bounce_gfp | gfp);
3272                 if (unlikely(res != 0))
3273                         goto out;
3274
3275                 res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
3276                                 bw->sg_table.nents, bw, gfp);
3277                 if (res != 0) {
3278                         TRACE_DBG("__blk_rq_map_kern_sg() failed on copied "
3279                                 "sgl: %d", res);
3280                         blk_free_kern_sg_work(bw);
3281                         goto out;
3282                 }
3283         }
3284
3285         rq->buffer = rq->data = NULL;
3286
3287 out:
3288         return res;
3289 }
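
#if 0
/*
 * A minimal caller sketch, assuming a caller-built sgl; names and flow are
 * illustrative, not taken from this file. On failure only the request has
 * to be put, as scst_scsi_exec_async() below does: the mapping helpers
 * clean up their own bios and fall back to a bounce copy internally.
 */
static int example_map_sg_to_rq(struct request_queue *q,
	struct scatterlist *sgl, int nents)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res != 0)
		blk_put_request(rq);
	return res;
}
#endif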
3290
3291 /**
3292  * blk_rq_unmap_kern_sg - unmap a request with kernel sg
3293  * @rq:         request to unmap
3294  * @err:        non-zero error code
3295  *
3296  * Description:
3297  *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
3298  *    only in case of an error!
3299  */
3300 static void blk_rq_unmap_kern_sg(struct request *rq, int err)
3301 {
3302         struct bio *bio = rq->bio;
3303
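	/*
	 * Completing each bio by hand runs its bi_end_io handler
	 * (blk_bio_map_kern_endio() for bios mapped here), which copies any
	 * bounced data back to the source sgl and frees the bounce pages
	 * through the attached blk_kern_sg_work before dropping the bio.
	 */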
3304         while (bio) {
3305                 struct bio *b = bio;
3306                 bio = bio->bi_next;
3307                 b->bi_end_io(b, err);
3308         }
3309         rq->bio = NULL;
3310
3311         return;
3312 }
3313
3314 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) && !(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
3315
3316 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
3317
3318 static void scsi_end_async(struct request *req, int error)
3319 {
3320         struct scsi_io_context *sioc = req->end_io_data;
3321
3322         TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
3323
3324         if (sioc->done)
3325                 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3326
3327         if (!sioc->full_cdb_used)
3328                 kmem_cache_free(scsi_io_context_cache, sioc);
3329         else
3330                 kfree(sioc);
3331
3332         __blk_put_request(req->q, req);
3333         return;
3334 }
3335
3336 /**
3337  * scst_scsi_exec_async - executes a SCSI command in pass-through mode
3338  * @cmd:        scst command
3339  * @done:       callback function when done
3340  */
3341 int scst_scsi_exec_async(struct scst_cmd *cmd,
3342                        void (*done)(void *, char *, int, int))
3343 {
3344         int res = 0;
3345         struct request_queue *q = cmd->dev->scsi_dev->request_queue;
3346         struct request *rq;
3347         struct scsi_io_context *sioc;
3348         int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
3349         gfp_t gfp = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
3350         int cmd_len = cmd->cdb_len;
3351
3352         if (cmd->ext_cdb_len == 0) {
3353                 TRACE_DBG("Simple CDB (cmd_len %d)", cmd_len);
3354                 sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
3355                 if (sioc == NULL) {
3356                         res = -ENOMEM;
3357                         goto out;
3358                 }
3359         } else {
3360                 cmd_len += cmd->ext_cdb_len;
3361
3362                 TRACE_DBG("Extended CDB (cmd_len %d)", cmd_len);
3363
3364                 sioc = kzalloc(sizeof(*sioc) + cmd_len, gfp);
3365                 if (sioc == NULL) {
3366                         res = -ENOMEM;
3367                         goto out;
3368                 }
3369
3370                 sioc->full_cdb_used = 1;
3371
3372                 memcpy(sioc->full_cdb, cmd->cdb, cmd->cdb_len);
3373                 memcpy(&sioc->full_cdb[cmd->cdb_len], cmd->ext_cdb,
3374                         cmd->ext_cdb_len);
3375         }
3376
3377         rq = blk_get_request(q, write, gfp);
3378         if (rq == NULL) {
3379                 res = -ENOMEM;
3380                 goto out_free_sioc;
3381         }
3382
3383         rq->cmd_type = REQ_TYPE_BLOCK_PC;
3384         rq->cmd_flags |= REQ_QUIET;
3385
3386         if (cmd->sg != NULL) {
3387                 res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
3388                 if (res) {
3389                         TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
3390                         goto out_free_rq;
3391                 }
3392         }
3393
3394         if (cmd->data_direction == SCST_DATA_BIDI) {
3395                 struct request *next_rq;
3396
3397                 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
3398                         res = -EOPNOTSUPP;
3399                         goto out_free_unmap;
3400                 }
3401
3402                 next_rq = blk_get_request(q, READ, gfp);
3403                 if (next_rq == NULL) {
3404                         res = -ENOMEM;
3405                         goto out_free_unmap;
3406                 }
3407                 rq->next_rq = next_rq;
3408                 next_rq->cmd_type = rq->cmd_type;
3409
3410                 res = blk_rq_map_kern_sg(next_rq, cmd->in_sg,
3411                         cmd->in_sg_cnt, gfp);
3412                 if (res != 0)
3413                         goto out_free_unmap;
3414         }
3415
3416         TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
3417
3418         sioc->data = cmd;
3419         sioc->done = done;
3420
3421         rq->cmd_len = cmd_len;
3422         if (cmd->ext_cdb_len == 0) {
3423                 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3424                 memcpy(rq->cmd, cmd->cdb, cmd->cdb_len);
3425         } else
3426                 rq->cmd = sioc->full_cdb;
3427
3428         rq->sense = sioc->sense;
3429         rq->sense_len = sizeof(sioc->sense);
3430         rq->timeout = cmd->timeout;
3431         rq->retries = cmd->retries;
3432         rq->end_io_data = sioc;
3433
3434         blk_execute_rq_nowait(rq->q, NULL, rq,
3435                 (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
3436 out:
3437         return res;
3438
3439 out_free_unmap:
3440         if (rq->next_rq != NULL) {
3441                 blk_put_request(rq->next_rq);
3442                 rq->next_rq = NULL;
3443         }
3444         blk_rq_unmap_kern_sg(rq, res);
3445
3446 out_free_rq:
3447         blk_put_request(rq);
3448
3449 out_free_sioc:
3450         if (!sioc->full_cdb_used)
3451                 kmem_cache_free(scsi_io_context_cache, sioc);
3452         else
3453                 kfree(sioc);
3454         goto out;
3455 }
3456
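#if 0
/*
 * A hypothetical completion callback for scst_scsi_exec_async(), matching
 * the signature scsi_end_async() invokes: data is the value passed via
 * sioc->data (the scst_cmd here), sense points at a SCST_SENSE_BUFFERSIZE
 * buffer, result carries the request's error status and resid the residual
 * byte count.
 */
static void example_exec_async_done(void *data, char *sense, int result,
	int resid)
{
	struct scst_cmd *cmd = data;

	TRACE_DBG("cmd %p finished: result %d, resid %d", cmd, result, resid);
	/* ... decode result, inspect sense if set, complete cmd ... */
}
#endif
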
3457 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
3458
3459 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3460 {
3461         struct scatterlist *src_sg, *dst_sg;
3462         unsigned int to_copy;
3463         int atomic = scst_cmd_atomic(cmd);
3464
3465         TRACE_ENTRY();
3466
3467         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3468                 if (cmd->data_direction != SCST_DATA_BIDI) {
3469                         src_sg = cmd->tgt_sg;
3470                         dst_sg = cmd->sg;
3471                         to_copy = cmd->bufflen;
3472                 } else {
3473                         TRACE_MEM("BIDI cmd %p", cmd);
3474                         src_sg = cmd->tgt_in_sg;
3475                         dst_sg = cmd->in_sg;
3476                         to_copy = cmd->in_bufflen;
3477                 }
3478         } else {
3479                 src_sg = cmd->sg;
3480                 dst_sg = cmd->tgt_sg;
3481                 to_copy = cmd->resp_data_len;
3482         }
3483
3484         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, to_copy %lld",
3485                 cmd, copy_dir, src_sg, dst_sg, (long long)to_copy);
3486
3487         if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3488                 /*
3489                  * It can happen, e.g., with scst_user for a cmd with delayed
3490                  * allocation that failed with a Check Condition.
3491                  */
3492                 goto out;
3493         }
3494
3495         sg_copy(dst_sg, src_sg, 0, to_copy,
3496                 atomic ? KM_SOFTIRQ0 : KM_USER0,
3497                 atomic ? KM_SOFTIRQ1 : KM_USER1);
3498
3499 out:
3500         TRACE_EXIT();
3501         return;
3502 }
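
#if 0
/*
 * Illustration of the direction convention above, not a real caller:
 * SCST_SG_COPY_FROM_TARGET moves received WRITE data from tgt_sg into the
 * command's sg (or in_sg for BIDI), while the opposite direction
 * (SCST_SG_COPY_TO_TARGET, assumed here to be the complementary enum
 * value) moves resp_data_len bytes of response data back for transmission.
 */
static void example_copy_both_ways(struct scst_cmd *cmd)
{
	/* tgt_sg -> sg, cmd->bufflen bytes */
	scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);
	/* ... process the data ... */
	/* sg -> tgt_sg, cmd->resp_data_len bytes */
	scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);
}
#endif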
3503
3504 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3505
3506 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
3507 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3508
3509 int scst_get_cdb_len(const uint8_t *cdb)
3510 {
3511         return SCST_GET_CDB_LEN(cdb[0]);
3512 }
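
/*
 * Worked example of the group decode above: READ(10) has opcode 0x28, so
 * SCST_CDB_GROUP(0x28) = (0x28 >> 5) & 0x7 = 1 and scst_get_cdb_len()
 * returns SCST_CDB_LENGTH[1] = 10. Groups 3, 6 and 7 map to -1, i.e.
 * reserved/vendor-specific CDBs of unknown length.
 */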
3513
3514 /* get_trans_len_x extracts x bytes from the CDB as the transfer length, starting at off */
3515
3516 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3517 {
3518         cmd->cdb_len = 10;
3519         cmd->bufflen = 0;
3520         return 0;
3521 }
3522
3523 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3524 {
3525         cmd->bufflen = 6;
3526         return 0;
3527 }
3528
3529 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3530 {
3531         cmd->bufflen = READ_CAP_LEN;
3532         return 0;
3533 }
3534
3535 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3536 {
3537         int res = 0;
3538
3539         TRACE_ENTRY();
3540
3541         if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3542                 cmd->op_name = "READ CAPACITY(16)";
3543                 cmd->bufflen = READ_CAP16_LEN;
3544                 cmd->op_flags |= SCST_IMPLICIT_HQ;
3545         } else
3546                 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3547
3548         TRACE_EXIT_RES(res);
3549         return res;
3550 }
3551
3552 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3553 {
3554         cmd->bufflen = 1;
3555         return 0;
3556 }
3557
3558 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3559 {
3560         uint8_t *p = (uint8_t *)cmd->cdb + off;
3561         int res = 0;
3562
3563         cmd->bufflen = 0;
3564         cmd->bufflen |= ((u32)p[0]) << 8;
3565         cmd->bufflen |= ((u32)p[1]);
3566
3567         switch (cmd->cdb[1] & 0x1f) {
3568         case 0:
3569         case 1:
3570         case 6:
3571                 if (cmd->bufflen != 0) {
3572                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3573                                 "allocation length for service action %x",
3574                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
3575                         goto out_inval;
3576                 }
3577                 break;
3578         }
3579
3580         switch (cmd->cdb[1] & 0x1f) {
3581         case 0:
3582         case 1:
3583                 cmd->bufflen = 20;
3584                 break;
3585         case 6:
3586                 cmd->bufflen = 32;
3587                 break;
3588         case 8:
3589                 cmd->bufflen = max(28, cmd->bufflen);
3590                 break;
3591         default:
3592                 PRINT_ERROR("READ POSITION: Invalid service action %x",
3593                         cmd->cdb[1] & 0x1f);
3594                 goto out_inval;
3595         }
3596
3597 out:
3598         return res;
3599
3600 out_inval:
3601         scst_set_cmd_error(cmd,
3602                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3603         res = 1;
3604         goto out;
3605 }
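
/*
 * For illustration: service actions 0 and 1 always return the 20-byte
 * short form and 6 the 32-byte long form, which is why their allocation
 * length field must be 0; the extended form (8) returns max(28, allocation
 * length) bytes, so an allocation length of 64 gives bufflen = 64.
 */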
3606
3607 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3608 {
3609         cmd->bufflen = (u32)cmd->cdb[off];
3610         return 0;
3611 }
3612
3613 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3614 {
3615         cmd->bufflen = (u32)cmd->cdb[off];
3616         if (cmd->bufflen == 0)
3617                 cmd->bufflen = 256;
3618         return 0;
3619 }
3620
3621 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3622 {
3623         const uint8_t *p = cmd->cdb + off;
3624
3625         cmd->bufflen = 0;
3626         cmd->bufflen |= ((u32)p[0]) << 8;
3627         cmd->bufflen |= ((u32)p[1]);
3628
3629         return 0;
3630 }
3631
3632 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3633 {
3634         const uint8_t *p = cmd->cdb + off;
3635
3636         cmd->bufflen = 0;
3637         cmd->bufflen |= ((u32)p[0]) << 16;
3638         cmd->bufflen |= ((u32)p[1]) << 8;
3639         cmd->bufflen |= ((u32)p[2]);
3640
3641         return 0;
3642 }
3643
3644 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3645 {
3646         const uint8_t *p = cmd->cdb + off;
3647
3648         cmd->bufflen = 0;
3649         cmd->bufflen |= ((u32)p[0]) << 24;
3650         cmd->bufflen |= ((u32)p[1]) << 16;
3651         cmd->bufflen |= ((u32)p[2]) << 8;
3652         cmd->bufflen |= ((u32)p[3]);
3653
3654         return 0;
3655 }
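
/*
 * For illustration: these helpers read big-endian length fields, e.g.
 * get_trans_len_2() over CDB bytes 0x12 0x34 at offset off yields
 * bufflen = (0x12 << 8) | 0x34 = 0x1234 = 4660.
 */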
3656
3657 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3658 {
3659         cmd->bufflen = 0;
3660         return 0;
3661 }
3662
3663 int scst_get_cdb_info(struct scst_cmd *cmd)
3664 {
3665         int dev_type = cmd->dev->type;
3666         int i, res = 0;
3667         uint8_t op;
3668         const struct scst_sdbops *ptr = NULL;
3669
3670         TRACE_ENTRY();
3671
3672         op = cmd->cdb[0];       /* get the opcode */
3673
3674         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3675                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3676                 dev_type);
3677
3678         i = scst_scsi_op_list[op];
3679         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3680                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3681                         ptr = &scst_scsi_op_table[i];
3682                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3683                               ptr->ops, ptr->devkey[0], /* disk     */
3684                               ptr->devkey[1],   /* tape     */
3685                               ptr->devkey[2],   /* printer */
3686                               ptr->devkey[3],   /* cpu      */
3687                               ptr->devkey[4],   /* cdr      */
3688                               ptr->devkey[5],   /* cdrom    */
3689                               ptr->devkey[6],   /* scanner */
3690                               ptr->devkey[7],   /* worm     */
3691                               ptr->devkey[8],   /* changer */
3692                               ptr->devkey[9],   /* commdev */
3693                               ptr->op_name);
3694                         TRACE_DBG("direction=%d flags=%d off=%d",
3695                               ptr->direction,
3696                               ptr->flags,
3697                               ptr->off);
3698                         break;
3699                 }
3700                 i++;
3701         }
3702
3703         if (unlikely(ptr == NULL)) {
3704                 /* opcode not found, or not supported for this device type */
3705                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3706                       dev_type);
3707                 res = -1;
3708                 cmd->op_flags = SCST_INFO_NOT_FOUND;
3709                 goto out;
3710         }
3711
3712         cmd->cdb_len = SCST_GET_CDB_LEN(op);
3713         cmd->op_name = ptr->op_name;
3714         cmd->data_direction = ptr->direction;
3715         cmd->op_flags = ptr->flags;
3716         res = (*ptr->get_trans_len)(cmd, ptr->off);
3717
3718 out:
3719         TRACE_EXIT_RES(res);
3720         return res;
3721 }
3722 EXPORT_SYMBOL(scst_get_cdb_info);
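
#if 0
/*
 * A hypothetical caller sketch: after scst_get_cdb_info() returns 0 the
 * command carries its table-derived attributes; a negative result means
 * the opcode is unknown (op_flags is set to SCST_INFO_NOT_FOUND), and a
 * positive one means a CDB field was invalid and a check condition has
 * already been set on the command.
 */
static void example_classify_cmd(struct scst_cmd *cmd)
{
	if (scst_get_cdb_info(cmd) == 0)
		TRACE_DBG("%s: direction %d, bufflen %d", cmd->op_name,
			cmd->data_direction, cmd->bufflen);
	else
		TRACE_DBG("CDB with opcode 0x%x not classified", cmd->cdb[0]);
}
#endif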
3723
3724 /* Packs SCST LUN back to SCSI form using peripheral device addressing method */
3725 uint64_t scst_pack_lun(const uint64_t lun)
3726 {
3727         uint64_t res;
3728         uint16_t *p = (uint16_t *)&res;
3729
3730         res = lun;
3731         *p = cpu_to_be16(*p);
3732
3733         TRACE_EXIT_HRES((unsigned long)res);
3734         return res;
3735 }
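
/*
 * For illustration: LUN 5 packs so that the first two bytes of the 8-byte
 * on-the-wire LUN read 0x00 0x05, i.e. only the leading 16 bits are
 * byte-swapped to big-endian; for such small LUNs the remaining six bytes
 * are zero.
 */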
3736
3737 /*
3738  * Routine to extract a lun number from an 8-byte LUN structure
3739  * in network byte order (BE).
3740  * (see SAM-2, Section 4.12.3 page 40)
3741  * Supports 2 types of lun unpacking: peripheral and logical unit.
3742  */
3743 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3744 {
3745         uint64_t res = NO_SUCH_LUN;
3746         int address_method;
3747
3748         TRACE_ENTRY();
3749
3750         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3751
3752         if (unlikely(len < 2)) {
3753                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3754                         "more", len);
3755                 goto out;
3756         }
3757
3758         if (len > 2) {
3759                 switch (len) {
3760                 case 8:
3761                         if ((*((uint64_t *)lun) &
3762                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3763                                 goto out_err;
3764                         break;
3765                 case 4:
3766                         if (*((uint16_t *)&lun[2]) != 0)
3767                                 goto out_err;
3768                         break;
3769                 case 6:
3770                         if (*((uint32_t *)&lun[2]) != 0)
3771                                 goto out_err;
3772                         break;
3773                 default:
3774                         goto out_err;
3775                 }
3776         }
3777
3778         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
3779         switch (address_method) {
3780         case 0: /* peripheral device addressing method */
3781 #if 0
3782                 if (*lun) {
3783                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
3784                              "peripheral device addressing method 0x%02x, "
3785                              "expected 0", *lun);
3786                         break;
3787                 }
3788                 res = *(lun + 1);
3789                 break;
3790 #else
3791                 /*
3792                  * Looks like it's legal to use it as the flat space
3793                  * addressing method as well.
3794                  */
3795 
3796                 /* fall through */
3797 #endif
3798
3799         case 1: /* flat space addressing method */
3800                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3801                 break;
3802
3803         case 2: /* logical unit addressing method */
3804                 if (*lun & 0x3f) {
<