/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <asm/kmap_types.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
        int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        if (cmd->sense != NULL)
                goto memzero;

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

memzero:
        cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
        TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, cmd->sense_bufflen,
                scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

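/*
 * Illustrative sketch (not part of the original file): how a device
 * handler's exec callback might fail a command with CHECK CONDITION using
 * the helpers above. The handler function and the opcode check are
 * hypothetical; scst_set_cmd_error() and SCST_LOAD_SENSE() are the real
 * API from this file.
 *
 *      static int my_exec(struct scst_cmd *cmd)
 *      {
 *              if (cmd->cdb[0] == MY_UNSUPPORTED_OPCODE) {
 *                      scst_set_cmd_error(cmd,
 *                              SCST_LOAD_SENSE(scst_sense_invalid_opcode));
 *                      return SCST_EXEC_COMPLETED;
 *              }
 *              ...
 *      }
 *
 * Note that scst_set_cmd_error() allocates the sense buffer with atomic == 1,
 * so this pattern is safe in atomic context.
 */
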
void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
        int key, int asc, int ascq)
{
        sBUG_ON(len == 0);

        memset(buffer, 0, len);

        if (d_sense) {
                /* Descriptor format */
                if (len < 4) {
                        PRINT_ERROR("Length %d of sense buffer too small to "
                                "fit sense %x:%x:%x", len, key, asc, ascq);
                }

                buffer[0] = 0x72;               /* Response Code        */
                if (len > 1)
                        buffer[1] = key;        /* Sense Key            */
                if (len > 2)
                        buffer[2] = asc;        /* ASC                  */
                if (len > 3)
                        buffer[3] = ascq;       /* ASCQ                 */
        } else {
                /* Fixed format */
                if (len < 14) {
                        PRINT_ERROR("Length %d of sense buffer too small to "
                                "fit sense %x:%x:%x", len, key, asc, ascq);
                }

                buffer[0] = 0x70;               /* Response Code        */
                if (len > 2)
                        buffer[2] = key;        /* Sense Key            */
                if (len > 7)
                        buffer[7] = 0x0a;       /* Additional Sense Length */
                if (len > 12)
                        buffer[12] = asc;       /* ASC                  */
                if (len > 13)
                        buffer[13] = ascq;      /* ASCQ                 */
        }

        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

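/*
 * Worked example (derived from the code above): for ILLEGAL REQUEST,
 * INVALID FIELD IN CDB (key 0x05, asc 0x24, ascq 0x00) the buffer becomes:
 *
 *      fixed (d_sense false):     [0]=0x70 [2]=0x05 [7]=0x0a [12]=0x24 [13]=0x00
 *      descriptor (d_sense true): [0]=0x72 [1]=0x05 [2]=0x24 [3]=0x00
 *
 * with all other bytes zeroed by the leading memset().
 */
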
bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
        int key, int asc, int ascq)
{
        bool res = false;

        /* Response Code */
        if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
                /* Fixed format */

                if (len < 14) {
                        PRINT_ERROR("Sense too small to analyze (%d, "
                                "type fixed)", len);
                        goto out;
                }

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
                        goto out;
        } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
                /* Descriptor format */

                if (len < 4) {
                        PRINT_ERROR("Sense too small to analyze (%d, "
                                "type descriptor)", len);
                        goto out;
                }

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
                        goto out;
        } else
                goto out;

        res = true;

out:
        TRACE_EXIT_RES((int)res);
        return res;
}
EXPORT_SYMBOL(scst_analyze_sense);

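/*
 * Illustrative usage: callers match any subset of key/ASC/ASCQ via
 * valid_mask. E.g., to test whether a (valid) sense buffer carries MEDIUM
 * ERROR regardless of ASC/ASCQ, a hypothetical snippet would be:
 *
 *      if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
 *                      SCST_SENSE_KEY_VALID, MEDIUM_ERROR, 0, 0)) {
 *              handle the medium error here
 *      }
 *
 * Both fixed (0x70/0x71) and descriptor (0x72/0x73) formats are handled
 * transparently.
 */
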
bool scst_is_ua_sense(const uint8_t *sense, int len)
{
        if (SCST_SENSE_VALID(sense))
                return scst_analyze_sense(sense, len,
                        SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
        else
                return false;
}
EXPORT_SYMBOL(scst_is_ua_sense);

bool scst_is_ua_global(const uint8_t *sense, int len)
{
        bool res;

        /* If you change this, don't forget to change scst_requeue_ua() too! */

        res = scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

        return res;
}

void scst_check_convert_sense(struct scst_cmd *cmd)
{
        bool d_sense;

        TRACE_ENTRY();

        if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
                goto out;

        d_sense = scst_get_cmd_dev_d_sense(cmd);
        if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
                TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
                        cmd);
                if (cmd->sense_bufflen < 14) {
                        PRINT_ERROR("Sense too small to convert (%d, "
                                "type fixed)", cmd->sense_bufflen);
                        goto out;
                }
                scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
                        cmd->sense[2], cmd->sense[12], cmd->sense[13]);
        } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
                                (cmd->sense[0] == 0x73))) {
                TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
                        cmd);
                if (cmd->sense_bufflen < 4) {
                        PRINT_ERROR("Sense too small to convert (%d, "
                                "type descriptor)", cmd->sense_bufflen);
                        goto out;
                }
                scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
                        cmd->sense[1], cmd->sense[2], cmd->sense[3]);
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_check_convert_sense);

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
                asc, ascq);

        /* Protect sess_tgt_dev_list_hash */
        mutex_lock(&scst_mutex);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                struct scst_tgt_dev *tgt_dev;

                list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        spin_lock_bh(&tgt_dev->tgt_dev_lock);
                        if (!list_empty(&tgt_dev->UA_list)) {
                                struct scst_tgt_dev_UA *ua;

                                ua = list_entry(tgt_dev->UA_list.next,
                                        typeof(*ua), UA_list_entry);
                                if (scst_analyze_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                SCST_SENSE_ALL_VALID,
                                                SCST_LOAD_SENSE(scst_sense_reset_UA))) {
                                        scst_set_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                tgt_dev->dev->d_sense,
                                                key, asc, ascq);
                                } else
                                        PRINT_ERROR("%s",
                                                "The first UA isn't RESET UA");
                        } else
                                PRINT_ERROR("%s", "There's no RESET UA to "
                                        "replace");
                        spin_unlock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

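/*
 * Illustrative sketch: by default each tgt_dev starts with a RESET UA (see
 * scst_alloc_add_tgt_dev() below). A target driver that wants e.g. POWER ON
 * OCCURRED instead could call, once the session is fully initialized (the
 * values are the standard SPC codes; the exact calling point is an
 * assumption, not taken from this file):
 *
 *      scst_set_initial_UA(sess, UNIT_ATTENTION, 0x29, 0x01);
 */
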
static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
{
        struct scst_aen *aen;

        TRACE_ENTRY();

        aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
        if (aen == NULL) {
                PRINT_ERROR("AEN memory allocation failed. Corresponding "
                        "event notification will not be performed (initiator "
                        "%s)", tgt_dev->sess->initiator_name);
                goto out;
        }
        memset(aen, 0, sizeof(*aen));

        aen->sess = tgt_dev->sess;
        scst_sess_get(aen->sess);

        aen->lun = scst_pack_lun(tgt_dev->lun);

out:
        TRACE_EXIT_HRES((unsigned long)aen);
        return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
        TRACE_ENTRY();

        scst_sess_put(aen->sess);
        mempool_free(aen, scst_aen_mempool);

        TRACE_EXIT();
        return;
}

/* No locks */
void scst_capacity_data_changed(struct scst_device *dev)
{
        struct scst_tgt_dev *tgt_dev;
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

        TRACE_ENTRY();

        if (dev->type != TYPE_DISK) {
                TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
                        "CHANGED UA", dev->type);
                goto out;
        }

        TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

        mutex_lock(&scst_mutex);

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                            dev_tgt_dev_list_entry) {
                struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;

                if (tgtt->report_aen != NULL) {
                        struct scst_aen *aen;
                        int rc;

                        aen = scst_alloc_aen(tgt_dev);
                        if (aen == NULL)
                                goto queue_ua;

                        aen->event_fn = SCST_AEN_SCSI;
                        aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                        scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_capacity_data_changed));

                        TRACE_DBG("Calling target's %s report_aen(%p)",
                                tgtt->name, aen);
                        rc = tgtt->report_aen(aen);
                        TRACE_DBG("Target's %s report_aen(%p) returned %d",
                                tgtt->name, aen, rc);
                        if (rc == SCST_AEN_RES_SUCCESS)
                                continue;

                        scst_free_aen(aen);
                }
queue_ua:
                TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
                        tgt_dev);
                scst_set_sense(sense_buffer, sizeof(sense_buffer),
                        tgt_dev->dev->d_sense,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
                scst_check_set_UA(tgt_dev, sense_buffer,
                        sizeof(sense_buffer), 0);
        }

        mutex_unlock(&scst_mutex);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);

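/*
 * Illustrative sketch: a virtual-disk style handler would call
 * scst_capacity_data_changed() after resizing its backing storage, so each
 * connected initiator gets either a SCSI AEN or a CAPACITY DATA HAS CHANGED
 * UA. Note the function takes scst_mutex itself, so it must be called
 * without that mutex held. A hypothetical resize path:
 *
 *      static void my_vdisk_resize_done(struct scst_device *dev)
 *      {
 *              update the handler's cached size fields first, then:
 *              scst_capacity_data_changed(dev);
 *      }
 */
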
static inline bool scst_is_report_luns_changed_type(int type)
{
        switch (type) {
        case TYPE_DISK:
        case TYPE_TAPE:
        case TYPE_PRINTER:
        case TYPE_PROCESSOR:
        case TYPE_WORM:
        case TYPE_ROM:
        case TYPE_SCANNER:
        case TYPE_MOD:
        case TYPE_MEDIUM_CHANGER:
        case TYPE_RAID:
        case TYPE_ENCLOSURE:
                return true;
        default:
                return false;
        }
}

/* scst_mutex is supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
                                              int flags)
{
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
                "(sess %p)", sess);

        local_bh_disable();

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        /* Lockdep triggers a false positive here */
                        spin_lock(&tgt_dev->tgt_dev_lock);
                }
        }

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (!scst_is_report_luns_changed_type(
                                        tgt_dev->dev->type))
                                continue;

                        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                        __scst_check_set_UA(tgt_dev, sense_buffer,
                                sizeof(sense_buffer),
                                flags | SCST_SET_UA_FLAG_GLOBAL);
                }
        }

        for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry_reverse(tgt_dev,
                                shead, sess_tgt_dev_list_entry) {
                        spin_unlock(&tgt_dev->tgt_dev_lock);
                }
        }

        local_bh_enable();

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_report_luns_changed_sess(struct scst_session *sess)
{
        int i;
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        struct scst_tgt_template *tgtt = sess->tgt->tgtt;

        TRACE_ENTRY();

        TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (scst_is_report_luns_changed_type(
                                        tgt_dev->dev->type))
                                goto found;
                }
        }
        TRACE_MGMT_DBG("No device found capable of REPORTED LUNS DATA "
                "CHANGED UA (sess %p)", sess);
        goto out;

found:
        if (tgtt->report_aen != NULL) {
                struct scst_aen *aen;
                int rc;

                aen = scst_alloc_aen(tgt_dev);
                if (aen == NULL)
                        goto queue_ua;

                aen->event_fn = SCST_AEN_SCSI;
                aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                        tgt_dev->dev->d_sense,
                        SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                TRACE_DBG("Calling target's %s report_aen(%p)",
                        tgtt->name, aen);
                rc = tgtt->report_aen(aen);
                TRACE_DBG("Target's %s report_aen(%p) returned %d",
                        tgtt->name, aen, rc);
                if (rc == SCST_AEN_RES_SUCCESS)
                        goto out;

                scst_free_aen(aen);
        }

queue_ua:
        scst_queue_report_luns_changed_UA(sess, 0);

out:
        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
        struct scst_session *sess;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                scst_report_luns_changed_sess(sess);
        }

        TRACE_EXIT();
        return;
}

void scst_aen_done(struct scst_aen *aen)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
                aen->event_fn, aen->sess->initiator_name);

        if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
                goto out_free;

        if (aen->event_fn != SCST_AEN_SCSI)
                goto out_free;

        TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
                aen->sess->initiator_name);

        if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
                                scst_sense_reported_luns_data_changed))) {
                mutex_lock(&scst_mutex);
                scst_queue_report_luns_changed_UA(aen->sess,
                        SCST_SET_UA_FLAG_AT_HEAD);
                mutex_unlock(&scst_mutex);
        } else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
                /* tgt_dev may have been deleted, so we need to look it up again */
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;
                uint64_t lun;

                lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

                mutex_lock(&scst_mutex);

                shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (tgt_dev->lun == lun) {
                                TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
                                        "UA (tgt_dev %p)", tgt_dev);
                                scst_check_set_UA(tgt_dev, aen->aen_sense,
                                        aen->aen_sense_len,
                                        SCST_SET_UA_FLAG_AT_HEAD);
                                break;
                        }
                }

                mutex_unlock(&scst_mutex);
        } else
                PRINT_ERROR("%s", "Unknown SCSI AEN");

out_free:
        scst_free_aen(aen);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_aen_done);

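/*
 * Illustrative sketch of the delivery contract handled above: report_aen()
 * returns SCST_AEN_RES_SUCCESS when it took ownership of the AEN; when the
 * actual delivery later completes or fails, the target driver records the
 * outcome in aen->delivery_status and calls scst_aen_done(), which requeues
 * failed SCSI AENs as UAs. The driver callback below is hypothetical and
 * only suggested by the checks in scst_aen_done() above:
 *
 *      static int my_report_aen(struct scst_aen *aen)
 *      {
 *              if (!my_session_connected(aen->sess))
 *                      return SCST_AEN_RES_FAILED;
 *              queue the AEN to the wire; then, in the completion handler,
 *              set aen->delivery_status to SCST_AEN_RES_SUCCESS or
 *              SCST_AEN_RES_FAILED and call scst_aen_done(aen);
 *              return SCST_AEN_RES_SUCCESS;
 *      }
 */
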
void scst_requeue_ua(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
                        SCST_SENSE_ALL_VALID,
                        SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
                TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
                        "for delivery failed cmd %p", cmd);
                mutex_lock(&scst_mutex);
                scst_queue_report_luns_changed_UA(cmd->sess,
                        SCST_SET_UA_FLAG_AT_HEAD);
                mutex_unlock(&scst_mutex);
        } else {
                TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
                scst_check_set_UA(cmd->tgt_dev, cmd->sense,
                        cmd->sense_bufflen, SCST_SET_UA_FLAG_AT_HEAD);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_check_reassign_sess(struct scst_session *sess)
{
        struct scst_acg *acg, *old_acg;
        struct scst_acg_dev *acg_dev;
        int i;
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        bool luns_changed = false;
        bool add_failed, something_freed, not_needed_freed = false;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
                sess, sess->initiator_name);

        acg = scst_find_acg(sess);
        if (acg == sess->acg) {
                TRACE_MGMT_DBG("No reassignment for sess %p", sess);
                goto out;
        }

        TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
                sess, sess->acg->acg_name, acg->acg_name);

        old_acg = sess->acg;
        sess->acg = NULL; /* to catch implicit dependencies earlier */

retry_add:
        add_failed = false;
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                unsigned int inq_changed_ua_needed = 0;

                for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                        shead = &sess->sess_tgt_dev_list_hash[i];

                        list_for_each_entry(tgt_dev, shead,
                                        sess_tgt_dev_list_entry) {
                                if ((tgt_dev->dev == acg_dev->dev) &&
                                    (tgt_dev->lun == acg_dev->lun) &&
                                    (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
                                        TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
                                                "LUN %lld stays the same",
                                                sess, tgt_dev,
                                                (unsigned long long)tgt_dev->lun);
                                        tgt_dev->acg_dev = acg_dev;
                                        goto next;
                                } else if (tgt_dev->lun == acg_dev->lun)
                                        inq_changed_ua_needed = 1;
                        }
                }

                luns_changed = true;

                TRACE_MGMT_DBG("sess %p: Allocating new tgt_dev for LUN %lld",
                        sess, (unsigned long long)acg_dev->lun);

                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        add_failed = true;
                        break;
                }

                tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
                                                 not_needed_freed;
next:
                continue;
        }

        something_freed = false;
        not_needed_freed = true;
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct scst_tgt_dev *t;
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry_safe(tgt_dev, t, shead,
                                        sess_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev->acg != acg) {
                                TRACE_MGMT_DBG("sess %p: Deleting not used "
                                        "tgt_dev %p for LUN %lld",
                                        sess, tgt_dev,
                                        (unsigned long long)tgt_dev->lun);
                                luns_changed = true;
                                something_freed = true;
                                scst_free_tgt_dev(tgt_dev);
                        }
                }
        }

        if (add_failed && something_freed) {
                TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
                goto retry_add;
        }

        sess->acg = acg;

        TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
                old_acg->acg_name, acg->acg_name);
        list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);

        if (luns_changed) {
                uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

                scst_report_luns_changed_sess(sess);

                for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                        shead = &sess->sess_tgt_dev_list_hash[i];

                        list_for_each_entry(tgt_dev, shead,
                                        sess_tgt_dev_list_entry) {
                                if (tgt_dev->inq_changed_ua_needed) {
                                        TRACE_MGMT_DBG("sess %p: Setting "
                                                "INQUIRY DATA HAS CHANGED UA "
                                                "(tgt_dev %p)", sess, tgt_dev);

                                        tgt_dev->inq_changed_ua_needed = 0;

                                        scst_set_sense(sense_buffer,
                                                sizeof(sense_buffer),
                                                tgt_dev->dev->d_sense,
                                                SCST_LOAD_SENSE(scst_sense_inquery_data_changed));

                                        scst_check_set_UA(tgt_dev, sense_buffer,
                                                sizeof(sense_buffer), 0);
                                }
                        }
                }
        }

out:
        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_check_reassign_sessions(void)
{
        struct scst_tgt_template *tgtt;

        TRACE_ENTRY();

        list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
                struct scst_tgt *tgt;
                list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
                        struct scst_session *sess;
                        list_for_each_entry(sess, &tgt->sess_list,
                                                sess_list_entry) {
                                scst_check_reassign_sess(sess);
                        }
                }
        }

        TRACE_EXIT();
        return;
}

int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_DEV_DONE:
                if (cmd->internal)
                        res = SCST_CMD_STATE_FINISHED_INTERNAL;
                else
                        res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PRE_DEV_DONE:
        case SCST_CMD_STATE_MODE_SELECT_CHECKS:
                res = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_CMD_STATE_PRE_XMIT_RESP:
                res = SCST_CMD_STATE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PREPROCESS_DONE:
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_DATA_WAIT:
        case SCST_CMD_STATE_TGT_PRE_EXEC:
        case SCST_CMD_STATE_SEND_FOR_EXEC:
        case SCST_CMD_STATE_LOCAL_EXEC:
        case SCST_CMD_STATE_REAL_EXEC:
        case SCST_CMD_STATE_REAL_EXECUTING:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;

        default:
                PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
                /* Invalid state to suppress a compiler warning */
                res = SCST_CMD_STATE_LAST_ACTIVE;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_FINISHED_INTERNAL:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

#ifdef CONFIG_SCST_EXTRACHECKS
        if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                   (cmd->tgt_dev == NULL) && !cmd->internal) {
                PRINT_CRIT_ERROR("Wrong state %d of a not yet initialized "
                        "cmd (cmd %p, op %x)", cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

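/*
 * Illustrative usage (hypothetical snippet): a typical core error path
 * marks the failure on the cmd and then routes the state machine straight
 * to the response phase appropriate for the current state:
 *
 *      scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 *      scst_set_cmd_abnormal_done_state(cmd);
 *      return SCST_CMD_STATE_RES_CONT_SAME;
 */
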
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

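/*
 * Illustrative usage (hypothetical snippet): a dev handler that filled a
 * full-sized buffer but must honor a smaller ALLOCATION LENGTH can shrink
 * the response without reallocating the SG list:
 *
 *      scst_set_resp_data_len(cmd, min(alloc_len, cmd->resp_data_len));
 *
 * The original SG geometry is saved in cmd->orig_sg_* and later restored by
 * scst_check_restore_sg_buff().
 */
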
/* No locks */
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
        struct scst_tgt *tgt = cmd->tgt;
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_cmds++;
        /*
         * A memory barrier is needed here to enforce the exact order between
         * the write to retry_cmds and the read of finished_cmds, so we don't
         * miss the case when a command finishes while we are queuing this one
         * for retry after the finished_cmds check.
         */
        smp_mb();
        TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
              tgt->retry_cmds);
        if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
                /* At least one cmd finished, so try again */
                tgt->retry_cmds--;
                TRACE_RETRY("Some command(s) finished, direct retry "
                      "(finished_cmds=%d, tgt->finished_cmds=%d, "
                      "retry_cmds=%d)", finished_cmds,
                      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
                res = -1;
                goto out_unlock_tgt;
        }

        TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
        list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

        if (!tgt->retry_timer_active) {
                tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
                add_timer(&tgt->retry_timer);
                tgt->retry_timer_active = 1;
        }

out_unlock_tgt:
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        TRACE_EXIT_RES(res);
        return res;
}

/* Returns 0 to continue, >0 to restart, <0 to break */
static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
        unsigned long cur_time, unsigned long max_time,
        struct scst_session *sess, unsigned long *flags,
        struct scst_tgt_template *tgtt)
{
        int res = -1; /* break */

        TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
                "pending time %ld", cmd, cmd->cmd_hw_pending,
                (long)(cur_time - cmd->start_time) / HZ,
                (long)(cur_time - cmd->hw_pending_start) / HZ);

        if (time_before_eq(cur_time, cmd->start_time + max_time)) {
                /* Cmds are ordered, so no need to check more */
                goto out;
        }

        if (!cmd->cmd_hw_pending) {
                res = 0; /* continue */
                goto out;
        }

        if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
                /* Cmds are ordered, so no need to check more */
                goto out;
        }

        TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
                cmd, (cur_time - cmd->hw_pending_start) / HZ,
                cmd->state);

        cmd->cmd_hw_pending = 0;

        spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
        tgtt->on_hw_pending_cmd_timeout(cmd);
        spin_lock_irqsave(&sess->sess_list_lock, *flags);

        res = 1; /* restart */

out:
        TRACE_EXIT_RES(res);
        return res;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void scst_hw_pending_work_fn(void *p)
#else
static void scst_hw_pending_work_fn(struct delayed_work *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
        struct scst_session *sess = (struct scst_session *)p;
#else
        struct scst_session *sess = container_of(work, struct scst_session,
                                        hw_pending_work);
#endif
        struct scst_tgt_template *tgtt = sess->tgt->tgtt;
        struct scst_cmd *cmd;
        unsigned long cur_time = jiffies;
        unsigned long flags;
        unsigned long max_time = tgtt->max_hw_pending_time * HZ;

        TRACE_ENTRY();

        TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);

        clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);

        spin_lock_irqsave(&sess->sess_list_lock, flags);

restart:
        list_for_each_entry(cmd, &sess->search_cmd_list,
                                sess_cmd_list_entry) {
                int rc;

                rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
                                        &flags, tgtt);
                if (rc < 0)
                        break;
                else if (rc == 0)
                        continue;
                else
                        goto restart;
        }

restart1:
        list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
                                sess_cmd_list_entry) {
                int rc;

                rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
                                        &flags, tgtt);
                if (rc < 0)
                        break;
                else if (rc == 0)
                        continue;
                else
                        goto restart1;
        }

        if (!list_empty(&sess->search_cmd_list) ||
            !list_empty(&sess->after_pre_xmit_cmd_list)) {
                /*
                 * If there is no activity, stuck cmds might need one more run
                 * to be released, so reschedule once again.
                 */
                TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
                        sess, tgtt->max_hw_pending_time);
                set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
                schedule_delayed_work(&sess->hw_pending_work,
                                tgtt->max_hw_pending_time * HZ);
        }

        spin_unlock_irqrestore(&sess->sess_list_lock, flags);

        TRACE_EXIT();
        return;
}

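/*
 * Illustrative sketch: to enable the watchdog above, a target template sets
 * a nonzero max_hw_pending_time (in seconds) and provides the timeout hook;
 * all other initializer fields are omitted here:
 *
 *      static struct scst_tgt_template my_template = {
 *              .max_hw_pending_time = 60,
 *              .on_hw_pending_cmd_timeout = my_hw_pending_timeout,
 *              ...
 *      };
 *
 * The hook is called with cmd->cmd_hw_pending already cleared and would
 * typically abort the stuck command on the hardware and finish it.
 */
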
/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
        if (dev->dev_io_ctx == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
                res = -ENOMEM;
                kfree(dev);
                goto out;
        }
#endif
#endif

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        __exit_io_context(dev->dev_io_ctx);

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

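/*
 * Worked example: scst_max_dev_cmd_mem is in MB; "<< 10" converts MB to KB
 * and ">> (PAGE_SHIFT - 10)" converts KB to pages. With 4 KB pages
 * (PAGE_SHIFT == 12) and scst_max_dev_cmd_mem == 64:
 *
 *      (64 << 10) >> (12 - 10) == 65536 >> 2 == 16384 pages == 64 MB
 */
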
static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

        scst_check_reassign_sessions();

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

1403 /*
1404  * scst_mutex supposed to be held, there must not be parallel activity in this
1405  * session.
1406  */
1407 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
1408         struct scst_acg_dev *acg_dev)
1409 {
1410         int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
1411         struct scst_tgt_dev *tgt_dev, *t = NULL;
1412         struct scst_device *dev = acg_dev->dev;
1413         struct list_head *sess_tgt_dev_list_head;
1414         struct scst_tgt_template *vtt = sess->tgt->tgtt;
1415         int rc, i;
1416         bool share_io_ctx = false;
1417         uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1418
1419         TRACE_ENTRY();
1420
1421 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1422         tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
1423 #else
1424         tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
1425 #endif
1426         if (tgt_dev == NULL) {
1427                 TRACE(TRACE_OUT_OF_MEM, "%s",
1428                       "Allocation of scst_tgt_dev failed");
1429                 goto out;
1430         }
1431 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1432         memset(tgt_dev, 0, sizeof(*tgt_dev));
1433 #endif
1434
1435         tgt_dev->dev = dev;
1436         tgt_dev->lun = acg_dev->lun;
1437         tgt_dev->acg_dev = acg_dev;
1438         tgt_dev->sess = sess;
1439         atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
1440
1441         scst_sgv_pool_use_norm(tgt_dev);
1442
1443         if (dev->scsi_dev != NULL) {
1444                 ini_sg = dev->scsi_dev->host->sg_tablesize;
1445                 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
1446                 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
1447                                 ENABLE_CLUSTERING);
1448         } else {
1449                 ini_sg = (1 << 15) /* infinite */;
1450                 ini_unchecked_isa_dma = 0;
1451                 ini_use_clustering = 0;
1452         }
1453         tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
1454
1455         if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
1456             !sess->tgt->tgtt->no_clustering)
1457                 scst_sgv_pool_use_norm_clust(tgt_dev);
1458
1459         if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
1460                 scst_sgv_pool_use_dma(tgt_dev);
1461
1462         if (dev->scsi_dev != NULL) {
1463                 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
1464                       "SCST lun=%lld", dev->scsi_dev->host->host_no,
1465                       dev->scsi_dev->channel, dev->scsi_dev->id,
1466                       dev->scsi_dev->lun,
1467                       (long long unsigned int)tgt_dev->lun);
1468         } else {
1469                 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
1470                        dev->virt_name, (long long unsigned int)tgt_dev->lun);
1471         }
1472
1473         spin_lock_init(&tgt_dev->tgt_dev_lock);
1474         INIT_LIST_HEAD(&tgt_dev->UA_list);
1475         spin_lock_init(&tgt_dev->thr_data_lock);
1476         INIT_LIST_HEAD(&tgt_dev->thr_data_list);
1477         spin_lock_init(&tgt_dev->sn_lock);
1478         INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
1479         INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
1480         tgt_dev->expected_sn = 1;
1481         tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
1482         tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
1483         for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
1484                 atomic_set(&tgt_dev->sn_slots[i], 0);
1485
1486         if (dev->handler->parse_atomic &&
1487             (sess->tgt->tgtt->preprocessing_done == NULL)) {
1488                 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1489                         __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
1490                                 &tgt_dev->tgt_dev_flags);
1491                 if (dev->handler->exec_atomic)
1492                         __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
1493                                 &tgt_dev->tgt_dev_flags);
1494         }
1495         if (dev->handler->exec_atomic) {
1496                 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1497                         __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
1498                                 &tgt_dev->tgt_dev_flags);
1499                 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
1500                                 &tgt_dev->tgt_dev_flags);
1501                 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1502                         &tgt_dev->tgt_dev_flags);
1503         }
1504         if (dev->handler->dev_done_atomic &&
1505             sess->tgt->tgtt->xmit_response_atomic) {
1506                 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1507                         &tgt_dev->tgt_dev_flags);
1508         }
1509
1510         scst_set_sense(sense_buffer, sizeof(sense_buffer),
1511                 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
1512         scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);
1513
1514         tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
1515
1516         if (tgt_dev->sess->initiator_name != NULL) {
1517                 spin_lock_bh(&dev->dev_lock);
1518                 list_for_each_entry(t, &dev->dev_tgt_dev_list,
1519                                 dev_tgt_dev_list_entry) {
1520                         TRACE_DBG("t name %s (tgt_dev name %s)",
1521                                 t->sess->initiator_name,
1522                                 tgt_dev->sess->initiator_name);
1523                         if (t->sess->initiator_name == NULL)
1524                                 continue;
1525                         if (strcmp(t->sess->initiator_name,
1526                                         tgt_dev->sess->initiator_name) == 0) {
1527                                 share_io_ctx = true;
1528                                 break;
1529                         }
1530                 }
1531                 spin_unlock_bh(&dev->dev_lock);
1532         }
1533
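        /*
         * Editor's note: sessions from the same initiator share a single IO
         * context, so the IO scheduler sees all their commands as one
         * stream instead of several competing ones.
         */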
1534         if (share_io_ctx) {
1535                 TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
1536                         t->tgt_dev_io_ctx, tgt_dev,
1537                         tgt_dev->sess->initiator_name);
1538                 tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
1539         } else {
1540 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1541 #if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
1542                 tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1543                 if (tgt_dev->tgt_dev_io_ctx == NULL) {
1544                         TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
1545                                 "context for dev %s (initiator %s)",
1546                                 dev->virt_name, sess->initiator_name);
1547                         goto out_free;
1548                 }
1549 #endif
1550 #endif
1551         }
1552
1553         if (vtt->threads_num > 0) {
1554                 rc = 0;
1555                 if (dev->handler->threads_num > 0)
1556                         rc = scst_add_dev_threads(dev, vtt->threads_num);
1557                 else if (dev->handler->threads_num == 0)
1558                         rc = scst_add_global_threads(vtt->threads_num);
1559                 if (rc != 0)
1560                         goto out_free;
1561         }
1562
1563         if (dev->handler && dev->handler->attach_tgt) {
1564                 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1565                       tgt_dev);
1566                 rc = dev->handler->attach_tgt(tgt_dev);
1567                 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1568                 if (rc != 0) {
1569                         PRINT_ERROR("Device handler's %s attach_tgt() "
1570                             "failed: %d", dev->handler->name, rc);
1571                         goto out_thr_free;
1572                 }
1573         }
1574
1575         spin_lock_bh(&dev->dev_lock);
1576         list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1577         if (dev->dev_reserved)
1578                 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1579         spin_unlock_bh(&dev->dev_lock);
1580
1581         sess_tgt_dev_list_head =
1582                 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1583         list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1584                       sess_tgt_dev_list_head);
1585
1586 out:
1587         TRACE_EXIT();
1588         return tgt_dev;
1589
1590 out_thr_free:
1591         if (vtt->threads_num > 0) {
1592                 if (dev->handler->threads_num > 0)
1593                         scst_del_dev_threads(dev, vtt->threads_num);
1594                 else if (dev->handler->threads_num == 0)
1595                         scst_del_global_threads(vtt->threads_num);
1596         }
1597
1598 out_free:
1599         scst_free_all_UA(tgt_dev);
1600         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1601
1602         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1603         tgt_dev = NULL;
1604         goto out;
1605 }
1606
1607 /* No other locks supposed to be held; scst_mutex must be held */
1608 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1609 {
1610         TRACE_ENTRY();
1611
1612         scst_clear_reservation(tgt_dev);
1613
1614         /* With activity suspended the lock isn't needed, but let's be safe */
1615         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1616         scst_free_all_UA(tgt_dev);
1617         memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1618         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1619
1620         if (queue_UA) {
1621                 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1622                 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1623                         tgt_dev->dev->d_sense,
1624                         SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1625                 scst_check_set_UA(tgt_dev, sense_buffer,
1626                         sizeof(sense_buffer), 0);
1627         }
1628
1629         TRACE_EXIT();
1630         return;
1631 }
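
/*
 * Editor's sketch (illustrative only, not part of SCST): applying nexus loss
 * across a whole session, using the same hash walk as
 * scst_sess_free_tgt_devs() below. Assumes scst_mutex is held and activity
 * in the session is suspended, as scst_nexus_loss() expects.
 */
static void example_sess_nexus_loss(struct scst_session *sess, bool queue_UA)
{
        int i;

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *head = &sess->sess_tgt_dev_list_hash[i];
                struct scst_tgt_dev *tgt_dev;

                list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry)
                        scst_nexus_loss(tgt_dev, queue_UA);
        }
}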
1632
1633 /*
1634  * scst_mutex is supposed to be held, and there must be no parallel activity
1635  * in this session.
1636  */
1637 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1638 {
1639         struct scst_device *dev = tgt_dev->dev;
1640         struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1641
1642         TRACE_ENTRY();
1643
1644         tm_dbg_deinit_tgt_dev(tgt_dev);
1645
1646         spin_lock_bh(&dev->dev_lock);
1647         list_del(&tgt_dev->dev_tgt_dev_list_entry);
1648         spin_unlock_bh(&dev->dev_lock);
1649
1650         list_del(&tgt_dev->sess_tgt_dev_list_entry);
1651
1652         scst_clear_reservation(tgt_dev);
1653         scst_free_all_UA(tgt_dev);
1654
1655         if (dev->handler && dev->handler->detach_tgt) {
1656                 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1657                       tgt_dev);
1658                 dev->handler->detach_tgt(tgt_dev);
1659                 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1660         }
1661
1662         if (vtt->threads_num > 0) {
1663                 if (dev->handler->threads_num > 0)
1664                         scst_del_dev_threads(dev, vtt->threads_num);
1665                 else if (dev->handler->threads_num == 0)
1666                         scst_del_global_threads(vtt->threads_num);
1667         }
1668
1669         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1670
1671         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1672
1673         TRACE_EXIT();
1674         return;
1675 }
1676
1677 /* scst_mutex supposed to be held */
1678 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1679 {
1680         int res = 0;
1681         struct scst_acg_dev *acg_dev;
1682         struct scst_tgt_dev *tgt_dev;
1683
1684         TRACE_ENTRY();
1685
1686         list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1687                         acg_dev_list_entry) {
1688                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1689                 if (tgt_dev == NULL) {
1690                         res = -ENOMEM;
1691                         goto out_free;
1692                 }
1693         }
1694
1695 out:
1696         TRACE_EXIT();
1697         return res;
1698
1699 out_free:
1700         scst_sess_free_tgt_devs(sess);
1701         goto out;
1702 }
1703
1704 /*
1705  * scst_mutex is supposed to be held, and there must be no parallel activity
1706  * in this session.
1707  */
1708 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1709 {
1710         int i;
1711         struct scst_tgt_dev *tgt_dev, *t;
1712
1713         TRACE_ENTRY();
1714
1715         /* The session is going down and has no users, so no locking is needed */
1716         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1717                 struct list_head *sess_tgt_dev_list_head =
1718                         &sess->sess_tgt_dev_list_hash[i];
1719                 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1720                                 sess_tgt_dev_list_entry) {
1721                         scst_free_tgt_dev(tgt_dev);
1722                 }
1723                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1724         }
1725
1726         TRACE_EXIT();
1727         return;
1728 }
1729
1730 /* Activity is supposed to be suspended and scst_mutex held */
1731 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1732                      uint64_t lun, int read_only)
1733 {
1734         int res = 0;
1735         struct scst_acg_dev *acg_dev;
1736         struct scst_tgt_dev *tgt_dev;
1737         struct scst_session *sess;
1738         LIST_HEAD(tmp_tgt_dev_list);
1739
1740         TRACE_ENTRY();
1741
1744 #ifdef CONFIG_SCST_EXTRACHECKS
1745         list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
1746                 if (acg_dev->dev == dev) {
1747                         PRINT_ERROR("Device is already in group %s",
1748                                 acg->acg_name);
1749                         res = -EINVAL;
1750                         goto out;
1751                 }
1752         }
1753 #endif
1754
1755         acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1756         if (acg_dev == NULL) {
1757                 res = -ENOMEM;
1758                 goto out;
1759         }
1760         acg_dev->rd_only = read_only;
1761
1762         TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1763                 acg_dev);
1764         list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1765         list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1766
1767         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1768                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1769                 if (tgt_dev == NULL) {
1770                         res = -ENOMEM;
1771                         goto out_free;
1772                 }
1773                 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1774                               &tmp_tgt_dev_list);
1775         }
1776
1777         scst_report_luns_changed(acg);
1778
1779         if (dev->virt_name != NULL) {
1780                 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1781                         "rd_only %d)", dev->virt_name, acg->acg_name,
1782                         (long long unsigned int)lun,
1783                         read_only);
1784         } else {
1785                 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1786                         "%lld, rd_only %d)",
1787                         dev->scsi_dev->host->host_no,
1788                         dev->scsi_dev->channel, dev->scsi_dev->id,
1789                         dev->scsi_dev->lun, acg->acg_name,
1790                         (long long unsigned int)lun,
1791                         read_only);
1792         }
1793
1794 out:
1795         TRACE_EXIT_RES(res);
1796         return res;
1797
1798 out_free:
1799         list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1800                          extra_tgt_dev_list_entry) {
1801                 scst_free_tgt_dev(tgt_dev);
1802         }
1803         scst_free_acg_dev(acg_dev);
1804         goto out;
1805 }
1806
1807 /* Activity is supposed to be suspended and scst_mutex held */
1808 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
1809 {
1810         int res = 0;
1811         struct scst_acg_dev *acg_dev = NULL, *a;
1812         struct scst_tgt_dev *tgt_dev, *tt;
1813
1814         TRACE_ENTRY();
1815
1816         list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1817                 if (a->dev == dev) {
1818                         acg_dev = a;
1819                         break;
1820                 }
1821         }
1822
1823         if (acg_dev == NULL) {
1824                 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
1825                 res = -EINVAL;
1826                 goto out;
1827         }
1828
1829         list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1830                          dev_tgt_dev_list_entry) {
1831                 if (tgt_dev->acg_dev == acg_dev)
1832                         scst_free_tgt_dev(tgt_dev);
1833         }
1834         scst_free_acg_dev(acg_dev);
1835
1836         scst_report_luns_changed(acg);
1837
1838         if (dev->virt_name != NULL) {
1839                 PRINT_INFO("Removed device %s from group %s",
1840                         dev->virt_name, acg->acg_name);
1841         } else {
1842                 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1843                         dev->scsi_dev->host->host_no,
1844                         dev->scsi_dev->channel, dev->scsi_dev->id,
1845                         dev->scsi_dev->lun, acg->acg_name);
1846         }
1847
1848 out:
1849         TRACE_EXIT_RES(res);
1850         return res;
1851 }
1852
1853 /* Activity is supposed to be suspended and scst_mutex held */
1854 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1855 {
1856         int res = 0;
1857         struct scst_acn *n;
1858         int len;
1859         char *nm;
1860
1861         TRACE_ENTRY();
1862
1863         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1864                 if (strcmp(n->name, name) == 0) {
1865                         PRINT_ERROR("Name %s already exists in group %s",
1866                                 name, acg->acg_name);
1867                         res = -EINVAL;
1868                         goto out;
1869                 }
1870         }
1871
1872         n = kmalloc(sizeof(*n), GFP_KERNEL);
1873         if (n == NULL) {
1874                 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1875                 res = -ENOMEM;
1876                 goto out;
1877         }
1878
1879         len = strlen(name);
1880         nm = kmalloc(len + 1, GFP_KERNEL);
1881         if (nm == NULL) {
1882                 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1883                 res = -ENOMEM;
1884                 goto out_free;
1885         }
1886
1887         strcpy(nm, name);
1888         n->name = nm;
1889
1890         list_add_tail(&n->acn_list_entry, &acg->acn_list);
1891
1892 out:
1893         if (res == 0) {
1894                 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1895                 scst_check_reassign_sessions();
1896         }
1897
1898         TRACE_EXIT_RES(res);
1899         return res;
1900
1901 out_free:
1902         kfree(n);
1903         goto out;
1904 }
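
/*
 * Editor's sketch (illustrative only, not part of SCST): a typical LUN
 * provisioning sequence built from the helpers above. The initiator name is
 * a made-up example. The caller must have suspended activity and must hold
 * scst_mutex, as scst_acg_add_dev() and scst_acg_add_name() require.
 */
static int example_provision_lun0(struct scst_acg *acg, struct scst_device *dev)
{
        int rc;

        rc = scst_acg_add_dev(acg, dev, 0 /* LUN */, 0 /* read_only */);
        if (rc != 0)
                return rc;

        rc = scst_acg_add_name(acg, "iqn.2009-01.com.example:ini0");
        if (rc != 0)
                scst_acg_remove_dev(acg, dev);

        return rc;
}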
1905
1906 /* scst_mutex supposed to be held */
1907 void __scst_acg_remove_acn(struct scst_acn *n)
1908 {
1909         TRACE_ENTRY();
1910
1911         list_del(&n->acn_list_entry);
1912         kfree(n->name);
1913         kfree(n);
1914
1915         TRACE_EXIT();
1916         return;
1917 }
1918
1919 /* Activity is supposed to be suspended and scst_mutex held */
1920 int scst_acg_remove_name(struct scst_acg *acg, const char *name, bool reassign)
1921 {
1922         int res = -EINVAL;
1923         struct scst_acn *n;
1924
1925         TRACE_ENTRY();
1926
1927         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1928                 if (strcmp(n->name, name) == 0) {
1929                         __scst_acg_remove_acn(n);
1930                         res = 0;
1931                         break;
1932                 }
1933         }
1934
1935         if (res == 0) {
1936                 PRINT_INFO("Removed name %s from group %s", name,
1937                         acg->acg_name);
1938                 if (reassign)
1939                         scst_check_reassign_sessions();
1940         } else
1941                 PRINT_ERROR("Unable to find name %s in group %s", name,
1942                         acg->acg_name);
1943
1944         TRACE_EXIT_RES(res);
1945         return res;
1946 }
1947
1948 static struct scst_cmd *scst_create_prepare_internal_cmd(
1949         struct scst_cmd *orig_cmd, int bufsize)
1950 {
1951         struct scst_cmd *res;
1952         gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1953
1954         TRACE_ENTRY();
1955
1956         res = scst_alloc_cmd(gfp_mask);
1957         if (res == NULL)
1958                 goto out;
1959
1960         res->cmd_lists = orig_cmd->cmd_lists;
1961         res->sess = orig_cmd->sess;
1962         res->atomic = scst_cmd_atomic(orig_cmd);
1963         res->internal = 1;
1964         res->tgtt = orig_cmd->tgtt;
1965         res->tgt = orig_cmd->tgt;
1966         res->dev = orig_cmd->dev;
1967         res->tgt_dev = orig_cmd->tgt_dev;
1968         res->lun = orig_cmd->lun;
1969         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1970         res->data_direction = SCST_DATA_UNKNOWN;
1971         res->orig_cmd = orig_cmd;
1972         res->bufflen = bufsize;
1973
1974         scst_sess_get(res->sess);
1975         if (res->tgt_dev != NULL)
1976                 __scst_get(0);
1977
1978         res->state = SCST_CMD_STATE_PRE_PARSE;
1979
1980 out:
1981         TRACE_EXIT_HRES((unsigned long)res);
1982         return res;
1983 }
1984
1985 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1986 {
1987         int res = 0;
1988         static const uint8_t request_sense[6] =
1989             { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1990         struct scst_cmd *rs_cmd;
1991
1992         TRACE_ENTRY();
1993
1994         if (orig_cmd->sense != NULL) {
1995                 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
1996                         orig_cmd->sense, orig_cmd);
1997                 mempool_free(orig_cmd->sense, scst_sense_mempool);
1998                 orig_cmd->sense = NULL;
1999         }
2000
2001         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
2002                         SCST_SENSE_BUFFERSIZE);
2003         if (rs_cmd == NULL)
2004                 goto out_error;
2005
2006         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
2007         rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
2008         rs_cmd->cdb_len = sizeof(request_sense);
2009         rs_cmd->data_direction = SCST_DATA_READ;
2010         rs_cmd->expected_data_direction = rs_cmd->data_direction;
2011         rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
2012         rs_cmd->expected_values_set = 1;
2013
2014         TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
2015                 "cmd list", rs_cmd);
2016         spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2017         list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
2018         wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
2019         spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2020
2021 out:
2022         TRACE_EXIT_RES(res);
2023         return res;
2024
2025 out_error:
2026         res = -1;
2027         goto out;
2028 }
2029
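/*
 * Editor's note: scst_prepare_request_sense() above and
 * scst_complete_request_sense() below implement the auto-sense round trip:
 * the original command's stale sense is dropped, an internal REQUEST SENSE
 * (allocation length SCST_SENSE_BUFFERSIZE in CDB byte 4) is queued at the
 * head of the active list, and on completion the returned sense is attached
 * to the original command, which is then re-queued.
 */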
2030 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
2031 {
2032         struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
2033         uint8_t *buf;
2034         int len;
2035
2036         TRACE_ENTRY();
2037
2038         sBUG_ON(orig_cmd == NULL);
2039
2040         len = scst_get_buf_first(req_cmd, &buf);
2041
2042         if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
2043             SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
2044                 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
2045                         buf, len);
2046                 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
2047                         len);
2048         } else {
2049                 PRINT_ERROR("%s", "Unable to get the sense via "
2050                         "REQUEST SENSE, returning HARDWARE ERROR");
2051                 scst_set_cmd_error(orig_cmd,
2052                         SCST_LOAD_SENSE(scst_sense_hardw_error));
2053         }
2054
2055         if (len > 0)
2056                 scst_put_buf(req_cmd, buf);
2057
2058         TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
2059                 "cmd list", orig_cmd);
2060         spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2061         list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
2062         wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
2063         spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2064
2065         TRACE_EXIT();
2066         return;
2067 }
2068
2069 int scst_finish_internal_cmd(struct scst_cmd *cmd)
2070 {
2071         int res;
2072
2073         TRACE_ENTRY();
2074
2075         sBUG_ON(!cmd->internal);
2076
2077         if (cmd->cdb[0] == REQUEST_SENSE)
2078                 scst_complete_request_sense(cmd);
2079
2080         __scst_cmd_put(cmd);
2081
2082         res = SCST_CMD_STATE_RES_CONT_NEXT;
2083
2084         TRACE_EXIT_HRES(res);
2085         return res;
2086 }
2087
2088 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2089 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
2090 {
2091         struct scsi_request *req;
2092
2093         TRACE_ENTRY();
2094
2095         if (scsi_cmd && (req = scsi_cmd->sc_request)) {
2096                 if (req->sr_bufflen)
2097                         kfree(req->sr_buffer);
2098                 scsi_release_request(req);
2099         }
2102
2103         TRACE_EXIT();
2104         return;
2105 }
2106
2107 static void scst_send_release(struct scst_device *dev)
2108 {
2109         struct scsi_request *req;
2110         struct scsi_device *scsi_dev;
2111         uint8_t cdb[6];
2112
2113         TRACE_ENTRY();
2114
2115         if (dev->scsi_dev == NULL)
2116                 goto out;
2117
2118         scsi_dev = dev->scsi_dev;
2119
2120         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
2121         if (req == NULL) {
2122                 PRINT_ERROR("Allocation of scsi_request failed: unable "
2123                             "to RELEASE device %d:%d:%d:%d",
2124                             scsi_dev->host->host_no, scsi_dev->channel,
2125                             scsi_dev->id, scsi_dev->lun);
2126                 goto out;
2127         }
2128
2129         memset(cdb, 0, sizeof(cdb));
2130         cdb[0] = RELEASE;
2131         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2132             ((scsi_dev->lun << 5) & 0xe0) : 0;
2133         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
2134         req->sr_cmd_len = sizeof(cdb);
2135         req->sr_data_direction = SCST_DATA_NONE;
2136         req->sr_use_sg = 0;
2137         req->sr_bufflen = 0;
2138         req->sr_buffer = NULL;
2139         req->sr_request->rq_disk = dev->rq_disk;
2140         req->sr_sense_buffer[0] = 0;
2141
2142         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
2143                 "mid-level", req);
2144         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
2145                     scst_req_done, 15, 3);
2146
2147 out:
2148         TRACE_EXIT();
2149         return;
2150 }
2151 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2152 static void scst_send_release(struct scst_device *dev)
2153 {
2154         struct scsi_device *scsi_dev;
2155         unsigned char cdb[6];
2156         uint8_t sense[SCSI_SENSE_BUFFERSIZE];
2157         int rc, i;
2158
2159         TRACE_ENTRY();
2160
2161         if (dev->scsi_dev == NULL)
2162                 goto out;
2163
2164         scsi_dev = dev->scsi_dev;
2165
2166         for (i = 0; i < 5; i++) {
2167                 memset(cdb, 0, sizeof(cdb));
2168                 cdb[0] = RELEASE;
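                /*
                 * For SCSI-2 and older devices the LUN must also be encoded
                 * in bits 7..5 of CDB byte 1; later SCSI versions reserve
                 * this field.
                 */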
2169                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2170                     ((scsi_dev->lun << 5) & 0xe0) : 0;
2171
2172                 memset(sense, 0, sizeof(sense));
2173
2174                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
2175                         "SCSI mid-level");
2176                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
2177                                 sense, 15, 0, 0
2178 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
2179                                 , NULL
2180 #endif
2181                                 );
2182                 TRACE_DBG("RELEASE done: %x", rc);
2183
2184                 if (scsi_status_is_good(rc)) {
2185                         break;
2186                 } else {
2187                         PRINT_ERROR("RELEASE failed: %d", rc);
2188                         PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
2189                         scst_check_internal_sense(dev, rc, sense,
2190                                 sizeof(sense));
2191                 }
2192         }
2193
2194 out:
2195         TRACE_EXIT();
2196         return;
2197 }
2198 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2199
2200 /* scst_mutex supposed to be held */
2201 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
2202 {
2203         struct scst_device *dev = tgt_dev->dev;
2204         int release = 0;
2205
2206         TRACE_ENTRY();
2207
2208         spin_lock_bh(&dev->dev_lock);
2209         if (dev->dev_reserved &&
2210             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
2211                 /* This is the one who holds the reservation */
2212                 struct scst_tgt_dev *tgt_dev_tmp;
2213                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2214                                     dev_tgt_dev_list_entry) {
2215                         clear_bit(SCST_TGT_DEV_RESERVED,
2216                                     &tgt_dev_tmp->tgt_dev_flags);
2217                 }
2218                 dev->dev_reserved = 0;
2219                 release = 1;
2220         }
2221         spin_unlock_bh(&dev->dev_lock);
2222
2223         if (release)
2224                 scst_send_release(dev);
2225
2226         TRACE_EXIT();
2227         return;
2228 }
2229
2230 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
2231         const char *initiator_name)
2232 {
2233         struct scst_session *sess;
2234         int i;
2235         int len;
2236         char *nm;
2237
2238         TRACE_ENTRY();
2239
2240 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2241         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2242 #else
2243         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2244 #endif
2245         if (sess == NULL) {
2246                 TRACE(TRACE_OUT_OF_MEM, "%s",
2247                       "Allocation of scst_session failed");
2248                 goto out;
2249         }
2250 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2251         memset(sess, 0, sizeof(*sess));
2252 #endif
2253
2254         sess->init_phase = SCST_SESS_IPH_INITING;
2255         sess->shut_phase = SCST_SESS_SPH_READY;
2256         atomic_set(&sess->refcnt, 0);
2257         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2258                 struct list_head *sess_tgt_dev_list_head =
2259                          &sess->sess_tgt_dev_list_hash[i];
2260                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2261         }
2262         spin_lock_init(&sess->sess_list_lock);
2263         INIT_LIST_HEAD(&sess->search_cmd_list);
2264         INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2265         sess->tgt = tgt;
2266         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2267         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2268 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2269         INIT_DELAYED_WORK(&sess->hw_pending_work,
2270                 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2271 #else
2272         INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2273 #endif
2274
2275 #ifdef CONFIG_SCST_MEASURE_LATENCY
2276         spin_lock_init(&sess->meas_lock);
2277 #endif
2278
2279         len = strlen(initiator_name);
2280         nm = kmalloc(len + 1, gfp_mask);
2281         if (nm == NULL) {
2282                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2283                 goto out_free;
2284         }
2285
2286         strcpy(nm, initiator_name);
2287         sess->initiator_name = nm;
2288
2289 out:
2290         TRACE_EXIT();
2291         return sess;
2292
2293 out_free:
2294         kmem_cache_free(scst_sess_cachep, sess);
2295         sess = NULL;
2296         goto out;
2297 }
2298
2299 void scst_free_session(struct scst_session *sess)
2300 {
2301         TRACE_ENTRY();
2302
2303         mutex_lock(&scst_mutex);
2304
2305         TRACE_DBG("Removing sess %p from the list", sess);
2306         list_del(&sess->sess_list_entry);
2307         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2308         list_del(&sess->acg_sess_list_entry);
2309
2310         scst_sess_free_tgt_devs(sess);
2311
2312         wake_up_all(&sess->tgt->unreg_waitQ);
2313
2314         mutex_unlock(&scst_mutex);
2315
2316         kfree(sess->initiator_name);
2317         kmem_cache_free(scst_sess_cachep, sess);
2318
2319         TRACE_EXIT();
2320         return;
2321 }
2322
2323 void scst_free_session_callback(struct scst_session *sess)
2324 {
2325         struct completion *c;
2326
2327         TRACE_ENTRY();
2328
2329         TRACE_DBG("Freeing session %p", sess);
2330
2331         cancel_delayed_work_sync(&sess->hw_pending_work);
2332
2333         c = sess->shutdown_compl;
2334
2335         if (sess->unreg_done_fn) {
2336                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2337                 sess->unreg_done_fn(sess);
2338                 TRACE_DBG("%s", "unreg_done_fn() returned");
2339         }
2340         scst_free_session(sess);
2341
2342         if (c)
2343                 complete_all(c);
2344
2345         TRACE_EXIT();
2346         return;
2347 }
2348
2349 void scst_sched_session_free(struct scst_session *sess)
2350 {
2351         unsigned long flags;
2352
2353         TRACE_ENTRY();
2354
2355         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2356                 PRINT_CRIT_ERROR("session %p is going to shut down with "
2357                         "unknown shut phase %lx", sess, sess->shut_phase);
2358                 sBUG();
2359         }
2360
2361         spin_lock_irqsave(&scst_mgmt_lock, flags);
2362         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2363         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2364         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2365
2366         wake_up(&scst_mgmt_waitQ);
2367
2368         TRACE_EXIT();
2369         return;
2370 }
2371
2372 void scst_cmd_get(struct scst_cmd *cmd)
2373 {
2374         __scst_cmd_get(cmd);
2375 }
2376 EXPORT_SYMBOL(scst_cmd_get);
2377
2378 void scst_cmd_put(struct scst_cmd *cmd)
2379 {
2380         __scst_cmd_put(cmd);
2381 }
2382 EXPORT_SYMBOL(scst_cmd_put);
2383
2384 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2385 {
2386         struct scst_cmd *cmd;
2387
2388         TRACE_ENTRY();
2389
2390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2391         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2392 #else
2393         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2394 #endif
2395         if (cmd == NULL) {
2396                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2397                 goto out;
2398         }
2399 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2400         memset(cmd, 0, sizeof(*cmd));
2401 #endif
2402
2403         cmd->state = SCST_CMD_STATE_INIT_WAIT;
2404         cmd->start_time = jiffies;
2405         atomic_set(&cmd->cmd_ref, 1);
2406         cmd->cmd_lists = &scst_main_cmd_lists;
2407         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2408         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2409         cmd->timeout = SCST_DEFAULT_TIMEOUT;
2410         cmd->retries = 0;
2411         cmd->data_len = -1;
2412         cmd->is_send_status = 1;
2413         cmd->resp_data_len = -1;
2414
2415         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2416         cmd->dbl_ua_orig_resp_data_len = -1;
2417
2418 out:
2419         TRACE_EXIT();
2420         return cmd;
2421 }
2422
2423 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2424 {
2425         scst_sess_put(cmd->sess);
2426
2427         /*
2428          * At this point tgt_dev can be dead, but the pointer remains non-NULL
2429          */
2430         if (likely(cmd->tgt_dev != NULL))
2431                 __scst_put();
2432
2433         scst_destroy_cmd(cmd);
2434         return;
2435 }
2436
2437 /* No locks supposed to be held */
2438 void scst_free_cmd(struct scst_cmd *cmd)
2439 {
2440         int destroy = 1;
2441
2442         TRACE_ENTRY();
2443
2444         TRACE_DBG("Freeing cmd %p (tag %llu)",
2445                   cmd, (long long unsigned int)cmd->tag);
2446
2447         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2448                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2449                         cmd, atomic_read(&scst_cmd_count));
2450         }
2451
2452         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2453                 cmd->dec_on_dev_needed);
2454
2455 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2456 #if defined(CONFIG_SCST_EXTRACHECKS)
2457         if (cmd->scsi_req) {
2458                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2459                         "scsi_req!");
2460                 scst_release_request(cmd);
2461         }
2462 #endif
2463 #endif
2464
2465         /*
2466          * Target driver can already free sg buffer before calling
2467          * scst_tgt_cmd_done(). E.g., scst_local has to do that.
2468          */
2469         if (!cmd->tgt_data_buf_alloced)
2470                 scst_check_restore_sg_buff(cmd);
2471
2472         if (cmd->tgtt->on_free_cmd != NULL) {
2473                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2474                 cmd->tgtt->on_free_cmd(cmd);
2475                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2476         }
2477
2478         if (likely(cmd->dev != NULL)) {
2479                 struct scst_dev_type *handler = cmd->dev->handler;
2480                 if (handler->on_free_cmd != NULL) {
2481                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2482                               handler->name, cmd);
2483                         handler->on_free_cmd(cmd);
2484                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
2485                                 handler->name);
2486                 }
2487         }
2488
2489         scst_release_space(cmd);
2490
2491         if (unlikely(cmd->sense != NULL)) {
2492                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2493                 mempool_free(cmd->sense, scst_sense_mempool);
2494                 cmd->sense = NULL;
2495         }
2496
2497         if (likely(cmd->tgt_dev != NULL)) {
2498 #ifdef CONFIG_SCST_EXTRACHECKS
2499                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2500                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
2501                             "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2502                             cmd, cmd->cdb[0], cmd->tgtt->name,
2503                             (long long unsigned int)cmd->lun,
2504                             cmd->sn, cmd->tgt_dev->expected_sn);
2505                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2506                 }
2507 #endif
2508
2509                 if (unlikely(cmd->out_of_sn)) {
2510                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2511                                 "destroy=%d", cmd,
2512                                 (long long unsigned int)cmd->tag,
2513                                 cmd->sn, destroy);
2514                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2515                                         &cmd->cmd_flags);
2516                 }
2517         }
2518
2519         if (likely(destroy))
2520                 scst_destroy_put_cmd(cmd);
2521
2522         TRACE_EXIT();
2523         return;
2524 }
2525
2526 /* No locks supposed to be held. */
2527 void scst_check_retries(struct scst_tgt *tgt)
2528 {
2529         int need_wake_up = 0;
2530
2531         TRACE_ENTRY();
2532
2533         /*
2534          * We don't worry about overflow of finished_cmds, because we check
2535          * only for its change.
2536          */
2537         atomic_inc(&tgt->finished_cmds);
2538         /* See comment in scst_queue_retry_cmd() */
2539         smp_mb__after_atomic_inc();
2540         if (unlikely(tgt->retry_cmds > 0)) {
2541                 struct scst_cmd *c, *tc;
2542                 unsigned long flags;
2543
2544                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2545                       tgt->retry_cmds);
2546
2547                 spin_lock_irqsave(&tgt->tgt_lock, flags);
2548                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2549                                 cmd_list_entry) {
2550                         tgt->retry_cmds--;
2551
2552                         TRACE_RETRY("Moving retry cmd %p to head of active "
2553                                 "cmd list (retry_cmds left %d)",
2554                                 c, tgt->retry_cmds);
2555                         spin_lock(&c->cmd_lists->cmd_list_lock);
2556                         list_move(&c->cmd_list_entry,
2557                                   &c->cmd_lists->active_cmd_list);
2558                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2559                         spin_unlock(&c->cmd_lists->cmd_list_lock);
2560
2561                         need_wake_up++;
2562                         if (need_wake_up >= 2) /* "slow start" */
2563                                 break;
2564                 }
2565                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2566         }
2567
2568         TRACE_EXIT();
2569         return;
2570 }
2571
2572 void scst_tgt_retry_timer_fn(unsigned long arg)
2573 {
2574         struct scst_tgt *tgt = (struct scst_tgt *)arg;
2575         unsigned long flags;
2576
2577         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2578
2579         spin_lock_irqsave(&tgt->tgt_lock, flags);
2580         tgt->retry_timer_active = 0;
2581         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2582
2583         scst_check_retries(tgt);
2584
2585         TRACE_EXIT();
2586         return;
2587 }
2588
2589 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2590 {
2591         struct scst_mgmt_cmd *mcmd;
2592
2593         TRACE_ENTRY();
2594
2595         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2596         if (mcmd == NULL) {
2597                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2598                         "failed, some commands and their data could leak");
2599                 goto out;
2600         }
2601         memset(mcmd, 0, sizeof(*mcmd));
2602
2603 out:
2604         TRACE_EXIT();
2605         return mcmd;
2606 }
2607
2608 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2609 {
2610         unsigned long flags;
2611
2612         TRACE_ENTRY();
2613
2614         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2615         atomic_dec(&mcmd->sess->sess_cmd_count);
2616         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2617
2618         scst_sess_put(mcmd->sess);
2619
2620         if (mcmd->mcmd_tgt_dev != NULL)
2621                 __scst_put();
2622
2623         mempool_free(mcmd, scst_mgmt_mempool);
2624
2625         TRACE_EXIT();
2626         return;
2627 }
2628
2629 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2630 int scst_alloc_request(struct scst_cmd *cmd)
2631 {
2632         int res = 0;
2633         struct scsi_request *req;
2634         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2635
2636         TRACE_ENTRY();
2637
2638         /* cmd->dev->scsi_dev must be non-NULL here */
2639         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2640         if (req == NULL) {
2641                 TRACE(TRACE_OUT_OF_MEM, "%s",
2642                       "Allocation of scsi_request failed");
2643                 res = -ENOMEM;
2644                 goto out;
2645         }
2646
2647         cmd->scsi_req = req;
2648
2649         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2650         req->sr_cmd_len = cmd->cdb_len;
2651         req->sr_data_direction = cmd->data_direction;
2652         req->sr_use_sg = cmd->sg_cnt;
2653         req->sr_bufflen = cmd->bufflen;
2654         req->sr_buffer = cmd->sg;
2655         req->sr_request->rq_disk = cmd->dev->rq_disk;
2656         req->sr_sense_buffer[0] = 0;
2657
2658         cmd->scsi_req->upper_private_data = cmd;
2659
2660 out:
2661         TRACE_EXIT();
2662         return res;
2663 }
2664
2665 void scst_release_request(struct scst_cmd *cmd)
2666 {
2667         scsi_release_request(cmd->scsi_req);
2668         cmd->scsi_req = NULL;
2669 }
2670 #endif
2671
2672 static bool is_report_sg_limitation(void)
2673 {
2674 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2675         return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2676 #else
2677         return false;
2678 #endif
2679 }
2680
2681 int scst_alloc_space(struct scst_cmd *cmd)
2682 {
2683         gfp_t gfp_mask;
2684         int res = -ENOMEM;
2685         int atomic = scst_cmd_atomic(cmd);
2686         int flags;
2687         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2688         static int ll;
2689
2690         TRACE_ENTRY();
2691
2692         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2693
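        /*
         * Editor's note: in atomic context the allocation may only be
         * satisfied from the SGV cache - on a cache miss it fails right
         * away instead of sleeping, leaving the caller to retry in thread
         * context.
         */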
2694         flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2695         if (cmd->no_sgv)
2696                 flags |= SGV_POOL_ALLOC_NO_CACHED;
2697
2698         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2699                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2700         if (cmd->sg == NULL)
2701                 goto out;
2702
2703         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2704                 if ((ll < 10) || is_report_sg_limitation()) {
2705                         PRINT_INFO("Unable to complete command due to "
2706                                 "SG IO count limitation (requested %d, "
2707                                 "available %d, tgt lim %d)", cmd->sg_cnt,
2708                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2709                         ll++;
2710                 }
2711                 goto out_sg_free;
2712         }
2713
2714         if (cmd->data_direction != SCST_DATA_BIDI)
2715                 goto success;
2716
2717         cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2718                          flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2719                          &cmd->dev->dev_mem_lim, NULL);
2720         if (cmd->in_sg == NULL)
2721                 goto out_sg_free;
2722
2723         if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2724                 if ((ll < 10)  || is_report_sg_limitation()) {
2725                         PRINT_INFO("Unable to complete command due to "
2726                                 "SG IO count limitation (IN buffer, requested "
2727                                 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2728                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2729                         ll++;
2730                 }
2731                 goto out_in_sg_free;
2732         }
2733
2734 success:
2735         res = 0;
2736
2737 out:
2738         TRACE_EXIT();
2739         return res;
2740
2741 out_in_sg_free:
2742         sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2743         cmd->in_sgv = NULL;
2744         cmd->in_sg = NULL;
2745         cmd->in_sg_cnt = 0;
2746
2747 out_sg_free:
2748         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2749         cmd->sgv = NULL;
2750         cmd->sg = NULL;
2751         cmd->sg_cnt = 0;
2752         goto out;
2753 }
2754
2755 static void scst_release_space(struct scst_cmd *cmd)
2756 {
2757         TRACE_ENTRY();
2758
2759         if (cmd->sgv == NULL)
2760                 goto out;
2761
2762         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2763                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2764                 goto out;
2765         }
2766
2767         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2768         cmd->sgv = NULL;
2769         cmd->sg_cnt = 0;
2770         cmd->sg = NULL;
2771         cmd->bufflen = 0;
2772         cmd->data_len = 0;
2773
2774         if (cmd->in_sgv != NULL) {
2775                 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2776                 cmd->in_sgv = NULL;
2777                 cmd->in_sg_cnt = 0;
2778                 cmd->in_sg = NULL;
2779                 cmd->in_bufflen = 0;
2780         }
2781
2782 out:
2783         TRACE_EXIT();
2784         return;
2785 }
2786
2787 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2788
2789 /*
2790  * Can switch to the next dst_sg element, so, to copy to strictly only
2791  * one dst_sg element, it must be either last in the chain, or
2792  * copy_len == dst_sg->length.
2793  */
2794 static int __sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2795                           size_t *pdst_offs, struct scatterlist *src_sg,
2796                           size_t copy_len,
2797                           enum km_type d_km_type, enum km_type s_km_type)
2798 {
2799         int res = 0;
2800         struct scatterlist *dst_sg;
2801         size_t src_len, dst_len, src_offs, dst_offs;
2802         struct page *src_page, *dst_page;
2803
2804         if (copy_len == 0)
2805                 copy_len = 0x7FFFFFFF; /* copy all */
2806
2807         dst_sg = *pdst_sg;
2808         dst_len = *pdst_len;
2809         dst_offs = *pdst_offs;
2810         dst_page = sg_page(dst_sg);
2811
2812         src_page = sg_page(src_sg);
2813         src_len = src_sg->length;
2814         src_offs = src_sg->offset;
2815
2816         do {
2817                 void *saddr, *daddr;
2818                 size_t n;
2819
2820                 saddr = kmap_atomic(src_page +
2821                                          (src_offs >> PAGE_SHIFT), s_km_type) +
2822                                     (src_offs & ~PAGE_MASK);
2823                 daddr = kmap_atomic(dst_page +
2824                                         (dst_offs >> PAGE_SHIFT), d_km_type) +
2825                                     (dst_offs & ~PAGE_MASK);
2826
2827                 if (((src_offs & ~PAGE_MASK) == 0) &&
2828                     ((dst_offs & ~PAGE_MASK) == 0) &&
2829                     (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2830                     (copy_len >= PAGE_SIZE)) {
2831                         copy_page(daddr, saddr);
2832                         n = PAGE_SIZE;
2833                 } else {
2834                         n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2835                                           PAGE_SIZE - (src_offs & ~PAGE_MASK));
2836                         n = min(n, src_len);
2837                         n = min(n, dst_len);
2838                         n = min_t(size_t, n, copy_len);
2839                         memcpy(daddr, saddr, n);
2840                 }
2841                 dst_offs += n;
2842                 src_offs += n;
2843
2844                 kunmap_atomic(saddr, s_km_type);
2845                 kunmap_atomic(daddr, d_km_type);
2846
2847                 res += n;
2848                 copy_len -= n;
2849                 if (copy_len == 0)
2850                         goto out;
2851
2852                 src_len -= n;
2853                 dst_len -= n;
2854                 if (dst_len == 0) {
2855                         dst_sg = sg_next(dst_sg);
2856                         if (dst_sg == NULL)
2857                                 goto out;
2858                         dst_page = sg_page(dst_sg);
2859                         dst_len = dst_sg->length;
2860                         dst_offs = dst_sg->offset;
2861                 }
2862         } while (src_len > 0);
2863
2864 out:
2865         *pdst_sg = dst_sg;
2866         *pdst_len = dst_len;
2867         *pdst_offs = dst_offs;
2868         return res;
2869 }
2870
2871 /**
2872  * sg_copy_elem - copy one SG element to another
2873  * @dst_sg:     destination SG element
2874  * @src_sg:     source SG element
2875  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2876  * @d_km_type:  kmap_atomic type for the destination SG
2877  * @s_km_type:  kmap_atomic type for the source SG
2878  *
2879  * Description:
2880  *    Data from the source SG element will be copied to the destination SG
2881  *    element. Returns number of bytes copied. Can switch to the next dst_sg
2882  *    element, so, to copy to strictly only one dst_sg element, it must be
2883  *    either last in the chain, or copy_len == dst_sg->length.
2884  */
2885 int sg_copy_elem(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2886                  size_t copy_len, enum km_type d_km_type,
2887                  enum km_type s_km_type)
2888 {
2889         size_t dst_len = dst_sg->length, dst_offs = dst_sg->offset;
2890
2891         return __sg_copy_elem(&dst_sg, &dst_len, &dst_offs, src_sg,
2892                 copy_len, d_km_type, s_km_type);
2893 }
2894
2896 /**
2897  * sg_copy - copy one SG vector to another
2898  * @dst_sg:     destination SG
2899  * @src_sg:     source SG
2900  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2901  * @d_km_type:  kmap_atomic type for the destination SG
2902  * @s_km_type:  kmap_atomic type for the source SG
2903  *
2904  * Description:
2905  *    Data from the source SG vector will be copied to the destination SG
2906  *    vector. End of the vectors will be determined by sg_next() returning
2907  *    NULL. Returns number of bytes copied.
2908  */
2909 int sg_copy(struct scatterlist *dst_sg,
2910             struct scatterlist *src_sg, size_t copy_len,
2911             enum km_type d_km_type, enum km_type s_km_type)
2912 {
2913         int res = 0;
2914         size_t dst_len, dst_offs;
2915
2916         if (copy_len == 0)
2917                 copy_len = 0x7FFFFFFF; /* copy all */
2918
2919         dst_len = dst_sg->length;
2920         dst_offs = dst_sg->offset;
2921
2922         do {
2923                 int copied = __sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2924                                 src_sg, copy_len, d_km_type, s_km_type);
2925                 copy_len -= copied;
2926                 res += copied;
2927                 if ((copy_len == 0) || (dst_sg == NULL))
2928                         goto out;
2929
2930                 src_sg = sg_next(src_sg);
2931         } while (src_sg != NULL);
2930
2931 out:
2932         return res;
2933 }
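
/*
 * Editor's sketch (illustrative only, not part of SCST): minimal sg_copy()
 * usage on two single-page scatterlists, assuming a kernel where
 * sg_init_table()/sg_set_page() are available. KM_USER0/KM_USER1 are the
 * usual process-context kmap_atomic slots.
 */
static int example_sg_copy_page(void)
{
        struct scatterlist src_sg, dst_sg;
        struct page *src_pg, *dst_pg;
        int copied = -ENOMEM;

        src_pg = alloc_page(GFP_KERNEL);
        dst_pg = alloc_page(GFP_KERNEL);
        if ((src_pg == NULL) || (dst_pg == NULL))
                goto out_free;

        sg_init_table(&src_sg, 1);
        sg_init_table(&dst_sg, 1);
        sg_set_page(&src_sg, src_pg, PAGE_SIZE, 0);
        sg_set_page(&dst_sg, dst_pg, PAGE_SIZE, 0);

        memset(page_address(src_pg), 0x5a, PAGE_SIZE);

        /* copy_len == 0 means "copy everything" */
        copied = sg_copy(&dst_sg, &src_sg, 0, KM_USER0, KM_USER1);

out_free:
        if (src_pg != NULL)
                __free_page(src_pg);
        if (dst_pg != NULL)
                __free_page(dst_pg);
        return copied;
}
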
2934 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
2935
2936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2937 #include <linux/pfn.h>
2938
2939 struct blk_kern_sg_hdr {
2940         struct scatterlist *orig_sgp;
2941         union {
2942                 struct sg_table new_sg_table;
2943                 struct scatterlist *saved_sg;
2944         };
2945         bool tail_only;
2946 };
2947
2948 #define BLK_KERN_SG_HDR_ENTRIES (1 + (sizeof(struct blk_kern_sg_hdr) - 1) / \
2949                                  sizeof(struct scatterlist))
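
/*
 * Editor's note: the header is stored in-band at the front of the
 * scatterlist allocation, occupying the space of BLK_KERN_SG_HDR_ENTRIES
 * SG entries; the usable SG vector starts right after it, avoiding a
 * second allocation for the bookkeeping data.
 */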
2950
2951 /**
2952  * blk_rq_unmap_kern_sg - "unmaps" data buffers in the request
2953  * @req:        request to unmap
2954  * @do_copy:    sets copy data between buffers, if needed, or not
2955  *
2956  * Description:
2957  *    It frees all additional buffers allocated for SG->BIO mapping.
2958  */
2959 void blk_rq_unmap_kern_sg(struct request *req, int do_copy)
2960 {
2961         struct blk_kern_sg_hdr *hdr = (struct blk_kern_sg_hdr *)req->end_io_data;
2962
2963         if (hdr == NULL)
2964                 goto out;
2965
2966         if (hdr->tail_only) {
2967                 /* Tail element only was copied */
2968                 struct scatterlist *saved_sg = hdr->saved_sg;
2969                 struct scatterlist *tail_sg = hdr->orig_sgp;
2970
2971                 if ((rq_data_dir(req) == READ) && do_copy)
2972                         sg_copy_elem(saved_sg, tail_sg, tail_sg->length,
2973                                 KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2974
2975                 __free_pages(sg_page(tail_sg), get_order(tail_sg->length));
2976                 *tail_sg = *saved_sg;
2977                 kfree(hdr);
2978         } else {
2979                 /* The whole SG was copied */
2980                 struct sg_table new_sg_table = hdr->new_sg_table;
2981                 struct scatterlist *new_sgl = new_sg_table.sgl +
2982                                                 BLK_KERN_SG_HDR_ENTRIES;
2983                 struct scatterlist *orig_sgl = hdr->orig_sgp;
2984
2985                 if ((rq_data_dir(req) == READ) && do_copy)
2986                         sg_copy(orig_sgl, new_sgl, 0, KM_BIO_DST_IRQ,
2987                                 KM_BIO_SRC_IRQ);
2988
2989                 sg_free_table(&new_sg_table);
2990         }
2991
2992 out:
2993         return;
2994 }
2995
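/*
 * Editor's note: when only the last SG element violates the queue's
 * alignment requirements, it is swapped for a freshly allocated bounce
 * buffer; the original element is saved in the header so that
 * blk_rq_unmap_kern_sg() can copy READ data back and restore it.
 */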
2996 static int blk_rq_handle_align_tail_only(struct request *rq,
2997                                          struct scatterlist *sg_to_copy,
2998                                          gfp_t gfp, gfp_t page_gfp)
2999 {
3000         int res = 0;
3001         struct scatterlist *tail_sg = sg_to_copy;
3002         struct scatterlist *saved_sg;
3003         struct blk_kern_sg_hdr *hdr;
3004         int saved_sg_nents;
3005         struct page *pg;
3006
3007         saved_sg_nents = 1 + BLK_KERN_SG_HDR_ENTRIES;
3008
3009         saved_sg = kmalloc(sizeof(*saved_sg) * saved_sg_nents, gfp);
3010         if (saved_sg == NULL)
3011                 goto out_nomem;
3012
3013         sg_init_table(saved_sg, saved_sg_nents);
3014
3015         hdr = (struct blk_kern_sg_hdr *)saved_sg;
3016         saved_sg += BLK_KERN_SG_HDR_ENTRIES;
3017         saved_sg_nents -= BLK_KERN_SG_HDR_ENTRIES;
3018
3019         hdr->tail_only = true;
3020         hdr->orig_sgp = tail_sg;
3021         hdr->saved_sg = saved_sg;
3022
3023         *saved_sg = *tail_sg;
3024
3025         pg = alloc_pages(page_gfp, get_order(tail_sg->length));
3026         if (pg == NULL)
3027                 goto err_free_saved_sg;
3028
3029         sg_assign_page(tail_sg, pg);
3030         tail_sg->offset = 0;
3031
3032         if (rq_data_dir(rq) == WRITE)
3033                 sg_copy_elem(tail_sg, saved_sg, saved_sg->length,
3034                                 KM_USER1, KM_USER0);
3035
3036         rq->end_io_data = hdr;
3037         rq->cmd_flags |= REQ_COPY_USER;
3038
3039 out:
3040         return res;
3041
3042 err_free_saved_sg:
3043         kfree(saved_sg);
3044
3045 out_nomem:
3046         res = -ENOMEM;
3047         goto out;
3048 }
3049
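/*
 * Editor's note: the full-copy fallback. When more than the tail element is
 * misaligned, the whole SG vector is replaced with a new page-aligned one
 * (WRITE data is copied in here, READ data is copied back on unmap) and
 * *psgl/*pnents are updated to point at the replacement.
 */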
3050 static int blk_rq_handle_align(struct request *rq, struct scatterlist **psgl,
3051                                int *pnents, struct scatterlist *sgl_to_copy,
3052                                int nents_to_copy, gfp_t gfp, gfp_t page_gfp)
3053 {
3054         int res = 0, i;
3055         struct scatterlist *sgl = *psgl;
3056         int nents = *pnents;
3057         struct sg_table sg_table;
3058         struct scatterlist *sg;
3059         struct scatterlist *new_sgl;
3060         size_t len = 0, to_copy;
3061         int new_sgl_nents;
3062         struct blk_kern_sg_hdr *hdr;
3063
3064         if (sgl != sgl_to_copy) {
3065                 /* copy only the last element */
3066                 res = blk_rq_handle_align_tail_only(rq, sgl_to_copy,
3067                                 gfp, page_gfp);
3068                 if (res == 0)
3069                         goto out;
3070                 /* otherwise fall back to copying the whole SG list below */
3071         }
3072
3073         for_each_sg(sgl, sg, nents, i)
3074                 len += sg->length;
3075         to_copy = len;
3076
3077         new_sgl_nents = PFN_UP(len) + BLK_KERN_SG_HDR_ENTRIES;
3078
3079         res = sg_alloc_table(&sg_table, new_sgl_nents, gfp);
3080         if (res != 0)
3081                 goto out;
3082
3083         new_sgl = sg_table.sgl;
3084         hdr = (struct blk_kern_sg_hdr *)new_sgl;
3085         new_sgl += BLK_KERN_SG_HDR_ENTRIES;
3086         new_sgl_nents -= BLK_KERN_SG_HDR_ENTRIES;
3087
3088         hdr->tail_only = false;
3089         hdr->orig_sgp = sgl;
3090         hdr->new_sg_table = sg_table;
3091
3092         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3093                 struct page *pg;
3094
3095                 pg = alloc_page(page_gfp);
3096                 if (pg == NULL)
3097                         goto err_free_new_sgl;
3098
3099                 sg_assign_page(sg, pg);
3100                 sg->length = min_t(size_t, PAGE_SIZE, len);
3101
3102                 len -= PAGE_SIZE;
3103         }
3104
3105         if (rq_data_dir(rq) == WRITE) {
3106                 /*
3107                  * We must limit the amount of copied data to to_copy,
3108                  * because sgl might have its last element not marked as
3109                  * the last one in an SG chain.
3110                  */
3111                 sg_copy(new_sgl, sgl, to_copy, KM_USER0, KM_USER1);
3112         }
3113
3114         rq->end_io_data = hdr;
3115         rq->cmd_flags |= REQ_COPY_USER;
3116
3117         *psgl = new_sgl;
3118         *pnents = new_sgl_nents;
3119
3120 out:
3121         return res;
3122
3123 err_free_new_sgl:
3124         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3125                 struct page *pg = sg_page(sg);
3126                 if (pg == NULL)
3127                         break;
3128                 __free_page(pg);
3129         }
3130         sg_free_table(&sg_table);
3131
3132         res = -ENOMEM;
3133         goto out;
3134 }
3135
3136 static void bio_map_kern_endio(struct bio *bio, int err)
3137 {
3138         bio_put(bio);
3139 }
3140
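/*
 * __blk_rq_map_kern_sg() - try to build bios directly over the SG pages
 * and append them to @rq. On a DMA alignment violation (or a buffer that
 * lives on the stack) it fails and reports via @sgl_to_copy and
 * @nents_to_copy which part of the list the caller has to bounce.
 */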
3141 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3142         int nents, gfp_t gfp, struct scatterlist **sgl_to_copy,
3143         int *nents_to_copy)
3144 {
3145         int res;
3146         struct request_queue *q = rq->q;
3147         int rw = rq_data_dir(rq);
3148         int max_nr_vecs, i;
3149         size_t tot_len;
3150         bool need_new_bio;
3151         struct scatterlist *sg, *prev_sg = NULL;
3152         struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
3153
3154         *sgl_to_copy = NULL;
3155
3156         if (unlikely((sgl == NULL) || (nents <= 0))) {
3157                 WARN_ON(1);
3158                 res = -EINVAL;
3159                 goto out;
3160         }
3161
3162         /*
3163          * Let's keep each bio allocation inside a single page to
3164          * decrease the probability of allocation failure.
3165          */
3166         max_nr_vecs = min_t(size_t,
3167                 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
3168                 BIO_MAX_PAGES);
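        /*
         * E.g., on a 64-bit build with 4 KB pages and 16-byte bio_vecs this
         * yields roughly (4096 - sizeof(struct bio)) / 16, i.e. around 250
         * vecs per bio, capped at BIO_MAX_PAGES (the exact figure depends
         * on the kernel version and configuration).
         */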
3169
3170         need_new_bio = true;
3171         tot_len = 0;
3172         for_each_sg(sgl, sg, nents, i) {
3173                 struct page *page = sg_page(sg);
3174                 void *page_addr = page_address(page);
3175                 size_t len = sg->length, l;
3176                 size_t offset = sg->offset;
3177
3178                 tot_len += len;
3179                 prev_sg = sg;
3180
3181                 /*
3182                  * Each segment must be aligned on the DMA boundary and
3183                  * must not be located on the stack. The last segment may
3184                  * have an unaligned length as long as the total length
3185                  * is aligned to the DMA padding alignment.
3186                  */
3187                 if (i == nents - 1)
3188                         l = 0;
3189                 else
3190                         l = len;
3191                 if (((sg->offset | l) & queue_dma_alignment(q)) ||
3192                     (page_addr && object_is_on_stack(page_addr + sg->offset))) {
3193                         res = -EINVAL;
3194                         goto out_need_copy;
3195                 }
3196
3197                 while (len > 0) {
3198                         size_t bytes;
3199                         int rc;
3200
3201                         if (need_new_bio) {
3202                                 bio = bio_kmalloc(gfp, max_nr_vecs);
3203                                 if (bio == NULL) {
3204                                         res = -ENOMEM;
3205                                         goto out_free_bios;
3206                                 }
3207
3208                                 if (rw == WRITE)
3209                                         bio->bi_rw |= 1 << BIO_RW;
3210
3211                                 bio->bi_end_io = bio_map_kern_endio;
3212
3213                                 if (hbio == NULL)
3214                                         hbio = tbio = bio;
3215                                 else
3216                                         tbio = tbio->bi_next = bio;
3217                         }
3218
3219                         bytes = min_t(size_t, len, PAGE_SIZE - offset);
3220
3221                         rc = bio_add_pc_page(q, bio, page, bytes, offset);
3222                         if (rc < (int)bytes) {
3223                                 if (unlikely(need_new_bio || (rc < 0))) {
3224                                         if (rc < 0)
3225                                                 res = rc;
3226                                         else
3227                                                 res = -EIO;
3228                                         goto out_need_copy;
3229                                 } else {
3230                                         need_new_bio = true;
3231                                         len -= rc;
3232                                         offset += rc;
3233                                         continue;
3234                                 }
3235                         }
3236
3237                         need_new_bio = false;
3238                         offset = 0;
3239                         len -= bytes;
3240                         page = nth_page(page, 1);
3241                 }
3242         }
3243
3244         if (hbio == NULL) {
3245                 res = -EINVAL;
3246                 goto out_free_bios;
3247         }
3248
3249         /* The total length must be aligned to the DMA padding alignment */
3250         if ((tot_len & q->dma_pad_mask) &&
3251             !(rq->cmd_flags & REQ_COPY_USER)) {
3252                 res = -EINVAL;
3253                 if (sgl->offset == 0) {
3254                         *sgl_to_copy = prev_sg;
3255                         *nents_to_copy = 1;
3256                         goto out_free_bios;
3257                 } else
3258                         goto out_need_copy;
3259         }
3260
3261         while (hbio != NULL) {
3262                 bio = hbio;
3263                 hbio = hbio->bi_next;
3264                 bio->bi_next = NULL;
3265
3266                 blk_queue_bounce(q, &bio);
3267
3268                 res = blk_rq_append_bio(q, rq, bio);
3269                 if (unlikely(res != 0)) {
3270                         bio->bi_next = hbio;
3271                         hbio = bio;
3272                         goto out_free_bios;
3273                 }
3274         }
3275
3276         rq->buffer = rq->data = NULL;
3277
3278 out:
3279         return res;
3280
3281 out_need_copy:
3282         *sgl_to_copy = sgl;
3283         *nents_to_copy = nents;
3284
3285 out_free_bios:
3286         while (hbio != NULL) {
3287                 bio = hbio;
3288                 hbio = hbio->bi_next;
3289                 bio_put(bio);
3290         }
3291         goto out;
3292 }
3293
3294 /**
3295  * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3296  * @rq:         request to fill
3297  * @sgl:        area to map
3298  * @nents:      number of elements in @sgl
3299  * @gfp:        memory allocation flags
3300  *
3301  * Description:
3302  *    Data will be mapped directly if possible. Otherwise a bounce
3303  *    buffer will be used.
3304  */
3305 int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3306                        int nents, gfp_t gfp)
3307 {
3308         int res;
3309         struct scatterlist *sg_to_copy = NULL;
3310         int nents_to_copy = 0;
3311
3312         if (unlikely((sgl == NULL) || (sgl->length == 0) ||
3313                      (nents <= 0) || (rq->end_io_data != NULL))) {
3314                 WARN_ON(1);
3315                 res = -EINVAL;
3316                 goto out;
3317         }
3318
3319         res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3320                                 &nents_to_copy);
3321         if (unlikely(res != 0)) {
3322                 if (sg_to_copy == NULL)
3323                         goto out;
3324
3325                 res = blk_rq_handle_align(rq, &sgl, &nents, sg_to_copy,
3326                                 nents_to_copy, gfp, rq->q->bounce_gfp | gfp);
3327                 if (unlikely(res != 0))
3328                         goto out;
3329
3330                 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3331                                                 &nents_to_copy);
3332                 if (res != 0) {
3333                         blk_rq_unmap_kern_sg(rq, 0);
3334                         goto out;
3335                 }
3336         }
3337
3338         rq->buffer = rq->data = NULL;
3339
3340 out:
3341         return res;
3342 }
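/*
 * Note: blk_rq_map_kern_sg() keeps its bounce bookkeeping in
 * rq->end_io_data, so callers that reuse that field must save and restore
 * it before calling blk_rq_unmap_kern_sg(); scsi_execute_async() and
 * scsi_end_async() below show the required pairing.
 */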
3343
3344 struct scsi_io_context {
3345         void *blk_data;
3346         void *data;
3347         void (*done)(void *data, char *sense, int result, int resid);
3348         char sense[SCSI_SENSE_BUFFERSIZE];
3349 };
3350
3351 static void scsi_end_async(struct request *req, int error)
3352 {
3353         struct scsi_io_context *sioc = req->end_io_data;
3354
3355         req->end_io_data = sioc->blk_data;
3356         blk_rq_unmap_kern_sg(req, (error == 0));
3357
3358         if (sioc->done)
3359                 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3360
3361         kfree(sioc);
3362         __blk_put_request(req->q, req);
3363 }
3364
3365 /**
3366  * scsi_execute_async - insert request
3367  * @sdev:       scsi device
3368  * @cmd:        scsi command
3369  * @cmd_len:    length of scsi cdb
3370  * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
3371  * @sgl:        data buffer scatterlist
3372  * @nents:      number of elements in the sgl
3373  * @timeout:    request timeout in jiffies
3374  * @retries:    number of times to retry request
3375  * @privdata:   data passed to done()
3376  * @done:       callback function when done
3377  * @gfp:        memory allocation flags
3378  * @flags:      one or more SCSI_ASYNC_EXEC_FLAG_* flags
3379  */
3380 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
3381                        int cmd_len, int data_direction, struct scatterlist *sgl,
3382                        int nents, int timeout, int retries, void *privdata,
3383                        void (*done)(void *, char *, int, int), gfp_t gfp,
3384                        int flags)
3385 {
3386         struct request *req;
3387         struct scsi_io_context *sioc;
3388         int err = 0;
3389         int write = (data_direction == DMA_TO_DEVICE);
3390
3391         sioc = kzalloc(sizeof(*sioc), gfp);
3392         if (sioc == NULL)
3393                 return DRIVER_ERROR << 24;
3394
3395         req = blk_get_request(sdev->request_queue, write, gfp);
3396         if (req == NULL)
3397                 goto free_sense;
3398         req->cmd_type = REQ_TYPE_BLOCK_PC;
3399         req->cmd_flags |= REQ_QUIET;
3400
3401         if (flags & SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING)
3402                 req->cmd_flags |= REQ_COPY_USER;
3403
3404         if (sgl != NULL) {
3405                 err = blk_rq_map_kern_sg(req, sgl, nents, gfp);
3406                 if (err)
3407                         goto free_req;
3408         }
3409
3410         sioc->blk_data = req->end_io_data;
3411         sioc->data = privdata;
3412         sioc->done = done;
3413
3414         req->cmd_len = cmd_len;
3415         memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3416         memcpy(req->cmd, cmd, req->cmd_len);
3417         req->sense = sioc->sense;
3418         req->sense_len = 0;
3419         req->timeout = timeout;
3420         req->retries = retries;
3421         req->end_io_data = sioc;
3422
3423         blk_execute_rq_nowait(req->q, NULL, req,
3424                 flags & SCSI_ASYNC_EXEC_FLAG_AT_HEAD, scsi_end_async);
3425         return 0;
3426
3427 free_req:
3428         blk_put_request(req);
3429
3430 free_sense:
3431         kfree(sioc);
3432         return DRIVER_ERROR << 24;
3433 }
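/*
 * A minimal usage sketch (intentionally not compiled): my_done() and
 * my_test_unit_ready() are hypothetical names used only to illustrate the
 * calling convention of scsi_execute_async().
 */
#if 0
static void my_done(void *data, char *sense, int result, int resid)
{
        /* data is the privdata pointer passed to scsi_execute_async() */
        if (result != 0)
                printk(KERN_INFO "SCSI cmd failed: result %x\n", result);
}

static int my_test_unit_ready(struct scsi_device *sdev)
{
        static const unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

        /* No data phase, hence DMA_NONE and a NULL scatterlist */
        return scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_NONE, NULL, 0,
                        60 * HZ, 3, NULL, my_done, GFP_KERNEL, 0);
}
#endif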
3434 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
3435
3436 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3437 {
3438         struct scatterlist *src_sg, *dst_sg;
3439         unsigned int to_copy;
3440         int atomic = scst_cmd_atomic(cmd);
3441
3442         TRACE_ENTRY();
3443
3444         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3445                 if (cmd->data_direction != SCST_DATA_BIDI) {
3446                         src_sg = cmd->tgt_sg;
3447                         dst_sg = cmd->sg;
3448                         to_copy = cmd->bufflen;
3449                 } else {
3450                         TRACE_MEM("BIDI cmd %p", cmd);
3451                         src_sg = cmd->tgt_in_sg;
3452                         dst_sg = cmd->in_sg;
3453                         to_copy = cmd->in_bufflen;
3454                 }
3455         } else {
3456                 src_sg = cmd->sg;
3457                 dst_sg = cmd->tgt_sg;
3458                 to_copy = cmd->resp_data_len;
3459         }
3460
3461         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, "
3462                 "to_copy %d", cmd, copy_dir, src_sg, dst_sg, to_copy);
3463
3464         if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3465                 /*
3466                  * This can happen, e.g., with scst_user for a cmd with
3467                  * delayed allocation that failed with a Check Condition.
3468                  */
3469                 goto out;
3470         }
3471
3472         sg_copy(dst_sg, src_sg, to_copy, atomic ? KM_SOFTIRQ0 : KM_USER0,
3473                                          atomic ? KM_SOFTIRQ1 : KM_USER1);
3474
3475 out:
3476         TRACE_EXIT();
3477         return;
3478 }
3479
3480 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3481
3482 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
3483 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3484
3485 int scst_get_cdb_len(const uint8_t *cdb)
3486 {
3487         return SCST_GET_CDB_LEN(cdb[0]);
3488 }
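/*
 * Example: for READ(10) (opcode 0x28) the group is (0x28 >> 5) & 0x7 == 1,
 * so SCST_GET_CDB_LEN() returns SCST_CDB_LENGTH[1] == 10.
 */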
3489
3490 /* get_trans_len_x extracts x bytes from the CDB as the length, starting at off */
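/*
 * E.g., READ(10) carries a 2-byte transfer length at CDB bytes 7-8, so its
 * table entry in scst_cdbprobe.h uses get_trans_len_2 with off == 7.
 */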
3491
3492 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3493 {
3494         cmd->cdb_len = 10;
3495         cmd->bufflen = 0;
3496         return 0;
3497 }
3498
3499 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3500 {
3501         cmd->bufflen = 6;
3502         return 0;
3503 }
3504
3505 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3506 {
3507         cmd->bufflen = READ_CAP_LEN;
3508         return 0;
3509 }
3510
3511 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3512 {
3513         int res = 0;
3514
3515         TRACE_ENTRY();
3516
3517         if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3518                 cmd->op_name = "READ CAPACITY(16)";
3519                 cmd->bufflen = READ_CAP16_LEN;
3520                 cmd->op_flags |= SCST_IMPLICIT_HQ;
3521         } else
3522                 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3523
3524         TRACE_EXIT_RES(res);
3525         return res;
3526 }
3527
3528 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3529 {
3530         cmd->bufflen = 1;
3531         return 0;
3532 }
3533
3534 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3535 {
3536         uint8_t *p = (uint8_t *)cmd->cdb + off;
3537         int res = 0;
3538
3539         cmd->bufflen = 0;
3540         cmd->bufflen |= ((u32)p[0]) << 8;
3541         cmd->bufflen |= ((u32)p[1]);
3542
3543         switch (cmd->cdb[1] & 0x1f) {
3544         case 0:
3545         case 1:
3546         case 6:
3547                 if (cmd->bufflen != 0) {
3548                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3549                                 "allocation length for service action %x",
3550                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
3551                         goto out_inval;
3552                 }
3553                 break;
3554         }
3555
3556         switch (cmd->cdb[1] & 0x1f) {
3557         case 0:
3558         case 1:
3559                 cmd->bufflen = 20;
3560                 break;
3561         case 6:
3562                 cmd->bufflen = 32;
3563                 break;
3564         case 8:
3565                 cmd->bufflen = max(28, cmd->bufflen);
3566                 break;
3567         default:
3568                 PRINT_ERROR("READ POSITION: Invalid service action %x",
3569                         cmd->cdb[1] & 0x1f);
3570                 goto out_inval;
3571         }
3572
3573 out:
3574         return res;
3575
3576 out_inval:
3577         scst_set_cmd_error(cmd,
3578                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3579         res = 1;
3580         goto out;
3581 }
3582
3583 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3584 {
3585         cmd->bufflen = (u32)cmd->cdb[off];
3586         return 0;
3587 }
3588
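/* In 6-byte READ/WRITE CDBs a transfer length of 0 means 256 blocks (SBC). */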
3589 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3590 {
3591         cmd->bufflen = (u32)cmd->cdb[off];
3592         if (cmd->bufflen == 0)
3593                 cmd->bufflen = 256;
3594         return 0;
3595 }
3596
3597 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3598 {
3599         const uint8_t *p = cmd->cdb + off;
3600
3601         cmd->bufflen = 0;
3602         cmd->bufflen |= ((u32)p[0]) << 8;
3603         cmd->bufflen |= ((u32)p[1]);
3604
3605         return 0;
3606 }
3607
3608 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3609 {
3610         const uint8_t *p = cmd->cdb + off;
3611
3612         cmd->bufflen = 0;
3613         cmd->bufflen |= ((u32)p[0]) << 16;
3614         cmd->bufflen |= ((u32)p[1]) << 8;
3615         cmd->bufflen |= ((u32)p[2]);
3616
3617         return 0;
3618 }
3619
3620 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3621 {
3622         const uint8_t *p = cmd->cdb + off;
3623
3624         cmd->bufflen = 0;
3625         cmd->bufflen |= ((u32)p[0]) << 24;
3626         cmd->bufflen |= ((u32)p[1]) << 16;
3627         cmd->bufflen |= ((u32)p[2]) << 8;
3628         cmd->bufflen |= ((u32)p[3]);
3629
3630         return 0;
3631 }
3632
3633 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3634 {
3635         cmd->bufflen = 0;
3636         return 0;
3637 }
3638
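/*
 * scst_get_cdb_info() - classify a CDB: look up cmd->cdb[0] in
 * scst_scsi_op_table (starting at the index given by scst_scsi_op_list)
 * for an entry supported by the device type, then fill in the CDB length,
 * operation name, data direction, flags and transfer length.
 */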
3639 int scst_get_cdb_info(struct scst_cmd *cmd)
3640 {
3641         int dev_type = cmd->dev->type;
3642         int i, res = 0;
3643         uint8_t op;
3644         const struct scst_sdbops *ptr = NULL;
3645
3646         TRACE_ENTRY();
3647
3648         op = cmd->cdb[0];       /* fetch the opcode */
3649
3650         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3651                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3652                 dev_type);
3653
3654         i = scst_scsi_op_list[op];
3655         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3656                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3657                         ptr = &scst_scsi_op_table[i];
3658                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3659                               ptr->ops, ptr->devkey[0], /* disk     */
3660                               ptr->devkey[1],   /* tape     */
3661                               ptr->devkey[2],   /* printer */
3662                               ptr->devkey[3],   /* cpu      */
3663                               ptr->devkey[4],   /* cdr      */
3664                               ptr->devkey[5],   /* cdrom    */
3665                               ptr->devkey[6],   /* scanner */
3666                               ptr->devkey[7],   /* worm     */
3667                               ptr->devkey[8],   /* changer */
3668                               ptr->devkey[9],   /* commdev */
3669                               ptr->op_name);
3670                         TRACE_DBG("direction=%d flags=%d off=%d",
3671                               ptr->direction,
3672                               ptr->flags,
3673                               ptr->off);
3674                         break;
3675                 }
3676                 i++;
3677         }
3678
3679         if (unlikely(ptr == NULL)) {
3680                 /* opcode not found or not currently supported */
3681                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3682                       dev_type);
3683                 res = -1;
3684                 cmd->op_flags = SCST_INFO_NOT_FOUND;
3685                 goto out;
3686         }
3687
3688         cmd->cdb_len = SCST_GET_CDB_LEN(op);
3689         cmd->op_name = ptr->op_name;
3690         cmd->data_direction = ptr->direction;
3691         cmd->op_flags = ptr->flags;
3692         res = (*ptr->get_trans_len)(cmd, ptr->off);
3693
3694 out:
3695         TRACE_EXIT_RES(res);
3696         return res;
3697 }
3698 EXPORT_SYMBOL(scst_get_cdb_info);
3699
3700 /* Pack an SCST LUN back into SCSI form (peripheral device addressing) */
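/*
 * E.g., LUN 5 becomes the big-endian 16-bit value 0x0005 in the first two
 * bytes of the returned 8-byte LUN field.
 */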
3701 uint64_t scst_pack_lun(const uint64_t lun)
3702 {
3703         uint64_t res;
3704         uint16_t *p = (uint16_t *)&res;
3705
3706         res = lun;
3707         *p = cpu_to_be16(*p);
3708
3709         TRACE_EXIT_HRES((unsigned long)res);
3710         return res;
3711 }
3712
3713 /*
3714  * Routine to extract a LUN number from an 8-byte LUN structure in
3715  * network byte order (BE) (see SAM-2, Section 4.12.3, page 40).
3716  * Supports three LUN unpacking methods: peripheral, flat space and
3717  * logical unit.
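 * For example, the 8-byte field 41 23 00 00 00 00 00 00 (hex) uses the
 * flat space method and unpacks to LUN 0x123.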
3718  */
3719 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3720 {
3721         uint64_t res = NO_SUCH_LUN;
3722         int address_method;
3723
3724         TRACE_ENTRY();
3725
3726         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3727
3728         if (unlikely(len < 2)) {
3729                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3730                         "more", len);
3731                 goto out;
3732         }
3733
3734         if (len > 2) {
3735                 switch (len) {
3736                 case 8:
3737                         if ((*((uint64_t *)lun) &
3738                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3739                                 goto out_err;
3740                         break;
3741                 case 4:
3742                         if (*((uint16_t *)&lun[2]) != 0)
3743                                 goto out_err;
3744                         break;
3745                 case 6:
3746                         if (*((uint32_t *)&lun[2]) != 0)
3747                                 goto out_err;
3748                         break;
3749                 default:
3750                         goto out_err;
3751                 }
3752         }
3753
3754         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
3755         switch (address_method) {
3756         case 0: /* peripheral device addressing method */
3757 #if 0
3758                 if (*lun) {
3759                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
3760                              "peripheral device addressing method 0x%02x, "
3761                              "expected 0", *lun);
3762                         break;
3763                 }
3764                 res = *(lun + 1);
3765                 break;
3766 #else
3767                 /*
3768                  * Looks like it's legal to treat it as the flat space
3769                  * addressing method as well
3770                  */
3771
3772                 /* fall through */
3773 #endif
3774
3775         case 1: /* flat space addressing method */
3776                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3777                 break;
3778
3779         case 2: /* logical unit addressing method */
3780                 if (*lun & 0x3f) {
3781                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3782                                     "addressing method 0x%02x, expected 0",
3783                                     *lun & 0x3f);
3784                         break;
3785                 }
3786                 if (*(lun + 1) & 0xe0) {
3787                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
3788                                     "addressing method 0x%02x, expected 0",
3789                                     (*(lun + 1) & 0xe0) >> 5);
3790                         break;
3791                 }
3792                 res = *(lun + 1) & 0x1f;
3793                 break;
3794
3795         case 3: /* extended logical unit addressing method */
3796         default:
3797                 PRINT_ERROR("Unimplemented LUN addressing method %u",
3798                             address_method);
3799                 break;