/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <asm/kmap_types.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
        int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

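/*
 * scst_alloc_sense() - allocate, if not yet allocated, the sense buffer
 * of @cmd from scst_sense_mempool and zero it. @atomic selects GFP_ATOMIC
 * instead of GFP_KERNEL|__GFP_NOFAIL. Returns 0 on success, -ENOMEM on
 * allocation failure.
 */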
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        if (cmd->sense != NULL)
                goto memzero;

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

memzero:
        cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

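/*
 * scst_alloc_set_sense() - allocate the sense buffer of @cmd and copy
 * @sense into it, truncating to the buffer size if needed. Returns the
 * result of scst_alloc_sense().
 */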
int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
        TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

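/*
 * scst_set_cmd_error_status() - set SCSI status @status for @cmd and
 * mark it completed. The original response length and data direction
 * are saved in the dbl_ua_orig_* fields before being reset.
 */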
void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

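/*
 * scst_set_cmd_error() - set CHECK CONDITION status for @cmd and fill
 * its sense buffer with key/asc/ascq, using the sense format (fixed or
 * descriptor) selected by the D_SENSE setting of the command's device.
 */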
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, cmd->sense_bufflen,
                scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

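/*
 * scst_set_sense() - fill @buffer with sense data key/asc/ascq, in
 * descriptor format if @d_sense is true, otherwise in fixed format.
 * Typical usage, as in several callers in this file:
 *
 *      uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
 *      scst_set_sense(sense_buffer, sizeof(sense_buffer),
 *              tgt_dev->dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
 */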
void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
        int key, int asc, int ascq)
{
        sBUG_ON(len == 0);

        memset(buffer, 0, len);

        if (d_sense) {
                /* Descriptor format */
                if (len < 4) {
                        PRINT_ERROR("Length %d of sense buffer too small to "
                                "fit sense %x:%x:%x", len, key, asc, ascq);
                }

                buffer[0] = 0x72;               /* Response Code        */
                if (len > 1)
                        buffer[1] = key;        /* Sense Key            */
                if (len > 2)
                        buffer[2] = asc;        /* ASC                  */
                if (len > 3)
                        buffer[3] = ascq;       /* ASCQ                 */
        } else {
                /* Fixed format */
                if (len < 14) {
                        PRINT_ERROR("Length %d of sense buffer too small to "
                                "fit sense %x:%x:%x", len, key, asc, ascq);
                }

                buffer[0] = 0x70;               /* Response Code        */
                if (len > 2)
                        buffer[2] = key;        /* Sense Key            */
                if (len > 7)
                        buffer[7] = 0x0a;       /* Additional Sense Length */
                if (len > 12)
                        buffer[12] = asc;       /* ASC                  */
                if (len > 13)
                        buffer[13] = ascq;      /* ASCQ                 */
        }

        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

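/*
 * scst_analyze_sense() - return true if @sense matches key/asc/ascq.
 * Only the fields enabled in @valid_mask (SCST_SENSE_KEY_VALID,
 * SCST_SENSE_ASC_VALID, SCST_SENSE_ASCQ_VALID) are compared; both
 * fixed and descriptor sense formats are recognized.
 */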
bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
        int key, int asc, int ascq)
{
        bool res = false;

        /* Response Code */
        if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
                /* Fixed format */

                if (len < 14) {
                        PRINT_ERROR("Sense too small to analyze (%d, "
                                "type fixed)", len);
                        goto out;
                }

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
                        goto out;
        } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
                /* Descriptor format */

                if (len < 4) {
                        PRINT_ERROR("Sense too small to analyze (%d, "
                                "type descriptor)", len);
                        goto out;
                }

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
                        goto out;
        } else
                goto out;

        res = true;

out:
        TRACE_EXIT_RES((int)res);
        return res;
}
EXPORT_SYMBOL(scst_analyze_sense);

bool scst_is_ua_sense(const uint8_t *sense, int len)
{
        if (SCST_SENSE_VALID(sense))
                return scst_analyze_sense(sense, len,
                        SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
        else
                return false;
}
EXPORT_SYMBOL(scst_is_ua_sense);

bool scst_is_ua_global(const uint8_t *sense, int len)
{
        bool res;

        /* If you change this, don't forget to change scst_requeue_ua() as well! */

        if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
                        SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed)))
                res = true;
        else
                res = false;

        return res;
}

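/*
 * scst_check_convert_sense() - if the sense format of @cmd (fixed or
 * descriptor) differs from what the device's D_SENSE setting requests,
 * convert the sense data in place to the requested format.
 */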
void scst_check_convert_sense(struct scst_cmd *cmd)
{
        bool d_sense;

        TRACE_ENTRY();

        if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
                goto out;

        d_sense = scst_get_cmd_dev_d_sense(cmd);
        if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
                TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
                        cmd);
                if (cmd->sense_bufflen < 14) {
                        PRINT_ERROR("Sense too small to convert (%d, "
                                "type fixed)", cmd->sense_bufflen);
                        goto out;
                }
                scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
                        cmd->sense[2], cmd->sense[12], cmd->sense[13]);
        } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
                                (cmd->sense[0] == 0x73))) {
                TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
                        cmd);
                if (cmd->sense_bufflen < 4) {
                        PRINT_ERROR("Sense too small to convert (%d, "
                                "type descriptor)", cmd->sense_bufflen);
                        goto out;
                }
                scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
                        cmd->sense[1], cmd->sense[2], cmd->sense[3]);
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_check_convert_sense);

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

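/*
 * scst_set_busy() - finish @cmd with BUSY status if it is the session's
 * only outstanding command or the session is still being initialized,
 * otherwise with TASK SET FULL.
 */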
void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

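/*
 * scst_set_initial_UA() - replace the initial RESET UA, queued when each
 * tgt_dev is allocated (see scst_alloc_add_tgt_dev()), with the given
 * key/asc/ascq on every LUN of @sess.
 */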
void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
                asc, ascq);

        /* Protect sess_tgt_dev_list_hash */
        mutex_lock(&scst_mutex);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                struct scst_tgt_dev *tgt_dev;

                list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        spin_lock_bh(&tgt_dev->tgt_dev_lock);
                        if (!list_empty(&tgt_dev->UA_list)) {
                                struct scst_tgt_dev_UA *ua;

                                ua = list_entry(tgt_dev->UA_list.next,
                                        typeof(*ua), UA_list_entry);
                                if (scst_analyze_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                SCST_SENSE_ALL_VALID,
                                                SCST_LOAD_SENSE(scst_sense_reset_UA))) {
                                        scst_set_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                tgt_dev->dev->d_sense,
                                                key, asc, ascq);
                                } else
                                        PRINT_ERROR("%s",
                                                "The first UA isn't RESET UA");
                        } else
                                PRINT_ERROR("%s", "There's no RESET UA to "
                                        "replace");
                        spin_unlock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

static struct scst_aen *scst_alloc_aen(struct scst_session *sess,
        uint64_t unpacked_lun)
{
        struct scst_aen *aen;

        TRACE_ENTRY();

        aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
        if (aen == NULL) {
                PRINT_ERROR("AEN memory allocation failed. Corresponding "
                        "event notification will not be performed (initiator "
                        "%s)", sess->initiator_name);
                goto out;
        }
        memset(aen, 0, sizeof(*aen));

        aen->sess = sess;
        scst_sess_get(sess);

        aen->lun = scst_pack_lun(unpacked_lun);

out:
        TRACE_EXIT_HRES((unsigned long)aen);
        return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
        TRACE_ENTRY();

        scst_sess_put(aen->sess);
        mempool_free(aen, scst_aen_mempool);

        TRACE_EXIT();
        return;
}

/* Must be called under scst_mutex */
void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
        int key, int asc, int ascq)
{
        struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

        TRACE_ENTRY();

        if (tgtt->report_aen != NULL) {
                struct scst_aen *aen;
                int rc;

                aen = scst_alloc_aen(tgt_dev->sess, tgt_dev->lun);
                if (aen == NULL)
                        goto queue_ua;

                aen->event_fn = SCST_AEN_SCSI;
                aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                        tgt_dev->dev->d_sense, key, asc, ascq);

                TRACE_DBG("Calling target's %s report_aen(%p)",
                        tgtt->name, aen);
                rc = tgtt->report_aen(aen);
                TRACE_DBG("Target's %s report_aen(%p) returned %d",
                        tgtt->name, aen, rc);
                if (rc == SCST_AEN_RES_SUCCESS)
                        goto out;

                scst_free_aen(aen);
        }

queue_ua:
        TRACE_MGMT_DBG("AEN not supported, queuing plain UA (tgt_dev %p)",
                tgt_dev);
        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                tgt_dev->dev->d_sense, key, asc, ascq);
        scst_check_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

out:
        TRACE_EXIT();
        return;
}

/* No locks */
void scst_capacity_data_changed(struct scst_device *dev)
{
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        if (dev->type != TYPE_DISK) {
                TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
                        "CHANGED UA", dev->type);
                goto out;
        }

        TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

        mutex_lock(&scst_mutex);

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                            dev_tgt_dev_list_entry) {
                scst_gen_aen_or_ua(tgt_dev,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
        }

        mutex_unlock(&scst_mutex);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);

static inline bool scst_is_report_luns_changed_type(int type)
{
        switch (type) {
        case TYPE_DISK:
        case TYPE_TAPE:
        case TYPE_PRINTER:
        case TYPE_PROCESSOR:
        case TYPE_WORM:
        case TYPE_ROM:
        case TYPE_SCANNER:
        case TYPE_MOD:
        case TYPE_MEDIUM_CHANGER:
        case TYPE_RAID:
        case TYPE_ENCLOSURE:
                return true;
        default:
                return false;
        }
}

/* scst_mutex is supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
                                              int flags)
{
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
                "(sess %p)", sess);

        local_bh_disable();

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        /* Lockdep triggers a false positive here */
                        spin_lock(&tgt_dev->tgt_dev_lock);
                }
        }

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (!scst_is_report_luns_changed_type(
                                        tgt_dev->dev->type))
                                continue;

                        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                        __scst_check_set_UA(tgt_dev, sense_buffer,
                                sizeof(sense_buffer),
                                flags | SCST_SET_UA_FLAG_GLOBAL);
                }
        }

        for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry_reverse(tgt_dev,
                                shead, sess_tgt_dev_list_entry) {
                        spin_unlock(&tgt_dev->tgt_dev_lock);
                }
        }

        local_bh_enable();

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_report_luns_changed_sess(struct scst_session *sess)
{
        int i;
        struct scst_tgt_template *tgtt = sess->tgt->tgtt;
        int d_sense = 0;
        uint64_t lun = 0;

        TRACE_ENTRY();

        TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;

                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (scst_is_report_luns_changed_type(
                                        tgt_dev->dev->type)) {
                                lun = tgt_dev->lun;
                                d_sense = tgt_dev->dev->d_sense;
                                goto found;
                        }
                }
        }

found:
        if (tgtt->report_aen != NULL) {
                struct scst_aen *aen;
                int rc;

                aen = scst_alloc_aen(sess, lun);
                if (aen == NULL)
                        goto queue_ua;

                aen->event_fn = SCST_AEN_SCSI;
                aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                scst_set_sense(aen->aen_sense, aen->aen_sense_len, d_sense,
                        SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                TRACE_DBG("Calling target's %s report_aen(%p)",
                        tgtt->name, aen);
                rc = tgtt->report_aen(aen);
                TRACE_DBG("Target's %s report_aen(%p) returned %d",
                        tgtt->name, aen, rc);
                if (rc == SCST_AEN_RES_SUCCESS)
                        goto out;

                scst_free_aen(aen);
        }

queue_ua:
        scst_queue_report_luns_changed_UA(sess, 0);

out:
        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
        struct scst_session *sess;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                scst_report_luns_changed_sess(sess);
        }

        TRACE_EXIT();
        return;
}

void scst_aen_done(struct scst_aen *aen)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
                aen->event_fn, aen->sess->initiator_name);

        if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
                goto out_free;

        if (aen->event_fn != SCST_AEN_SCSI)
                goto out_free;

        TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
                aen->sess->initiator_name);

        if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
                                scst_sense_reported_luns_data_changed))) {
                mutex_lock(&scst_mutex);
                scst_queue_report_luns_changed_UA(aen->sess,
                        SCST_SET_UA_FLAG_AT_HEAD);
                mutex_unlock(&scst_mutex);
        } else {
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;
                uint64_t lun;

                lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

                mutex_lock(&scst_mutex);

                /* tgt_dev might have gone away, so look it up again */
                shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (tgt_dev->lun == lun) {
                                TRACE_MGMT_DBG("Requeuing failed AEN UA for "
                                        "tgt_dev %p", tgt_dev);
                                scst_check_set_UA(tgt_dev, aen->aen_sense,
                                        aen->aen_sense_len,
                                        SCST_SET_UA_FLAG_AT_HEAD);
                                break;
                        }
                }

                mutex_unlock(&scst_mutex);
        }

out_free:
        scst_free_aen(aen);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_aen_done);

void scst_requeue_ua(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
                        SCST_SENSE_ALL_VALID,
                        SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
                TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
                        "for delivery failed cmd %p", cmd);
                mutex_lock(&scst_mutex);
                scst_queue_report_luns_changed_UA(cmd->sess,
                        SCST_SET_UA_FLAG_AT_HEAD);
                mutex_unlock(&scst_mutex);
        } else {
                TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
                scst_check_set_UA(cmd->tgt_dev, cmd->sense,
                        cmd->sense_bufflen, SCST_SET_UA_FLAG_AT_HEAD);
        }

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_check_reassign_sess(struct scst_session *sess)
{
        struct scst_acg *acg, *old_acg;
        struct scst_acg_dev *acg_dev;
        int i;
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        bool luns_changed = false;
        bool add_failed, something_freed, not_needed_freed = false;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
                sess, sess->initiator_name);

        acg = scst_find_acg(sess);
        if (acg == sess->acg) {
                TRACE_MGMT_DBG("No reassignment for sess %p", sess);
                goto out;
        }

        TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
                sess, sess->acg->acg_name, acg->acg_name);

        old_acg = sess->acg;
        sess->acg = NULL; /* to catch implicit dependencies earlier */

retry_add:
        add_failed = false;
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                unsigned int inq_changed_ua_needed = 0;

                for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                        shead = &sess->sess_tgt_dev_list_hash[i];

                        list_for_each_entry(tgt_dev, shead,
                                        sess_tgt_dev_list_entry) {
                                if ((tgt_dev->dev == acg_dev->dev) &&
                                    (tgt_dev->lun == acg_dev->lun) &&
                                    (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
                                        TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
                                                "LUN %lld stays the same",
                                                sess, tgt_dev,
                                                (unsigned long long)tgt_dev->lun);
                                        tgt_dev->acg_dev = acg_dev;
                                        goto next;
                                } else if (tgt_dev->lun == acg_dev->lun)
                                        inq_changed_ua_needed = 1;
                        }
                }

                luns_changed = true;

                TRACE_MGMT_DBG("sess %p: Allocating new tgt_dev for LUN %lld",
                        sess, (unsigned long long)acg_dev->lun);

                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        add_failed = true;
                        break;
                }

                tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
                                                 not_needed_freed;
next:
                continue;
        }

        something_freed = false;
        not_needed_freed = true;
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct scst_tgt_dev *t;
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry_safe(tgt_dev, t, shead,
                                        sess_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev->acg != acg) {
                                TRACE_MGMT_DBG("sess %p: Deleting not used "
                                        "tgt_dev %p for LUN %lld",
                                        sess, tgt_dev,
                                        (unsigned long long)tgt_dev->lun);
                                luns_changed = true;
                                something_freed = true;
                                scst_free_tgt_dev(tgt_dev);
                        }
                }
        }

        if (add_failed && something_freed) {
                TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
                goto retry_add;
        }

        sess->acg = acg;

        TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
                old_acg->acg_name, acg->acg_name);
        list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);

        if (luns_changed) {
                scst_report_luns_changed_sess(sess);

                for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                        shead = &sess->sess_tgt_dev_list_hash[i];

                        list_for_each_entry(tgt_dev, shead,
                                        sess_tgt_dev_list_entry) {
                                if (tgt_dev->inq_changed_ua_needed) {
                                        TRACE_MGMT_DBG("sess %p: Setting "
                                                "INQUIRY DATA HAS CHANGED UA "
                                                "(tgt_dev %p)", sess, tgt_dev);

                                        tgt_dev->inq_changed_ua_needed = 0;

                                        scst_gen_aen_or_ua(tgt_dev,
                                                SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
                                }
                        }
                }
        }

out:
        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
void scst_check_reassign_sessions(void)
{
        struct scst_tgt_template *tgtt;

        TRACE_ENTRY();

        list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
                struct scst_tgt *tgt;
                list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
                        struct scst_session *sess;
                        list_for_each_entry(sess, &tgt->sess_list,
                                                sess_list_entry) {
                                scst_check_reassign_sess(sess);
                        }
                }
        }

        TRACE_EXIT();
        return;
}

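/*
 * scst_get_cmd_abnormal_done_state() - return the processing state to
 * which @cmd should be set to finish its processing abnormally, based
 * on how far the command has already progressed.
 */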
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_DEV_DONE:
                if (cmd->internal)
                        res = SCST_CMD_STATE_FINISHED_INTERNAL;
                else
                        res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PRE_DEV_DONE:
        case SCST_CMD_STATE_MODE_SELECT_CHECKS:
                res = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_CMD_STATE_PRE_XMIT_RESP:
                res = SCST_CMD_STATE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PREPROCESS_DONE:
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_DATA_WAIT:
        case SCST_CMD_STATE_TGT_PRE_EXEC:
        case SCST_CMD_STATE_SEND_FOR_EXEC:
        case SCST_CMD_STATE_LOCAL_EXEC:
        case SCST_CMD_STATE_REAL_EXEC:
        case SCST_CMD_STATE_REAL_EXECUTING:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;

        default:
                PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
                /* Invalid state to suppress a compiler warning */
                res = SCST_CMD_STATE_LAST_ACTIVE;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_FINISHED_INTERNAL:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

#ifdef CONFIG_SCST_EXTRACHECKS
        if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                   (cmd->tgt_dev == NULL) && !cmd->internal) {
                PRINT_CRIT_ERROR("Wrong state %d of not initialized cmd "
                        "(cmd %p, op %x)", cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

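/*
 * scst_set_resp_data_len() - set the response data length of @cmd to
 * @resp_data_len and, if it is shorter than the buffer, cut the SG
 * vector accordingly. The original SG values are saved so that the
 * buffer can be restored later (see scst_check_restore_sg_buff()).
 */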
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

/* No locks */
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
        struct scst_tgt *tgt = cmd->tgt;
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_cmds++;
        /*
         * A memory barrier is needed here to enforce the exact ordering
         * between the write to retry_cmds and the read of finished_cmds,
         * so that we don't miss the case when a command finishes while we
         * are queuing it for retry after the finished_cmds check.
         */
        smp_mb();
        TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
              tgt->retry_cmds);
        if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
                /* At least one cmd finished, so try again */
                tgt->retry_cmds--;
                TRACE_RETRY("Some command(s) finished, direct retry "
                      "(finished_cmds=%d, tgt->finished_cmds=%d, "
                      "retry_cmds=%d)", finished_cmds,
                      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
                res = -1;
                goto out_unlock_tgt;
        }

        TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
        list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

        if (!tgt->retry_timer_active) {
                tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
                add_timer(&tgt->retry_timer);
                tgt->retry_timer_active = 1;
        }

out_unlock_tgt:
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        TRACE_EXIT_RES(res);
        return res;
}

/* Returns 0 to continue, >0 to restart, <0 to break */
static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
        unsigned long cur_time, unsigned long max_time,
        struct scst_session *sess, unsigned long *flags,
        struct scst_tgt_template *tgtt)
{
        int res = -1; /* break */

        TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
                "pending time %ld", cmd, cmd->cmd_hw_pending,
                (long)(cur_time - cmd->start_time) / HZ,
                (long)(cur_time - cmd->hw_pending_start) / HZ);

        if (time_before_eq(cur_time, cmd->start_time + max_time)) {
                /* Cmds are ordered, so no need to check more */
                goto out;
        }

        if (!cmd->cmd_hw_pending) {
                res = 0; /* continue */
                goto out;
        }

        if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
                /* Cmds are ordered, so no need to check more */
                goto out;
        }

        TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
                cmd, (cur_time - cmd->hw_pending_start) / HZ,
                cmd->state);

        cmd->cmd_hw_pending = 0;

        spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
        tgtt->on_hw_pending_cmd_timeout(cmd);
        spin_lock_irqsave(&sess->sess_list_lock, *flags);

        res = 1; /* restart */

out:
        TRACE_EXIT_RES(res);
        return res;
}

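/*
 * Delayed work to detect commands stuck in the target hardware for
 * longer than max_hw_pending_time seconds; each such command is passed
 * to the target template's on_hw_pending_cmd_timeout() callback.
 */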
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void scst_hw_pending_work_fn(void *p)
#else
static void scst_hw_pending_work_fn(struct delayed_work *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
        struct scst_session *sess = (struct scst_session *)p;
#else
        struct scst_session *sess = container_of(work, struct scst_session,
                                        hw_pending_work);
#endif
        struct scst_tgt_template *tgtt = sess->tgt->tgtt;
        struct scst_cmd *cmd;
        unsigned long cur_time = jiffies;
        unsigned long flags;
        unsigned long max_time = tgtt->max_hw_pending_time * HZ;

        TRACE_ENTRY();

        TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);

        clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);

        spin_lock_irqsave(&sess->sess_list_lock, flags);

restart:
        list_for_each_entry(cmd, &sess->search_cmd_list,
                                sess_cmd_list_entry) {
                int rc;

                rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
                                        &flags, tgtt);
                if (rc < 0)
                        break;
                else if (rc == 0)
                        continue;
                else
                        goto restart;
        }

restart1:
        list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
                                sess_cmd_list_entry) {
                int rc;

                rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
                                        &flags, tgtt);
                if (rc < 0)
                        break;
                else if (rc == 0)
                        continue;
                else
                        goto restart1;
        }

        if (!list_empty(&sess->search_cmd_list) ||
            !list_empty(&sess->after_pre_xmit_cmd_list)) {
                /*
                 * For stuck cmds, if there is no activity we might need one
                 * more run to release them, so reschedule once again.
                 */
                TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
                        sess, tgtt->max_hw_pending_time);
                set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
                schedule_delayed_work(&sess->hw_pending_work,
                                tgtt->max_hw_pending_time * HZ);
        }

        spin_unlock_irqrestore(&sess->sess_list_lock, flags);

        TRACE_EXIT();
        return;
}

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
        if (dev->dev_io_ctx == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
                res = -ENOMEM;
                kfree(dev);
                goto out;
        }
#endif
#endif

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        __exit_io_context(dev->dev_io_ctx);

        kfree(dev);

        TRACE_EXIT();
        return;
}

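/*
 * scst_init_mem_lim() - initialize @mem_lim: no pages allocated yet and
 * the maximum taken from scst_max_dev_cmd_mem (MB), converted to pages.
 */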
void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

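/* Allocates a zeroed scst_acg_dev for @acg/@dev/@lun from scst_acgd_cachep */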
static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

        scst_check_reassign_sessions();

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

1408 /*
1409  * scst_mutex supposed to be held, there must not be parallel activity in this
1410  * session.
1411  */
1412 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
1413         struct scst_acg_dev *acg_dev)
1414 {
1415         int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
1416         struct scst_tgt_dev *tgt_dev, *t = NULL;
1417         struct scst_device *dev = acg_dev->dev;
1418         struct list_head *sess_tgt_dev_list_head;
1419         struct scst_tgt_template *vtt = sess->tgt->tgtt;
1420         int rc, i;
1421         bool share_io_ctx = false;
1422         uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1423
1424         TRACE_ENTRY();
1425
1426 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1427         tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
1428 #else
1429         tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
1430 #endif
1431         if (tgt_dev == NULL) {
1432                 TRACE(TRACE_OUT_OF_MEM, "%s",
1433                       "Allocation of scst_tgt_dev failed");
1434                 goto out;
1435         }
1436 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1437         memset(tgt_dev, 0, sizeof(*tgt_dev));
1438 #endif
1439
1440         tgt_dev->dev = dev;
1441         tgt_dev->lun = acg_dev->lun;
1442         tgt_dev->acg_dev = acg_dev;
1443         tgt_dev->sess = sess;
1444         atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
1445
1446         scst_sgv_pool_use_norm(tgt_dev);
1447
1448         if (dev->scsi_dev != NULL) {
1449                 ini_sg = dev->scsi_dev->host->sg_tablesize;
1450                 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
1451                 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
1452                                 ENABLE_CLUSTERING);
1453         } else {
1454                 ini_sg = (1 << 15) /* infinite */;
1455                 ini_unchecked_isa_dma = 0;
1456                 ini_use_clustering = 0;
1457         }
1458         tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
1459
1460         if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
1461             !sess->tgt->tgtt->no_clustering)
1462                 scst_sgv_pool_use_norm_clust(tgt_dev);
1463
1464         if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
1465                 scst_sgv_pool_use_dma(tgt_dev);
1466
1467         if (dev->scsi_dev != NULL) {
1468                 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
1469                       "SCST lun=%lld", dev->scsi_dev->host->host_no,
1470                       dev->scsi_dev->channel, dev->scsi_dev->id,
1471                       dev->scsi_dev->lun,
1472                       (long long unsigned int)tgt_dev->lun);
1473         } else {
1474                 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
1475                        dev->virt_name, (long long unsigned int)tgt_dev->lun);
1476         }
1477
1478         spin_lock_init(&tgt_dev->tgt_dev_lock);
1479         INIT_LIST_HEAD(&tgt_dev->UA_list);
1480         spin_lock_init(&tgt_dev->thr_data_lock);
1481         INIT_LIST_HEAD(&tgt_dev->thr_data_list);
1482         spin_lock_init(&tgt_dev->sn_lock);
1483         INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
1484         INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
1485         tgt_dev->expected_sn = 1;
1486         tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
1487         tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
1488         for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
1489                 atomic_set(&tgt_dev->sn_slots[i], 0);
1490
1491         if (dev->handler->parse_atomic &&
1492             (sess->tgt->tgtt->preprocessing_done == NULL)) {
1493                 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1494                         __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
1495                                 &tgt_dev->tgt_dev_flags);
1496                 if (dev->handler->exec_atomic)
1497                         __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
1498                                 &tgt_dev->tgt_dev_flags);
1499         }
1500         if (dev->handler->exec_atomic) {
1501                 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1502                         __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
1503                                 &tgt_dev->tgt_dev_flags);
1504                 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
1505                                 &tgt_dev->tgt_dev_flags);
1506                 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1507                         &tgt_dev->tgt_dev_flags);
1508         }
1509         if (dev->handler->dev_done_atomic &&
1510             sess->tgt->tgtt->xmit_response_atomic) {
1511                 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1512                         &tgt_dev->tgt_dev_flags);
1513         }
1514
1515         scst_set_sense(sense_buffer, sizeof(sense_buffer),
1516                 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
1517         scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);
1518
1519         tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
1520
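             /*
              * If another session from the same initiator already has a
              * tgt_dev on this device, share its IO context so the block
              * IO scheduler treats that initiator's traffic as one stream.
              */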
1521         if (tgt_dev->sess->initiator_name != NULL) {
1522                 spin_lock_bh(&dev->dev_lock);
1523                 list_for_each_entry(t, &dev->dev_tgt_dev_list,
1524                                 dev_tgt_dev_list_entry) {
1525                         TRACE_DBG("t name %s (tgt_dev name %s)",
1526                                 t->sess->initiator_name,
1527                                 tgt_dev->sess->initiator_name);
1528                         if (t->sess->initiator_name == NULL)
1529                                 continue;
1530                         if (strcmp(t->sess->initiator_name,
1531                                         tgt_dev->sess->initiator_name) == 0) {
1532                                 share_io_ctx = true;
1533                                 break;
1534                         }
1535                 }
1536                 spin_unlock_bh(&dev->dev_lock);
1537         }
1538
1539         if (share_io_ctx) {
1540                 TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
1541                         t->tgt_dev_io_ctx, tgt_dev,
1542                         tgt_dev->sess->initiator_name);
1543                 tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
1544         } else {
1545 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1546 #if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
1547                 tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1548                 if (tgt_dev->tgt_dev_io_ctx == NULL) {
1549                         TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
1550                                 "context for dev %s (initiator %s)",
1551                                 dev->virt_name, sess->initiator_name);
1552                         goto out_free;
1553                 }
1554 #endif
1555 #endif
1556         }
1557
1558         if (vtt->threads_num > 0) {
1559                 rc = 0;
1560                 if (dev->handler->threads_num > 0)
1561                         rc = scst_add_dev_threads(dev, vtt->threads_num);
1562                 else if (dev->handler->threads_num == 0)
1563                         rc = scst_add_global_threads(vtt->threads_num);
1564                 if (rc != 0)
1565                         goto out_free;
1566         }
1567
1568         if (dev->handler && dev->handler->attach_tgt) {
1569                 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1570                       tgt_dev);
1571                 rc = dev->handler->attach_tgt(tgt_dev);
1572                 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1573                 if (rc != 0) {
1574                         PRINT_ERROR("Device handler's %s attach_tgt() "
1575                             "failed: %d", dev->handler->name, rc);
1576                         goto out_thr_free;
1577                 }
1578         }
1579
1580         spin_lock_bh(&dev->dev_lock);
1581         list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1582         if (dev->dev_reserved)
1583                 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1584         spin_unlock_bh(&dev->dev_lock);
1585
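             /* Sessions hash their tgt_devs by LUN for fast per-command lookup */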
1586         sess_tgt_dev_list_head =
1587                 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1588         list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1589                       sess_tgt_dev_list_head);
1590
1591 out:
1592         TRACE_EXIT();
1593         return tgt_dev;
1594
1595 out_thr_free:
1596         if (vtt->threads_num > 0) {
1597                 if (dev->handler->threads_num > 0)
1598                         scst_del_dev_threads(dev, vtt->threads_num);
1599                 else if (dev->handler->threads_num == 0)
1600                         scst_del_global_threads(vtt->threads_num);
1601         }
1602
1603 out_free:
1604         scst_free_all_UA(tgt_dev);
1605         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1606
1607         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1608         tgt_dev = NULL;
1609         goto out;
1610 }
1611
1612 /* No locks supposed to be held except scst_mutex, which must be held */
1613 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1614 {
1615         TRACE_ENTRY();
1616
1617         scst_clear_reservation(tgt_dev);
1618
1619         /* With activity suspended the lock isn't needed, but let's be safe */
1620         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1621         scst_free_all_UA(tgt_dev);
1622         memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1623         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1624
1625         if (queue_UA) {
1626                 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1627                 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1628                         tgt_dev->dev->d_sense,
1629                         SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1630                 scst_check_set_UA(tgt_dev, sense_buffer,
1631                         sizeof(sense_buffer), 0);
1632         }
1633
1634         TRACE_EXIT();
1635         return;
1636 }
1637
1638 /*
1639  * scst_mutex supposed to be held, there must not be parallel activity in this
1640  * session.
1641  */
1642 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1643 {
1644         struct scst_device *dev = tgt_dev->dev;
1645         struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1646
1647         TRACE_ENTRY();
1648
1649         tm_dbg_deinit_tgt_dev(tgt_dev);
1650
1651         spin_lock_bh(&dev->dev_lock);
1652         list_del(&tgt_dev->dev_tgt_dev_list_entry);
1653         spin_unlock_bh(&dev->dev_lock);
1654
1655         list_del(&tgt_dev->sess_tgt_dev_list_entry);
1656
1657         scst_clear_reservation(tgt_dev);
1658         scst_free_all_UA(tgt_dev);
1659
1660         if (dev->handler && dev->handler->detach_tgt) {
1661                 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1662                       tgt_dev);
1663                 dev->handler->detach_tgt(tgt_dev);
1664                 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1665         }
1666
1667         if (vtt->threads_num > 0) {
1668                 if (dev->handler->threads_num > 0)
1669                         scst_del_dev_threads(dev, vtt->threads_num);
1670                 else if (dev->handler->threads_num == 0)
1671                         scst_del_global_threads(vtt->threads_num);
1672         }
1673
1674         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1675
1676         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1677
1678         TRACE_EXIT();
1679         return;
1680 }
1681
1682 /* scst_mutex supposed to be held */
1683 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1684 {
1685         int res = 0;
1686         struct scst_acg_dev *acg_dev;
1687         struct scst_tgt_dev *tgt_dev;
1688
1689         TRACE_ENTRY();
1690
1691         list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1692                         acg_dev_list_entry) {
1693                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1694                 if (tgt_dev == NULL) {
1695                         res = -ENOMEM;
1696                         goto out_free;
1697                 }
1698         }
1699
1700 out:
1701         TRACE_EXIT();
1702         return res;
1703
1704 out_free:
1705         scst_sess_free_tgt_devs(sess);
1706         goto out;
1707 }
1708
1709 /*
1710  * scst_mutex supposed to be held, there must not be parallel activity in this
1711  * session.
1712  */
1713 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1714 {
1715         int i;
1716         struct scst_tgt_dev *tgt_dev, *t;
1717
1718         TRACE_ENTRY();
1719
1720         /* The session is going down, no users, so no locks */
1721         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1722                 struct list_head *sess_tgt_dev_list_head =
1723                         &sess->sess_tgt_dev_list_hash[i];
1724                 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1725                                 sess_tgt_dev_list_entry) {
1726                         scst_free_tgt_dev(tgt_dev);
1727                 }
1728                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1729         }
1730
1731         TRACE_EXIT();
1732         return;
1733 }
1734
1735 /* The activity supposed to be suspended and scst_mutex held */
1736 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1737         uint64_t lun, int read_only, bool gen_scst_report_luns_changed)
1738 {
1739         int res = 0;
1740         struct scst_acg_dev *acg_dev;
1741         struct scst_tgt_dev *tgt_dev;
1742         struct scst_session *sess;
1743         LIST_HEAD(tmp_tgt_dev_list);
1744
1745         TRACE_ENTRY();
1746
1747         INIT_LIST_HEAD(&tmp_tgt_dev_list);
1748
1749         acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1750         if (acg_dev == NULL) {
1751                 res = -ENOMEM;
1752                 goto out;
1753         }
1754         acg_dev->rd_only = read_only;
1755
1756         TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1757                 acg_dev);
1758         list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1759         list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1760
1761         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1762                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1763                 if (tgt_dev == NULL) {
1764                         res = -ENOMEM;
1765                         goto out_free;
1766                 }
1767                 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1768                               &tmp_tgt_dev_list);
1769         }
1770
1771         if (gen_scst_report_luns_changed)
1772                 scst_report_luns_changed(acg);
1773
1774         if (dev->virt_name != NULL) {
1775                 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1776                         "rd_only %d)", dev->virt_name, acg->acg_name,
1777                         (long long unsigned int)lun,
1778                         read_only);
1779         } else {
1780                 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1781                         "%lld, rd_only %d)",
1782                         dev->scsi_dev->host->host_no,
1783                         dev->scsi_dev->channel, dev->scsi_dev->id,
1784                         dev->scsi_dev->lun, acg->acg_name,
1785                         (long long unsigned int)lun,
1786                         read_only);
1787         }
1788
1789 out:
1790         TRACE_EXIT_RES(res);
1791         return res;
1792
1793 out_free:
1794         list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1795                          extra_tgt_dev_list_entry) {
1796                 scst_free_tgt_dev(tgt_dev);
1797         }
1798         scst_free_acg_dev(acg_dev);
1799         goto out;
1800 }
1801
1802 /* The activity supposed to be suspended and scst_mutex held */
1803 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev,
1804         bool gen_scst_report_luns_changed)
1805 {
1806         int res = 0;
1807         struct scst_acg_dev *acg_dev = NULL, *a;
1808         struct scst_tgt_dev *tgt_dev, *tt;
1809
1810         TRACE_ENTRY();
1811
1812         list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1813                 if (a->dev == dev) {
1814                         acg_dev = a;
1815                         break;
1816                 }
1817         }
1818
1819         if (acg_dev == NULL) {
1820                 PRINT_ERROR("Device not found in group %s", acg->acg_name);
1821                 res = -EINVAL;
1822                 goto out;
1823         }
1824
1825         list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1826                          dev_tgt_dev_list_entry) {
1827                 if (tgt_dev->acg_dev == acg_dev)
1828                         scst_free_tgt_dev(tgt_dev);
1829         }
1830         scst_free_acg_dev(acg_dev);
1831
1832         if (gen_scst_report_luns_changed)
1833                 scst_report_luns_changed(acg);
1834
1835         if (dev->virt_name != NULL) {
1836                 PRINT_INFO("Removed device %s from group %s",
1837                         dev->virt_name, acg->acg_name);
1838         } else {
1839                 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1840                         dev->scsi_dev->host->host_no,
1841                         dev->scsi_dev->channel, dev->scsi_dev->id,
1842                         dev->scsi_dev->lun, acg->acg_name);
1843         }
1844
1845 out:
1846         TRACE_EXIT_RES(res);
1847         return res;
1848 }
1849
1850 /* The activity supposed to be suspended and scst_mutex held */
1851 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1852 {
1853         int res = 0;
1854         struct scst_acn *n;
1855         int len;
1856         char *nm;
1857
1858         TRACE_ENTRY();
1859
1860         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1861                 if (strcmp(n->name, name) == 0) {
1862                         PRINT_ERROR("Name %s already exists in group %s",
1863                                 name, acg->acg_name);
1864                         res = -EINVAL;
1865                         goto out;
1866                 }
1867         }
1868
1869         n = kmalloc(sizeof(*n), GFP_KERNEL);
1870         if (n == NULL) {
1871                 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1872                 res = -ENOMEM;
1873                 goto out;
1874         }
1875
1876         len = strlen(name);
1877         nm = kmalloc(len + 1, GFP_KERNEL);
1878         if (nm == NULL) {
1879                 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1880                 res = -ENOMEM;
1881                 goto out_free;
1882         }
1883
1884         strcpy(nm, name);
1885         n->name = nm;
1886
1887         list_add_tail(&n->acn_list_entry, &acg->acn_list);
1888
1889 out:
1890         if (res == 0) {
1891                 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1892                 scst_check_reassign_sessions();
1893         }
1894
1895         TRACE_EXIT_RES(res);
1896         return res;
1897
1898 out_free:
1899         kfree(n);
1900         goto out;
1901 }
1902
1903 /* scst_mutex supposed to be held */
1904 void __scst_acg_remove_acn(struct scst_acn *n)
1905 {
1906         TRACE_ENTRY();
1907
1908         list_del(&n->acn_list_entry);
1909         kfree(n->name);
1910         kfree(n);
1911
1912         TRACE_EXIT();
1913         return;
1914 }
1915
1916 /* The activity supposed to be suspended and scst_mutex held */
1917 int scst_acg_remove_name(struct scst_acg *acg, const char *name, bool reassign)
1918 {
1919         int res = -EINVAL;
1920         struct scst_acn *n;
1921
1922         TRACE_ENTRY();
1923
1924         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1925                 if (strcmp(n->name, name) == 0) {
1926                         __scst_acg_remove_acn(n);
1927                         res = 0;
1928                         break;
1929                 }
1930         }
1931
1932         if (res == 0) {
1933                 PRINT_INFO("Removed name %s from group %s", name,
1934                         acg->acg_name);
1935                 if (reassign)
1936                         scst_check_reassign_sessions();
1937         } else {
1938                 PRINT_ERROR("Unable to find name %s in group %s", name,
1939                         acg->acg_name);
     }
1940
1941         TRACE_EXIT_RES(res);
1942         return res;
1943 }
1944
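     /*
      * Internal commands (e.g. REQUEST SENSE) borrow the session, device and
      * LUN of the command they serve and are queued HEAD OF QUEUE, so they
      * run ahead of the initiator's own commands.
      */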
1945 static struct scst_cmd *scst_create_prepare_internal_cmd(
1946         struct scst_cmd *orig_cmd, int bufsize)
1947 {
1948         struct scst_cmd *res;
1949         gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1950
1951         TRACE_ENTRY();
1952
1953         res = scst_alloc_cmd(gfp_mask);
1954         if (res == NULL)
1955                 goto out;
1956
1957         res->cmd_lists = orig_cmd->cmd_lists;
1958         res->sess = orig_cmd->sess;
1959         res->atomic = scst_cmd_atomic(orig_cmd);
1960         res->internal = 1;
1961         res->tgtt = orig_cmd->tgtt;
1962         res->tgt = orig_cmd->tgt;
1963         res->dev = orig_cmd->dev;
1964         res->tgt_dev = orig_cmd->tgt_dev;
1965         res->lun = orig_cmd->lun;
1966         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1967         res->data_direction = SCST_DATA_UNKNOWN;
1968         res->orig_cmd = orig_cmd;
1969         res->bufflen = bufsize;
1970
1971         scst_sess_get(res->sess);
1972         if (res->tgt_dev != NULL)
1973                 __scst_get(0);
1974
1975         res->state = SCST_CMD_STATE_PRE_PARSE;
1976
1977 out:
1978         TRACE_EXIT_HRES((unsigned long)res);
1979         return res;
1980 }
1981
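     /*
      * Builds a 6-byte REQUEST SENSE for orig_cmd's device and puts it at
      * the head of the active command list; the retrieved sense is handed
      * back to orig_cmd in scst_complete_request_sense().
      */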
1982 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1983 {
1984         int res = 0;
1985         static const uint8_t request_sense[6] =
1986             { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1987         struct scst_cmd *rs_cmd;
1988
1989         TRACE_ENTRY();
1990
1991         if (orig_cmd->sense != NULL) {
1992                 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
1993                         orig_cmd->sense, orig_cmd);
1994                 mempool_free(orig_cmd->sense, scst_sense_mempool);
1995                 orig_cmd->sense = NULL;
1996         }
1997
1998         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
1999                         SCST_SENSE_BUFFERSIZE);
2000         if (rs_cmd == NULL)
2001                 goto out_error;
2002
2003         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
2004         rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
2005         rs_cmd->cdb_len = sizeof(request_sense);
2006         rs_cmd->data_direction = SCST_DATA_READ;
2007         rs_cmd->expected_data_direction = rs_cmd->data_direction;
2008         rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
2009         rs_cmd->expected_values_set = 1;
2010
2011         TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
2012                 "cmd list", rs_cmd);
2013         spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2014         list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
2015         wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
2016         spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2017
2018 out:
2019         TRACE_EXIT_RES(res);
2020         return res;
2021
2022 out_error:
2023         res = -1;
2024         goto out;
2025 }
2026
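     /*
      * Completion of the internal REQUEST SENSE: transplant the returned
      * sense into orig_cmd (or fail it with HARDWARE ERROR) and requeue
      * orig_cmd at the head of its active command list.
      */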
2027 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
2028 {
2029         struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
2030         uint8_t *buf;
2031         int len;
2032
2033         TRACE_ENTRY();
2034
2035         sBUG_ON(orig_cmd == NULL);
2036
2037         len = scst_get_buf_first(req_cmd, &buf);
2038
2039         if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
2040             SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
2041                 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
2042                         buf, len);
2043                 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
2044                         len);
2045         } else {
2046                 PRINT_ERROR("%s", "Unable to get the sense via "
2047                         "REQUEST SENSE, returning HARDWARE ERROR");
2048                 scst_set_cmd_error(orig_cmd,
2049                         SCST_LOAD_SENSE(scst_sense_hardw_error));
2050         }
2051
2052         if (len > 0)
2053                 scst_put_buf(req_cmd, buf);
2054
2055         TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
2056                 "cmd list", orig_cmd);
2057         spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2058         list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
2059         wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
2060         spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2061
2062         TRACE_EXIT();
2063         return;
2064 }
2065
2066 int scst_finish_internal_cmd(struct scst_cmd *cmd)
2067 {
2068         int res;
2069
2070         TRACE_ENTRY();
2071
2072         sBUG_ON(!cmd->internal);
2073
2074         if (cmd->cdb[0] == REQUEST_SENSE)
2075                 scst_complete_request_sense(cmd);
2076
2077         __scst_cmd_put(cmd);
2078
2079         res = SCST_CMD_STATE_RES_CONT_NEXT;
2080
2081         TRACE_EXIT_HRES(res);
2082         return res;
2083 }
2084
2085 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2086 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
2087 {
2088         struct scsi_request *req;
2089
2090         TRACE_ENTRY();
2091
2092         if (scsi_cmd && (req = scsi_cmd->sc_request)) {
2093                 if (req->sr_bufflen)
2094                         kfree(req->sr_buffer);
2095                 scsi_release_request(req);
2096         }
2099
2100         TRACE_EXIT();
2101         return;
2102 }
2103
2104 static void scst_send_release(struct scst_device *dev)
2105 {
2106         struct scsi_request *req;
2107         struct scsi_device *scsi_dev;
2108         uint8_t cdb[6];
2109
2110         TRACE_ENTRY();
2111
2112         if (dev->scsi_dev == NULL)
2113                 goto out;
2114
2115         scsi_dev = dev->scsi_dev;
2116
2117         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
2118         if (req == NULL) {
2119                 PRINT_ERROR("Allocation of scsi_request failed: unable "
2120                             "to RELEASE device %d:%d:%d:%d",
2121                             scsi_dev->host->host_no, scsi_dev->channel,
2122                             scsi_dev->id, scsi_dev->lun);
2123                 goto out;
2124         }
2125
2126         memset(cdb, 0, sizeof(cdb));
2127         cdb[0] = RELEASE;
2128         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2129             ((scsi_dev->lun << 5) & 0xe0) : 0;
2130         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
2131         req->sr_cmd_len = sizeof(cdb);
2132         req->sr_data_direction = SCST_DATA_NONE;
2133         req->sr_use_sg = 0;
2134         req->sr_bufflen = 0;
2135         req->sr_buffer = NULL;
2136         req->sr_request->rq_disk = dev->rq_disk;
2137         req->sr_sense_buffer[0] = 0;
2138
2139         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
2140                 "mid-level", req);
2141         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
2142                     scst_req_done, 15, 3);
2143
2144 out:
2145         TRACE_EXIT();
2146         return;
2147 }
2148 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2149 static void scst_send_release(struct scst_device *dev)
2150 {
2151         struct scsi_device *scsi_dev;
2152         unsigned char cdb[6];
2153         uint8_t sense[SCSI_SENSE_BUFFERSIZE];
2154         int rc, i;
2155
2156         TRACE_ENTRY();
2157
2158         if (dev->scsi_dev == NULL)
2159                 goto out;
2160
2161         scsi_dev = dev->scsi_dev;
2162
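             /* RELEASE is retried a few times in case of transient failures */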
2163         for (i = 0; i < 5; i++) {
2164                 memset(cdb, 0, sizeof(cdb));
2165                 cdb[0] = RELEASE;
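                     /*
                      * SCSI-2 and older devices expect the LUN in bits 5-7
                      * of CDB byte 1.
                      */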
2166                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2167                     ((scsi_dev->lun << 5) & 0xe0) : 0;
2168
2169                 memset(sense, 0, sizeof(sense));
2170
2171                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
2172                         "SCSI mid-level");
2173                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
2174                                 sense, 15, 0, 0
2175 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
2176                                 , NULL
2177 #endif
2178                                 );
2179                 TRACE_DBG("RELEASE done: %x", rc);
2180
2181                 if (scsi_status_is_good(rc)) {
2182                         break;
2183                 } else {
2184                         PRINT_ERROR("RELEASE failed: %d", rc);
2185                         PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
2186                         scst_check_internal_sense(dev, rc, sense,
2187                                 sizeof(sense));
2188                 }
2189         }
2190
2191 out:
2192         TRACE_EXIT();
2193         return;
2194 }
2195 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2196
2197 /* scst_mutex supposed to be held */
2198 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
2199 {
2200         struct scst_device *dev = tgt_dev->dev;
2201         int release = 0;
2202
2203         TRACE_ENTRY();
2204
2205         spin_lock_bh(&dev->dev_lock);
2206         if (dev->dev_reserved &&
2207             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
2208                 /* SCST_TGT_DEV_RESERVED marks tgt_devs locked out by a
                      * reservation, so this tgt_dev is the one holding it */
2209                 struct scst_tgt_dev *tgt_dev_tmp;
2210                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2211                                     dev_tgt_dev_list_entry) {
2212                         clear_bit(SCST_TGT_DEV_RESERVED,
2213                                     &tgt_dev_tmp->tgt_dev_flags);
2214                 }
2215                 dev->dev_reserved = 0;
2216                 release = 1;
2217         }
2218         spin_unlock_bh(&dev->dev_lock);
2219
2220         if (release)
2221                 scst_send_release(dev);
2222
2223         TRACE_EXIT();
2224         return;
2225 }
2226
2227 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
2228         const char *initiator_name)
2229 {
2230         struct scst_session *sess;
2231         int i;
2232         int len;
2233         char *nm;
2234
2235         TRACE_ENTRY();
2236
2237 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2238         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2239 #else
2240         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2241 #endif
2242         if (sess == NULL) {
2243                 TRACE(TRACE_OUT_OF_MEM, "%s",
2244                       "Allocation of scst_session failed");
2245                 goto out;
2246         }
2247 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2248         memset(sess, 0, sizeof(*sess));
2249 #endif
2250
2251         sess->init_phase = SCST_SESS_IPH_INITING;
2252         sess->shut_phase = SCST_SESS_SPH_READY;
2253         atomic_set(&sess->refcnt, 0);
2254         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2255                 struct list_head *sess_tgt_dev_list_head =
2256                          &sess->sess_tgt_dev_list_hash[i];
2257                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2258         }
2259         spin_lock_init(&sess->sess_list_lock);
2260         INIT_LIST_HEAD(&sess->search_cmd_list);
2261         INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2262         sess->tgt = tgt;
2263         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2264         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2265 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2266         INIT_DELAYED_WORK(&sess->hw_pending_work,
2267                 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2268 #else
2269         INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2270 #endif
2271
2272 #ifdef CONFIG_SCST_MEASURE_LATENCY
2273         spin_lock_init(&sess->meas_lock);
2274 #endif
2275
2276         len = strlen(initiator_name);
2277         nm = kmalloc(len + 1, gfp_mask);
2278         if (nm == NULL) {
2279                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2280                 goto out_free;
2281         }
2282
2283         strcpy(nm, initiator_name);
2284         sess->initiator_name = nm;
2285
2286 out:
2287         TRACE_EXIT();
2288         return sess;
2289
2290 out_free:
2291         kmem_cache_free(scst_sess_cachep, sess);
2292         sess = NULL;
2293         goto out;
2294 }
2295
2296 void scst_free_session(struct scst_session *sess)
2297 {
2298         TRACE_ENTRY();
2299
2300         mutex_lock(&scst_mutex);
2301
2302         TRACE_DBG("Removing sess %p from the list", sess);
2303         list_del(&sess->sess_list_entry);
2304         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2305         list_del(&sess->acg_sess_list_entry);
2306
2307         scst_sess_free_tgt_devs(sess);
2308
2309         wake_up_all(&sess->tgt->unreg_waitQ);
2310
2311         mutex_unlock(&scst_mutex);
2312
2313         kfree(sess->initiator_name);
2314         kmem_cache_free(scst_sess_cachep, sess);
2315
2316         TRACE_EXIT();
2317         return;
2318 }
2319
2320 void scst_free_session_callback(struct scst_session *sess)
2321 {
2322         struct completion *c;
2323
2324         TRACE_ENTRY();
2325
2326         TRACE_DBG("Freeing session %p", sess);
2327
2328         cancel_delayed_work_sync(&sess->hw_pending_work);
2329
2330         c = sess->shutdown_compl;
2331
2332         if (sess->unreg_done_fn) {
2333                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2334                 sess->unreg_done_fn(sess);
2335                 TRACE_DBG("%s", "unreg_done_fn() returned");
2336         }
2337         scst_free_session(sess);
2338
2339         if (c)
2340                 complete_all(c);
2341
2342         TRACE_EXIT();
2343         return;
2344 }
2345
2346 void scst_sched_session_free(struct scst_session *sess)
2347 {
2348         unsigned long flags;
2349
2350         TRACE_ENTRY();
2351
2352         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2353                 PRINT_CRIT_ERROR("session %p is going to shut down with "
2354                         "unknown shut phase %lx", sess, sess->shut_phase);
2355                 sBUG();
2356         }
2357
2358         spin_lock_irqsave(&scst_mgmt_lock, flags);
2359         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2360         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2361         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2362
2363         wake_up(&scst_mgmt_waitQ);
2364
2365         TRACE_EXIT();
2366         return;
2367 }
2368
2369 void scst_cmd_get(struct scst_cmd *cmd)
2370 {
2371         __scst_cmd_get(cmd);
2372 }
2373 EXPORT_SYMBOL(scst_cmd_get);
2374
2375 void scst_cmd_put(struct scst_cmd *cmd)
2376 {
2377         __scst_cmd_put(cmd);
2378 }
2379 EXPORT_SYMBOL(scst_cmd_put);
2380
2381 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2382 {
2383         struct scst_cmd *cmd;
2384
2385         TRACE_ENTRY();
2386
2387 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2388         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2389 #else
2390         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2391 #endif
2392         if (cmd == NULL) {
2393                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2394                 goto out;
2395         }
2396 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2397         memset(cmd, 0, sizeof(*cmd));
2398 #endif
2399
2400         cmd->state = SCST_CMD_STATE_INIT_WAIT;
2401         cmd->start_time = jiffies;
2402         atomic_set(&cmd->cmd_ref, 1);
2403         cmd->cmd_lists = &scst_main_cmd_lists;
2404         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2405         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2406         cmd->timeout = SCST_DEFAULT_TIMEOUT;
2407         cmd->retries = 0;
2408         cmd->data_len = -1;
2409         cmd->is_send_status = 1;
2410         cmd->resp_data_len = -1;
2411
2412         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2413         cmd->dbl_ua_orig_resp_data_len = -1;
2414
2415 out:
2416         TRACE_EXIT();
2417         return cmd;
2418 }
2419
2420 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2421 {
2422         scst_sess_put(cmd->sess);
2423
2424         /*
2425          * At this point tgt_dev can be dead, but the pointer remains non-NULL
2426          */
2427         if (likely(cmd->tgt_dev != NULL))
2428                 __scst_put();
2429
2430         scst_destroy_cmd(cmd);
2431         return;
2432 }
2433
2434 /* No locks supposed to be held */
2435 void scst_free_cmd(struct scst_cmd *cmd)
2436 {
2437         int destroy = 1;
2438
2439         TRACE_ENTRY();
2440
2441         TRACE_DBG("Freeing cmd %p (tag %llu)",
2442                   cmd, (long long unsigned int)cmd->tag);
2443
2444         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2445                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2446                         cmd, atomic_read(&scst_cmd_count));
2447         }
2448
2449         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2450                 cmd->dec_on_dev_needed);
2451
2452 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2453 #if defined(CONFIG_SCST_EXTRACHECKS)
2454         if (cmd->scsi_req) {
2455                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2456                         "scsi_req!");
2457                 scst_release_request(cmd);
2458         }
2459 #endif
2460 #endif
2461
2462         /*
2463          * Target driver can already free sg buffer before calling
2464          * scst_tgt_cmd_done(). E.g., scst_local has to do that.
2465          */
2466         if (!cmd->tgt_data_buf_alloced)
2467                 scst_check_restore_sg_buff(cmd);
2468
2469         if (cmd->tgtt->on_free_cmd != NULL) {
2470                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2471                 cmd->tgtt->on_free_cmd(cmd);
2472                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2473         }
2474
2475         if (likely(cmd->dev != NULL)) {
2476                 struct scst_dev_type *handler = cmd->dev->handler;
2477                 if (handler->on_free_cmd != NULL) {
2478                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2479                               handler->name, cmd);
2480                         handler->on_free_cmd(cmd);
2481                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
2482                                 handler->name);
2483                 }
2484         }
2485
2486         scst_release_space(cmd);
2487
2488         if (unlikely(cmd->sense != NULL)) {
2489                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2490                 mempool_free(cmd->sense, scst_sense_mempool);
2491                 cmd->sense = NULL;
2492         }
2493
2494         if (likely(cmd->tgt_dev != NULL)) {
2495 #ifdef CONFIG_SCST_EXTRACHECKS
2496                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2497                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
2498                             "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2499                             cmd, cmd->cdb[0], cmd->tgtt->name,
2500                             (long long unsigned int)cmd->lun,
2501                             cmd->sn, cmd->tgt_dev->expected_sn);
2502                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2503                 }
2504 #endif
2505
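                     /*
                      * An out-of-SN command may still be referenced by the
                      * SN bookkeeping; test_and_set_bit() ensures only the
                      * second side to mark SCST_CMD_CAN_BE_DESTROYED frees it.
                      */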
2506                 if (unlikely(cmd->out_of_sn)) {
2507                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2508                                 "destroy=%d", cmd,
2509                                 (long long unsigned int)cmd->tag,
2510                                 cmd->sn, destroy);
2511                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2512                                         &cmd->cmd_flags);
2513                 }
2514         }
2515
2516         if (likely(destroy))
2517                 scst_destroy_put_cmd(cmd);
2518
2519         TRACE_EXIT();
2520         return;
2521 }
2522
2523 /* No locks supposed to be held. */
2524 void scst_check_retries(struct scst_tgt *tgt)
2525 {
2526         int need_wake_up = 0;
2527
2528         TRACE_ENTRY();
2529
2530         /*
2531          * We don't worry about overflow of finished_cmds, because we check
2532          * only for its change.
2533          */
2534         atomic_inc(&tgt->finished_cmds);
2535         /* See comment in scst_queue_retry_cmd() */
2536         smp_mb__after_atomic_inc();
2537         if (unlikely(tgt->retry_cmds > 0)) {
2538                 struct scst_cmd *c, *tc;
2539                 unsigned long flags;
2540
2541                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2542                       tgt->retry_cmds);
2543
2544                 spin_lock_irqsave(&tgt->tgt_lock, flags);
2545                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2546                                 cmd_list_entry) {
2547                         tgt->retry_cmds--;
2548
2549                         TRACE_RETRY("Moving retry cmd %p to head of active "
2550                                 "cmd list (retry_cmds left %d)",
2551                                 c, tgt->retry_cmds);
2552                         spin_lock(&c->cmd_lists->cmd_list_lock);
2553                         list_move(&c->cmd_list_entry,
2554                                   &c->cmd_lists->active_cmd_list);
2555                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2556                         spin_unlock(&c->cmd_lists->cmd_list_lock);
2557
2558                         need_wake_up++;
2559                         if (need_wake_up >= 2) /* "slow start" */
2560                                 break;
2561                 }
2562                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2563         }
2564
2565         TRACE_EXIT();
2566         return;
2567 }
2568
2569 void scst_tgt_retry_timer_fn(unsigned long arg)
2570 {
2571         struct scst_tgt *tgt = (struct scst_tgt *)arg;
2572         unsigned long flags;
2573
2574         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2575
2576         spin_lock_irqsave(&tgt->tgt_lock, flags);
2577         tgt->retry_timer_active = 0;
2578         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2579
2580         scst_check_retries(tgt);
2581
2582         TRACE_EXIT();
2583         return;
2584 }
2585
2586 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2587 {
2588         struct scst_mgmt_cmd *mcmd;
2589
2590         TRACE_ENTRY();
2591
2592         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2593         if (mcmd == NULL) {
2594                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2595                         "failed, some commands and their data could leak");
2596                 goto out;
2597         }
2598         memset(mcmd, 0, sizeof(*mcmd));
2599
2600 out:
2601         TRACE_EXIT();
2602         return mcmd;
2603 }
2604
2605 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2606 {
2607         unsigned long flags;
2608
2609         TRACE_ENTRY();
2610
2611         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2612         atomic_dec(&mcmd->sess->sess_cmd_count);
2613         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2614
2615         scst_sess_put(mcmd->sess);
2616
2617         if (mcmd->mcmd_tgt_dev != NULL)
2618                 __scst_put();
2619
2620         mempool_free(mcmd, scst_mgmt_mempool);
2621
2622         TRACE_EXIT();
2623         return;
2624 }
2625
2626 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2627 int scst_alloc_request(struct scst_cmd *cmd)
2628 {
2629         int res = 0;
2630         struct scsi_request *req;
2631         gfp_t gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2632
2633         TRACE_ENTRY();
2634
2635         /* cmd->dev->scsi_dev must be non-NULL here */
2636         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2637         if (req == NULL) {
2638                 TRACE(TRACE_OUT_OF_MEM, "%s",
2639                       "Allocation of scsi_request failed");
2640                 res = -ENOMEM;
2641                 goto out;
2642         }
2643
2644         cmd->scsi_req = req;
2645
2646         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2647         req->sr_cmd_len = cmd->cdb_len;
2648         req->sr_data_direction = cmd->data_direction;
2649         req->sr_use_sg = cmd->sg_cnt;
2650         req->sr_bufflen = cmd->bufflen;
2651         req->sr_buffer = cmd->sg;
2652         req->sr_request->rq_disk = cmd->dev->rq_disk;
2653         req->sr_sense_buffer[0] = 0;
2654
2655         cmd->scsi_req->upper_private_data = cmd;
2656
2657 out:
2658         TRACE_EXIT();
2659         return res;
2660 }
2661
2662 void scst_release_request(struct scst_cmd *cmd)
2663 {
2664         scsi_release_request(cmd->scsi_req);
2665         cmd->scsi_req = NULL;
2666 }
2667 #endif
2668
2669 static bool is_report_sg_limitation(void)
2670 {
2671 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2672         return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2673 #else
2674         return false;
2675 #endif
2676 }
2677
2678 int scst_alloc_space(struct scst_cmd *cmd)
2679 {
2680         gfp_t gfp_mask;
2681         int res = -ENOMEM;
2682         int atomic = scst_cmd_atomic(cmd);
2683         int flags;
2684         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2685         static int ll;
2686
2687         TRACE_ENTRY();
2688
2689         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2690
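             /*
              * In atomic context only cached SGV pool entries may be taken:
              * a cache miss must fail rather than sleep allocating new pages.
              */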
2691         flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2692         if (cmd->no_sgv)
2693                 flags |= SGV_POOL_ALLOC_NO_CACHED;
2694
2695         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2696                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2697         if (cmd->sg == NULL)
2698                 goto out;
2699
2700         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2701                 if ((ll < 10) || is_report_sg_limitation()) {
2702                         PRINT_INFO("Unable to complete command due to "
2703                                 "SG IO count limitation (requested %d, "
2704                                 "available %d, tgt lim %d)", cmd->sg_cnt,
2705                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2706                         ll++;
2707                 }
2708                 goto out_sg_free;
2709         }
2710
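             /* Bidirectional commands need a second SG vector for the IN data */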
2711         if (cmd->data_direction != SCST_DATA_BIDI)
2712                 goto success;
2713
2714         cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2715                          flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2716                          &cmd->dev->dev_mem_lim, NULL);
2717         if (cmd->in_sg == NULL)
2718                 goto out_sg_free;
2719
2720         if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2721                 if ((ll < 10) || is_report_sg_limitation()) {
2722                         PRINT_INFO("Unable to complete command due to "
2723                                 "SG IO count limitation (IN buffer, requested "
2724                                 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2725                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2726                         ll++;
2727                 }
2728                 goto out_in_sg_free;
2729         }
2730
2731 success:
2732         res = 0;
2733
2734 out:
2735         TRACE_EXIT();
2736         return res;
2737
2738 out_in_sg_free:
2739         sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2740         cmd->in_sgv = NULL;
2741         cmd->in_sg = NULL;
2742         cmd->in_sg_cnt = 0;
2743
2744 out_sg_free:
2745         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2746         cmd->sgv = NULL;
2747         cmd->sg = NULL;
2748         cmd->sg_cnt = 0;
2749         goto out;
2750 }
2751
2752 static void scst_release_space(struct scst_cmd *cmd)
2753 {
2754         TRACE_ENTRY();
2755
2756         if (cmd->sgv == NULL)
2757                 goto out;
2758
2759         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2760                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2761                 goto out;
2762         }
2763
2764         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2765         cmd->sgv = NULL;
2766         cmd->sg_cnt = 0;
2767         cmd->sg = NULL;
2768         cmd->bufflen = 0;
2769         cmd->data_len = 0;
2770
2771         if (cmd->in_sgv != NULL) {
2772                 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2773                 cmd->in_sgv = NULL;
2774                 cmd->in_sg_cnt = 0;
2775                 cmd->in_sg = NULL;
2776                 cmd->in_bufflen = 0;
2777         }
2778
2779 out:
2780         TRACE_EXIT();
2781         return;
2782 }
2783
2784 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2785
2786 /*
2787  * Can switch to the next dst_sg element, so, to copy to strictly only
2788  * one dst_sg element, it must be either last in the chain, or
2789  * copy_len == dst_sg->length.
2790  */
2791 static int __sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2792                           size_t *pdst_offs, struct scatterlist *src_sg,
2793                           size_t copy_len,
2794                           enum km_type d_km_type, enum km_type s_km_type)
2795 {
2796         int res = 0;
2797         struct scatterlist *dst_sg;
2798         size_t src_len, dst_len, src_offs, dst_offs;
2799         struct page *src_page, *dst_page;
2800
2801         if (copy_len == 0)
2802                 copy_len = 0x7FFFFFFF; /* copy all */
2803
2804         dst_sg = *pdst_sg;
2805         dst_len = *pdst_len;
2806         dst_offs = *pdst_offs;
2807         dst_page = sg_page(dst_sg);
2808
2809         src_page = sg_page(src_sg);
2810         src_len = src_sg->length;
2811         src_offs = src_sg->offset;
2812
2813         do {
2814                 void *saddr, *daddr;
2815                 size_t n;
2816
2817                 saddr = kmap_atomic(src_page +
2818                                          (src_offs >> PAGE_SHIFT), s_km_type) +
2819                                     (src_offs & ~PAGE_MASK);
2820                 daddr = kmap_atomic(dst_page +
2821                                         (dst_offs >> PAGE_SHIFT), d_km_type) +
2822                                     (dst_offs & ~PAGE_MASK);
2823
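                     /*
                      * Fast path: with both sides page-aligned and at least
                      * a whole page left everywhere, copy a full page at
                      * once; otherwise memcpy() the largest safe amount.
                      */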
2824                 if (((src_offs & ~PAGE_MASK) == 0) &&
2825                     ((dst_offs & ~PAGE_MASK) == 0) &&
2826                     (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2827                     (copy_len >= PAGE_SIZE)) {
2828                         copy_page(daddr, saddr);
2829                         n = PAGE_SIZE;
2830                 } else {
2831                         n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2832                                           PAGE_SIZE - (src_offs & ~PAGE_MASK));
2833                         n = min(n, src_len);
2834                         n = min(n, dst_len);
2835                         n = min_t(size_t, n, copy_len);
2836                         memcpy(daddr, saddr, n);
2837                 }
2838                 dst_offs += n;
2839                 src_offs += n;
2840
2841                 kunmap_atomic(saddr, s_km_type);
2842                 kunmap_atomic(daddr, d_km_type);
2843
2844                 res += n;
2845                 copy_len -= n;
2846                 if (copy_len == 0)
2847                         goto out;
2848
2849                 src_len -= n;
2850                 dst_len -= n;
2851                 if (dst_len == 0) {
2852                         dst_sg = sg_next(dst_sg);
2853                         if (dst_sg == NULL)
2854                                 goto out;
2855                         dst_page = sg_page(dst_sg);
2856                         dst_len = dst_sg->length;
2857                         dst_offs = dst_sg->offset;
2858                 }
2859         } while (src_len > 0);
2860
2861 out:
2862         *pdst_sg = dst_sg;
2863         *pdst_len = dst_len;
2864         *pdst_offs = dst_offs;
2865         return res;
2866 }
2867
2868 /**
2869  * sg_copy_elem - copy one SG element to another
2870  * @dst_sg:     destination SG element
2871  * @src_sg:     source SG element
2872  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2873  * @d_km_type:  kmap_atomic type for the destination SG
2874  * @s_km_type:  kmap_atomic type for the source SG
2875  *
2876  * Description:
2877  *    Data from the source SG element will be copied to the destination SG
2878  *    element. Returns number of bytes copied. Can switch to the next dst_sg
2879  *    element, so, to copy to strictly only one dst_sg element, it must be
2880  *    either last in the chain, or copy_len == dst_sg->length.
2881  */
2882 int sg_copy_elem(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2883                  size_t copy_len, enum km_type d_km_type,
2884                  enum km_type s_km_type)
2885 {
2886         size_t dst_len = dst_sg->length, dst_offs = dst_sg->offset;
2887
2888         return __sg_copy_elem(&dst_sg, &dst_len, &dst_offs, src_sg,
2889                 copy_len, d_km_type, s_km_type);
2890 }
2891 
2893 /**
2894  * sg_copy - copy one SG vector to another
2895  * @dst_sg:     destination SG
2896  * @src_sg:     source SG
2897  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2898  * @d_km_type:  kmap_atomic type for the destination SG
2899  * @s_km_type:  kmap_atomic type for the source SG
2900  *
2901  * Description:
2902  *    Data from the source SG vector will be copied to the destination SG
2903  *    vector. End of the vectors will be determined by sg_next() returning
2904  *    NULL. Returns number of bytes copied.
2905  */
2906 int sg_copy(struct scatterlist *dst_sg,
2907             struct scatterlist *src_sg, size_t copy_len,
2908             enum km_type d_km_type, enum km_type s_km_type)
2909 {
2910         int res = 0;
2911         size_t dst_len, dst_offs;
2912
2913         if (copy_len == 0)
2914                 copy_len = 0x7FFFFFFF; /* copy all */
2915
2916         dst_len = dst_sg->length;
2917         dst_offs = dst_sg->offset;
2918
2919         do {
2920                 int copied = __sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2921                                 src_sg, copy_len, d_km_type, s_km_type);

                     res += copied;
                     copy_len -= copied;
2922                 if ((copy_len == 0) || (dst_sg == NULL))
2923                         goto out;
2924 
2925                 src_sg = sg_next(src_sg);
2926         } while (src_sg != NULL);
2927
2928 out:
2929         return res;
2930 }
2931 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
2932
2933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2934 #include <linux/pfn.h>
2935
2936 struct blk_kern_sg_hdr {
2937         struct scatterlist *orig_sgp;
2938         union {
2939                 struct sg_table new_sg_table;
2940                 struct scatterlist *saved_sg;
2941         };
2942         bool tail_only;
2943 };
2944
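     /*
      * The bounce bookkeeping header lives in the leading entries of the
      * allocated scatterlist array itself; this is how many entries it needs.
      */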
2945 #define BLK_KERN_SG_HDR_ENTRIES (1 + (sizeof(struct blk_kern_sg_hdr) - 1) / \
2946                                  sizeof(struct scatterlist))
2947
2948 /**
2949  * blk_rq_unmap_kern_sg - "unmaps" data buffers in the request
2950  * @req:        request to unmap
2951  * @do_copy:    sets copy data between buffers, if needed, or not
2952  *
2953  * Description:
2954  *    It frees all additional buffers allocated for SG->BIO mapping.
2955  */
2956 void blk_rq_unmap_kern_sg(struct request *req, int do_copy)
2957 {
2958         struct blk_kern_sg_hdr *hdr = (struct blk_kern_sg_hdr *)req->end_io_data;
2959
2960         if (hdr == NULL)
2961                 goto out;
2962
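             /*
              * For READ requests, if do_copy is set, the bounced data must
              * be copied back into the caller's original SG before the
              * bounce pages are freed.
              */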
2963         if (hdr->tail_only) {
2964                 /* Tail element only was copied */
2965                 struct scatterlist *saved_sg = hdr->saved_sg;
2966                 struct scatterlist *tail_sg = hdr->orig_sgp;
2967
2968                 if ((rq_data_dir(req) == READ) && do_copy)
2969                         sg_copy_elem(saved_sg, tail_sg, tail_sg->length,
2970                                 KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2971
2972                 __free_pages(sg_page(tail_sg), get_order(tail_sg->length));
2973                 *tail_sg = *saved_sg;
2974                 kfree(hdr);
2975         } else {
2976                 /* The whole SG was copied */
2977                 struct sg_table new_sg_table = hdr->new_sg_table;
2978                 struct scatterlist *new_sgl = new_sg_table.sgl +
2979                                                 BLK_KERN_SG_HDR_ENTRIES;
2980                 struct scatterlist *orig_sgl = hdr->orig_sgp;
2981
2982                 if ((rq_data_dir(req) == READ) && do_copy)
2983                         sg_copy(orig_sgl, new_sgl, 0, KM_BIO_DST_IRQ,
2984                                 KM_BIO_SRC_IRQ);
2985
2986                 sg_free_table(&new_sg_table);
2987         }
2988
2989 out:
2990         return;
2991 }
2992
2993 static int blk_rq_handle_align_tail_only(struct request *rq,
2994                                          struct scatterlist *sg_to_copy,
2995                                          gfp_t gfp, gfp_t page_gfp)
2996 {
2997         int res = 0;
2998         struct scatterlist *tail_sg = sg_to_copy;
2999         struct scatterlist *saved_sg;
3000         struct blk_kern_sg_hdr *hdr;
3001         int saved_sg_nents;
3002         struct page *pg;
3003
3004         saved_sg_nents = 1 + BLK_KERN_SG_HDR_ENTRIES;
3005
3006         saved_sg = kmalloc(sizeof(*saved_sg) * saved_sg_nents, gfp);
3007         if (saved_sg == NULL)
3008                 goto out_nomem;
3009
3010         sg_init_table(saved_sg, saved_sg_nents);
3011
3012         hdr = (struct blk_kern_sg_hdr *)saved_sg;
3013         saved_sg += BLK_KERN_SG_HDR_ENTRIES;
3014         saved_sg_nents -= BLK_KERN_SG_HDR_ENTRIES;
3015
3016         hdr->tail_only = true;
3017         hdr->orig_sgp = tail_sg;
3018         hdr->saved_sg = saved_sg;
3019
3020         *saved_sg = *tail_sg;
3021
3022         pg = alloc_pages(page_gfp, get_order(tail_sg->length));
3023         if (pg == NULL)
3024                 goto err_free_saved_sg;
3025
3026         sg_assign_page(tail_sg, pg);
3027         tail_sg->offset = 0;
3028
3029         if (rq_data_dir(rq) == WRITE)
3030                 sg_copy_elem(tail_sg, saved_sg, saved_sg->length,
3031                                 KM_USER1, KM_USER0);
3032
3033         rq->end_io_data = hdr;
3034         rq->cmd_flags |= REQ_COPY_USER;
3035
3036 out:
3037         return res;
3038
3039 err_free_saved_sg:
3040         kfree(saved_sg);
3041
3042 out_nomem:
3043         res = -ENOMEM;
3044         goto out;
3045 }
3046
3047 static int blk_rq_handle_align(struct request *rq, struct scatterlist **psgl,
3048                                int *pnents, struct scatterlist *sgl_to_copy,
3049                                int nents_to_copy, gfp_t gfp, gfp_t page_gfp)
3050 {
3051         int res = 0, i;
3052         struct scatterlist *sgl = *psgl;
3053         int nents = *pnents;
3054         struct sg_table sg_table;
3055         struct scatterlist *sg;
3056         struct scatterlist *new_sgl;
3057         size_t len = 0, to_copy;
3058         int new_sgl_nents;
3059         struct blk_kern_sg_hdr *hdr;
3060
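             /*
              * Two strategies: if only the tail element needs realignment,
              * bounce just that element; otherwise build a whole new
              * page-aligned SG table and copy the data through it.
              */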
3061         if (sgl != sgl_to_copy) {
3062                 /* copy only the last element */
3063                 res = blk_rq_handle_align_tail_only(rq, sgl_to_copy,
3064                                 gfp, page_gfp);
3065                 if (res == 0)
3066                         goto out;
3067                 /* else go through */
3068         }
3069
3070         for_each_sg(sgl, sg, nents, i)
3071                 len += sg->length;
3072         to_copy = len;
3073
3074         new_sgl_nents = PFN_UP(len) + BLK_KERN_SG_HDR_ENTRIES;
3075
3076         res = sg_alloc_table(&sg_table, new_sgl_nents, gfp);
3077         if (res != 0)
3078                 goto out;
3079
3080         new_sgl = sg_table.sgl;
3081         hdr = (struct blk_kern_sg_hdr *)new_sgl;
3082         new_sgl += BLK_KERN_SG_HDR_ENTRIES;
3083         new_sgl_nents -= BLK_KERN_SG_HDR_ENTRIES;
3084
3085         hdr->tail_only = false;
3086         hdr->orig_sgp = sgl;
3087         hdr->new_sg_table = sg_table;
3088
3089         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3090                 struct page *pg;
3091
3092                 pg = alloc_page(page_gfp);
3093                 if (pg == NULL)
3094                         goto err_free_new_sgl;
3095
3096                 sg_assign_page(sg, pg);
3097                 sg->length = min_t(size_t, PAGE_SIZE, len);
3098
3099                 len -= PAGE_SIZE;
3100         }
3101
3102         if (rq_data_dir(rq) == WRITE) {
3103                 /*
3104                  * Limit the amount of copied data to to_copy, because the
3105                  * last element of sgl might not be marked as the last one
3106                  * in the SG chain.
3107                  */
3108                 sg_copy(new_sgl, sgl, to_copy, KM_USER0, KM_USER1);
3109         }
3110
3111         rq->end_io_data = hdr;
3112         rq->cmd_flags |= REQ_COPY_USER;
3113
3114         *psgl = new_sgl;
3115         *pnents = new_sgl_nents;
3116
3117 out:
3118         return res;
3119
3120 err_free_new_sgl:
3121         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3122                 struct page *pg = sg_page(sg);
3123                 if (pg == NULL)
3124                         break;
3125                 __free_page(pg);
3126         }
3127         sg_free_table(&sg_table);
3128
3129         res = -ENOMEM;
3130         goto out;
3131 }
3132
3133 static void bio_map_kern_endio(struct bio *bio, int err)
3134 {
3135         bio_put(bio);
3136 }
3137
3138 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3139         int nents, gfp_t gfp, struct scatterlist **sgl_to_copy,
3140         int *nents_to_copy)
3141 {
3142         int res;
3143         struct request_queue *q = rq->q;
3144         int rw = rq_data_dir(rq);
3145         int max_nr_vecs, i;
3146         size_t tot_len;
3147         bool need_new_bio;
3148         struct scatterlist *sg, *prev_sg = NULL;
3149         struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
3150
3151         *sgl_to_copy = NULL;
3152
3153         if (unlikely((sgl == NULL) || (nents <= 0))) {
3154                 WARN_ON(1);
3155                 res = -EINVAL;
3156                 goto out;
3157         }
3158
3159         /*
3160          * Keep each bio allocation inside a single page to decrease the
3161          * probability of allocation failure.
3162          */
3163         max_nr_vecs = min_t(size_t,
3164                 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
3165                 BIO_MAX_PAGES);
3166
3167         need_new_bio = true;
3168         tot_len = 0;
3169         for_each_sg(sgl, sg, nents, i) {
3170                 struct page *page = sg_page(sg);
3171                 void *page_addr = page_address(page);
3172                 size_t len = sg->length, l;
3173                 size_t offset = sg->offset;
3174
3175                 tot_len += len;
3176                 prev_sg = sg;
3177
3178                 /*
3179                  * Each segment must be aligned on a DMA boundary and must
3180                  * not be located on the stack. The last segment may have
3181                  * an unaligned length as long as the total length meets
3182                  * the DMA padding alignment.
3183                  */
3184                 if (i == nents - 1)
3185                         l = 0;
3186                 else
3187                         l = len;
3188                 if (((sg->offset | l) & queue_dma_alignment(q)) ||
3189                     (page_addr && object_is_on_stack(page_addr + sg->offset))) {
3190                         res = -EINVAL;
3191                         goto out_need_copy;
3192                 }
3193
3194                 while (len > 0) {
3195                         size_t bytes;
3196                         int rc;
3197
3198                         if (need_new_bio) {
3199                                 bio = bio_kmalloc(gfp, max_nr_vecs);
3200                                 if (bio == NULL) {
3201                                         res = -ENOMEM;
3202                                         goto out_free_bios;
3203                                 }
3204
3205                                 if (rw == WRITE)
3206                                         bio->bi_rw |= 1 << BIO_RW;
3207
3208                                 bio->bi_end_io = bio_map_kern_endio;
3209
3210                                 if (hbio == NULL)
3211                                         hbio = tbio = bio;
3212                                 else
3213                                         tbio = tbio->bi_next = bio;
3214                         }
3215
3216                         bytes = min_t(size_t, len, PAGE_SIZE - offset);
3217
3218                         rc = bio_add_pc_page(q, bio, page, bytes, offset);
3219                         if (rc < bytes) {
3220                                 if (unlikely(need_new_bio || (rc < 0))) {
3221                                         if (rc < 0)
3222                                                 res = rc;
3223                                         else
3224                                                 res = -EIO;
3225                                         goto out_need_copy;
3226                                 } else {
3227                                         need_new_bio = true;
3228                                         len -= rc;
3229                                         offset += rc;
3230                                         continue;
3231                                 }
3232                         }
3233
3234                         need_new_bio = false;
3235                         offset = 0;
3236                         len -= bytes;
3237                         page = nth_page(page, 1);
3238                 }
3239         }
3240
3241         if (hbio == NULL) {
3242                 res = -EINVAL;
3243                 goto out_free_bios;
3244         }
3245
3246         /* Total length must be aligned on DMA padding alignment */
3247         if ((tot_len & q->dma_pad_mask) &&
3248             !(rq->cmd_flags & REQ_COPY_USER)) {
3249                 res = -EINVAL;
3250                 if (sgl->offset == 0) {
3251                         *sgl_to_copy = prev_sg;
3252                         *nents_to_copy = 1;
3253                         goto out_free_bios;
3254                 } else
3255                         goto out_need_copy;
3256         }
3257
3258         while (hbio != NULL) {
3259                 bio = hbio;
3260                 hbio = hbio->bi_next;
3261                 bio->bi_next = NULL;
3262
3263                 blk_queue_bounce(q, &bio);
3264
3265                 res = blk_rq_append_bio(q, rq, bio);
3266                 if (unlikely(res != 0)) {
3267                         bio->bi_next = hbio;
3268                         hbio = bio;
3269                         goto out_free_bios;
3270                 }
3271         }
3272
3273         rq->buffer = rq->data = NULL;
3274
3275 out:
3276         return res;
3277
3278 out_need_copy:
3279         *sgl_to_copy = sgl;
3280         *nents_to_copy = nents;
3281
3282 out_free_bios:
3283         while (hbio != NULL) {
3284                 bio = hbio;
3285                 hbio = hbio->bi_next;
3286                 bio_put(bio);
3287         }
3288         goto out;
3289 }
3290
3291 /**
3292  * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3293  * @rq:         request to fill
3294  * @sgl:        area to map
3295  * @nents:      number of elements in @sgl
3296  * @gfp:        memory allocation flags
3297  *
3298  * Description:
3299  *    Data will be mapped directly if possible. Otherwise a bounce
3300  *    buffer will be used.
3301  */
3302 int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3303                        int nents, gfp_t gfp)
3304 {
3305         int res;
3306         struct scatterlist *sg_to_copy = NULL;
3307         int nents_to_copy = 0;
3308
3309         if (unlikely((sgl == NULL) || (sgl->length == 0) ||
3310                      (nents <= 0) || (rq->end_io_data != NULL))) {
3311                 WARN_ON(1);
3312                 res = -EINVAL;
3313                 goto out;
3314         }
3315
3316         res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3317                                 &nents_to_copy);
3318         if (unlikely(res != 0)) {
3319                 if (sg_to_copy == NULL)
3320                         goto out;
3321
3322                 res = blk_rq_handle_align(rq, &sgl, &nents, sg_to_copy,
3323                                 nents_to_copy, gfp, rq->q->bounce_gfp | gfp);
3324                 if (unlikely(res != 0))
3325                         goto out;
3326
3327                 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3328                                                 &nents_to_copy);
3329                 if (res != 0) {
3330                         blk_rq_unmap_kern_sg(rq, 0);
3331                         goto out;
3332                 }
3333         }
3334
3335         rq->buffer = rq->data = NULL;
3336
3337 out:
3338         return res;
3339 }
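
/*
 * Example usage (illustrative sketch, not part of this file): map a
 * two-page, page-aligned kernel buffer 'buf' to a SCSI pass-through
 * request on scsi_device 'sdev' (both assumed to exist in the caller):
 *
 *	struct scatterlist sg[2];
 *	struct request *rq;
 *	int err;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf, PAGE_SIZE);
 *	sg_set_buf(&sg[1], buf + PAGE_SIZE, PAGE_SIZE);
 *
 *	rq = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
 *	if (rq == NULL)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	err = blk_rq_map_kern_sg(rq, sg, 2, GFP_KERNEL);
 *	if (err != 0)
 *		blk_put_request(rq);
 */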
3340
3341 struct scsi_io_context {
3342         void *blk_data;
3343         void *data;
3344         void (*done)(void *data, char *sense, int result, int resid);
3345         char sense[SCSI_SENSE_BUFFERSIZE];
3346 };
3347
3348 static void scsi_end_async(struct request *req, int error)
3349 {
3350         struct scsi_io_context *sioc = req->end_io_data;
3351
3352         req->end_io_data = sioc->blk_data;
3353         blk_rq_unmap_kern_sg(req, (error == 0));
3354
3355         if (sioc->done)
3356                 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3357
3358         kfree(sioc);
3359         __blk_put_request(req->q, req);
3360 }
3361
3362 /**
3363  * scsi_execute_async - insert request
3364  * @sdev:       scsi device
3365  * @cmd:        scsi command
3366  * @cmd_len:    length of scsi cdb
3367  * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
3368  * @sgl:        data buffer scatterlist
3369  * @nents:      number of elements in the sgl
3370  * @timeout:    request timeout in seconds
3371  * @retries:    number of times to retry request
3372  * @privdata:   data passed to done()
3373  * @done:       callback function when done
3374  * @gfp:        memory allocation flags
3375  * @flags:      one or more SCSI_ASYNC_EXEC_FLAG_* flags
3376  */
3377 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
3378                        int cmd_len, int data_direction, struct scatterlist *sgl,
3379                        int nents, int timeout, int retries, void *privdata,
3380                        void (*done)(void *, char *, int, int), gfp_t gfp,
3381                        int flags)
3382 {
3383         struct request *req;
3384         struct scsi_io_context *sioc;
3385         int err = 0;
3386         int write = (data_direction == DMA_TO_DEVICE);
3387
3388         sioc = kzalloc(sizeof(*sioc), gfp);
3389         if (sioc == NULL)
3390                 return DRIVER_ERROR << 24;
3391
3392         req = blk_get_request(sdev->request_queue, write, gfp);
3393         if (req == NULL)
3394                 goto free_sioc;
3395         req->cmd_type = REQ_TYPE_BLOCK_PC;
3396         req->cmd_flags |= REQ_QUIET;
3397
3398         if (flags & SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING)
3399                 req->cmd_flags |= REQ_COPY_USER;
3400
3401         if (sgl != NULL) {
3402                 err = blk_rq_map_kern_sg(req, sgl, nents, gfp);
3403                 if (err)
3404                         goto free_req;
3405         }
3406
3407         sioc->blk_data = req->end_io_data;
3408         sioc->data = privdata;
3409         sioc->done = done;
3410
3411         req->cmd_len = cmd_len;
3412         memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3413         memcpy(req->cmd, cmd, req->cmd_len);
3414         req->sense = sioc->sense;
3415         req->sense_len = 0;
3416         req->timeout = timeout;
3417         req->retries = retries;
3418         req->end_io_data = sioc;
3419
3420         blk_execute_rq_nowait(req->q, NULL, req,
3421                 flags & SCSI_ASYNC_EXEC_FLAG_AT_HEAD, scsi_end_async);
3422         return 0;
3423
3424 free_req:
3425         blk_put_request(req);
3426
3427 free_sioc:
3428         kfree(sioc);
3429         return DRIVER_ERROR << 24;
3430 }
3431 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
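
/*
 * Example usage of scsi_execute_async() (illustrative sketch; 'sdev',
 * 'my_ctx' and 'my_done' are assumed to be supplied by the caller):
 *
 *	static void my_done(void *data, char *sense, int result, int resid)
 *	{
 *		... inspect result and sense, release data ...
 *	}
 *
 *	const unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int rc = scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_NONE,
 *			NULL, 0, 60, 3, my_ctx, my_done, GFP_KERNEL, 0);
 *
 * A non-zero rc (DRIVER_ERROR << 24) means the request was never queued
 * and my_done() will not be called.
 */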
3432
3433 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3434 {
3435         struct scatterlist *src_sg, *dst_sg;
3436         unsigned int to_copy;
3437         int atomic = scst_cmd_atomic(cmd);
3438
3439         TRACE_ENTRY();
3440
3441         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3442                 if (cmd->data_direction != SCST_DATA_BIDI) {
3443                         src_sg = cmd->tgt_sg;
3444                         dst_sg = cmd->sg;
3445                         to_copy = cmd->bufflen;
3446                 } else {
3447                         TRACE_MEM("BIDI cmd %p", cmd);
3448                         src_sg = cmd->tgt_in_sg;
3449                         dst_sg = cmd->in_sg;
3450                         to_copy = cmd->in_bufflen;
3451                 }
3452         } else {
3453                 src_sg = cmd->sg;
3454                 dst_sg = cmd->tgt_sg;
3455                 to_copy = cmd->resp_data_len;
3456         }
3457
3458         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, "
3459                 "to_copy %d", cmd, copy_dir, src_sg, dst_sg, to_copy);
3460
3461         if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3462                 /*
3463                  * This can happen, e.g., with scst_user for a cmd with
3464                  * delayed allocation that failed with a CHECK CONDITION.
3465                  */
3466                 goto out;
3467         }
3468
3469         sg_copy(dst_sg, src_sg, to_copy, atomic ? KM_SOFTIRQ0 : KM_USER0,
3470                                          atomic ? KM_SOFTIRQ1 : KM_USER1);
3471
3472 out:
3473         TRACE_EXIT();
3474         return;
3475 }
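
/*
 * Example (sketch): a target driver that supplies its own tgt_sg buffers
 * would use the SCST_SG_COPY_TO_TARGET direction to fill tgt_sg from
 * cmd->sg before sending READ data to the initiator, and
 * SCST_SG_COPY_FROM_TARGET after receiving WRITE data to move it into
 * cmd->sg.
 */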
3476
3477 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3478
3479 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
3480 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3481
3482 int scst_get_cdb_len(const uint8_t *cdb)
3483 {
3484         return SCST_GET_CDB_LEN(cdb[0]);
3485 }
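
/*
 * For example, READ(10) (opcode 0x28) is in CDB group 1, since
 * (0x28 >> 5) & 0x7 == 1, so scst_get_cdb_len() returns 10.
 */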
3486
3487 /* get_trans_len_x extracts x bytes from the CDB as the transfer length, starting at offset off */
3488
3489 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3490 {
3491         cmd->cdb_len = 10;
3492         cmd->bufflen = 0;
3493         return 0;
3494 }
3495
3496 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3497 {
3498         cmd->bufflen = 6;
3499         return 0;
3500 }
3501
3502 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3503 {
3504         cmd->bufflen = READ_CAP_LEN;
3505         return 0;
3506 }
3507
3508 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3509 {
3510         int res = 0;
3511
3512         TRACE_ENTRY();
3513
3514         if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3515                 cmd->op_name = "READ CAPACITY(16)";
3516                 cmd->bufflen = READ_CAP16_LEN;
3517                 cmd->op_flags |= SCST_IMPLICIT_HQ;
3518         } else
3519                 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3520
3521         TRACE_EXIT_RES(res);
3522         return res;
3523 }
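
/*
 * Example: a SERVICE ACTION IN(16) CDB starting with 9e 10 is recognized
 * above as READ CAPACITY(16) and gets a READ_CAP16_LEN (32 bytes) buffer;
 * any other service action leaves the transfer length unknown.
 */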
3524
3525 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3526 {
3527         cmd->bufflen = 1;
3528         return 0;
3529 }
3530
3531 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3532 {
3533         uint8_t *p = (uint8_t *)cmd->cdb + off;
3534         int res = 0;
3535
3536         cmd->bufflen = 0;
3537         cmd->bufflen |= ((u32)p[0]) << 8;
3538         cmd->bufflen |= ((u32)p[1]);
3539
3540         switch (cmd->cdb[1] & 0x1f) {
3541         case 0:
3542         case 1:
3543         case 6:
3544                 if (cmd->bufflen != 0) {
3545                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3546                                 "allocation length for service action %x",
3547                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
3548                         goto out_inval;
3549                 }
3550                 break;
3551         }
3552
3553         switch (cmd->cdb[1] & 0x1f) {
3554         case 0:
3555         case 1:
3556                 cmd->bufflen = 20;
3557                 break;
3558         case 6:
3559                 cmd->bufflen = 32;
3560                 break;
3561         case 8:
3562                 cmd->bufflen = max(28, cmd->bufflen);
3563                 break;
3564         default:
3565                 PRINT_ERROR("READ POSITION: Invalid service action %x",
3566                         cmd->cdb[1] & 0x1f);
3567                 goto out_inval;
3568         }
3569
3570 out:
3571         return res;
3572
3573 out_inval:
3574         scst_set_cmd_error(cmd,
3575                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3576         res = 1;
3577         goto out;
3578 }
3579
3580 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3581 {
3582         cmd->bufflen = (u32)cmd->cdb[off];
3583         return 0;
3584 }
3585
3586 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3587 {
3588         cmd->bufflen = (u32)cmd->cdb[off];
3589         if (cmd->bufflen == 0)
3590                 cmd->bufflen = 256;
3591         return 0;
3592 }
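
/*
 * Per SBC, READ(6)/WRITE(6) encode 256 blocks as a transfer length of
 * zero, hence the substitution above: cdb[off] == 0 yields bufflen = 256.
 */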
3593
3594 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3595 {
3596         const uint8_t *p = cmd->cdb + off;
3597
3598         cmd->bufflen = 0;
3599         cmd->bufflen |= ((u32)p[0]) << 8;
3600         cmd->bufflen |= ((u32)p[1]);
3601
3602         return 0;
3603 }
3604
3605 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3606 {
3607         const uint8_t *p = cmd->cdb + off;
3608
3609         cmd->bufflen = 0;
3610         cmd->bufflen |= ((u32)p[0]) << 16;
3611         cmd->bufflen |= ((u32)p[1]) << 8;
3612         cmd->bufflen |= ((u32)p[2]);
3613
3614         return 0;
3615 }
3616
3617 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3618 {
3619         const uint8_t *p = cmd->cdb + off;
3620
3621         cmd->bufflen = 0;
3622         cmd->bufflen |= ((u32)p[0]) << 24;
3623         cmd->bufflen |= ((u32)p[1]) << 16;
3624         cmd->bufflen |= ((u32)p[2]) << 8;
3625         cmd->bufflen |= ((u32)p[3]);
3626
3627         return 0;
3628 }
3629
3630 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3631 {
3632         cmd->bufflen = 0;
3633         return 0;
3634 }
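
/*
 * Worked example: READ(10) carries its transfer length in CDB bytes 7-8,
 * so a table entry using get_trans_len_2(cmd, 7) computes
 *
 *	bufflen = (cdb[7] << 8) | cdb[8];
 *
 * e.g. the CDB 28 00 00 00 00 08 00 00 10 00 yields bufflen = 0x10.
 */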
3635
3636 int scst_get_cdb_info(struct scst_cmd *cmd)
3637 {
3638         int dev_type = cmd->dev->type;
3639         int i, res = 0;
3640         uint8_t op;
3641         const struct scst_sdbops *ptr = NULL;
3642
3643         TRACE_ENTRY();
3644
3645         op = cmd->cdb[0];       /* the opcode is the first CDB byte */
3646
3647         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3648                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3649                 dev_type);
3650
3651         i = scst_scsi_op_list[op];
3652         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3653                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3654                         ptr = &scst_scsi_op_table[i];
3655                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3656                               ptr->ops, ptr->devkey[0], /* disk     */
3657                               ptr->devkey[1],   /* tape     */
3658                               ptr->devkey[2],   /* printer */
3659                               ptr->devkey[3],   /* cpu      */
3660                               ptr->devkey[4],   /* cdr      */
3661                               ptr->devkey[5],   /* cdrom    */
3662                               ptr->devkey[6],   /* scanner */
3663                               ptr->devkey[7],   /* worm     */
3664                               ptr->devkey[8],   /* changer */
3665                               ptr->devkey[9],   /* commdev */
3666                               ptr->op_name);
3667                         TRACE_DBG("direction=%d flags=%d off=%d",
3668                               ptr->direction,
3669                               ptr->flags,
3670                               ptr->off);
3671                         break;
3672                 }
3673                 i++;
3674         }
3675
3676         if (unlikely(ptr == NULL)) {
3677                 /* opcode not found or not currently supported */
3678                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3679                       dev_type);
3680                 res = -1;
3681                 cmd->op_flags = SCST_INFO_NOT_FOUND;
3682                 goto out;
3683         }
3684
3685         cmd->cdb_len = SCST_GET_CDB_LEN(op);
3686         cmd->op_name = ptr->op_name;
3687         cmd->data_direction = ptr->direction;
3688         cmd->op_flags = ptr->flags;
3689         res = (*ptr->get_trans_len)(cmd, ptr->off);
3690
3691 out:
3692         TRACE_EXIT_RES(res);
3693         return res;
3694 }
3695 EXPORT_SYMBOL(scst_get_cdb_info);
3696
3697 /* Pack an SCST LUN back into SCSI form using the peripheral device addressing method */
3698 uint64_t scst_pack_lun(const uint64_t lun)
3699 {
3700         uint64_t res;
3701         uint16_t *p = (uint16_t *)&res;
3702
3703         res = lun;
3704         *p = cpu_to_be16(*p);
3705
3706         TRACE_EXIT_HRES((unsigned long)res);
3707         return res;
3708 }
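
/*
 * For example, on a little-endian host scst_pack_lun(5) byte-swaps the
 * low 16 bits, so the 8-byte LUN field starts with 00 05: the peripheral
 * device addressing method, LUN 5.
 */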
3709
3710 /*
3711  * Routine to extract a LUN number from an 8-byte LUN structure in
3712  * network byte order (BE) (see SAM-2, Section 4.12.3, page 40).
3713  * Supports three LUN unpacking methods: peripheral, flat space and
3714  * logical unit.
3715  */
3716 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3717 {
3718         uint64_t res = NO_SUCH_LUN;
3719         int address_method;
3720
3721         TRACE_ENTRY();
3722
3723         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3724
3725         if (unlikely(len < 2)) {
3726                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3727                         "more", len);
3728                 goto out;
3729         }
3730
3731         if (len > 2) {
3732                 switch (len) {
3733                 case 8:
3734                         if ((*((uint64_t *)lun) &
3735                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3736                                 goto out_err;
3737                         break;
3738                 case 4:
3739                         if (*((uint16_t *)&lun[2]) != 0)
3740                                 goto out_err;
3741                         break;
3742                 case 6:
3743                         if (*((uint32_t *)&lun[2]) != 0)
3744                                 goto out_err;
3745                         break;
3746                 default:
3747                         goto out_err;
3748                 }
3749         }
3750
3751         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
3752         switch (address_method) {
3753         case 0: /* peripheral device addressing method */
3754 #if 0
3755                 if (*lun) {
3756                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
3757                              "peripheral device addressing method 0x%02x, "
3758                              "expected 0", *lun);
3759                         break;
3760                 }
3761                 res = *(lun + 1);
3762                 break;
3763 #else
3764                 /*
3765                  * It appears to be legal to treat this as the flat space
3766                  * addressing method as well.
3767                  */
3768
3769                 /* fall through */
3770 #endif
3771
3772         case 1: /* flat space addressing method */
3773                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3774                 break;
3775
3776         case 2: /* logical unit addressing method */
3777                 if (*lun & 0x3f) {
3778                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3779                                     "addressing method 0x%02x, expected 0",
3780                                     *lun & 0x3f);
3781                         break;
3782                 }
3783                 if (*(lun + 1) & 0xe0) {
3784                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
3785                                     "addressing method 0x%02x, expected 0",
3786                                     (*(lun + 1) & 0xe0) >> 5);
3787                         break;
3788                 }
3789                 res = *(lun + 1) & 0x1f;
3790                 break;
3791
3792         case 3: /* extended logical unit addressing method */
3793         default:
3794                 PRINT_ERROR("Unimplemented LUN addressing method %u",
3795                             address_method);
3796                 break;
3797         }
3798
3799 out:
3800         TRACE_EXIT_RES((int)res);
3801         return res;
3802
3803 out_err:
3804         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
3805         goto out;
3806 }
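
/*
 * Example: the 8-byte LUN 40 05 00 00 00 00 00 00 selects the flat space
 * method (top two bits of byte 0 are 01b), so scst_unpack_lun() returns
 * ((0x40 & 0x3f) << 8) | 0x05 = 5.
 */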
3807
3808 int scst_calc_block_shift(int sector_size)
3809 {
3810         int block_shift = 0;
3811         int t;
3812
3813         if (sector_size == 0)
3814                 sector_size = 512;
3815
3816         t = sector_size;
3817         while (1) {
3818                 if ((t & 1) != 0)
3819                         break;
3820                 t >>= 1;
3821                 block_shift++;
3822         }
3823         if (block_shift < 9) {