Don't check if data should be copied between the dev handler's and target driver...
scst/src/scst_lib.c
1 /*
2  *  scst_lib.c
3  *
4  *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2009 ID7 Ltd.
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/kthread.h>
27 #include <linux/cdrom.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <asm/kmap_types.h>
31
32 #include "scst.h"
33 #include "scst_priv.h"
34 #include "scst_mem.h"
35
36 #include "scst_cdbprobe.h"
37
38 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
39 struct scsi_io_context {
40         unsigned int full_cdb_used:1;
41         void *data;
42         void (*done)(void *data, char *sense, int result, int resid);
43         char sense[SCST_SENSE_BUFFERSIZE];
44         unsigned char full_cdb[0];
45 };
46 static struct kmem_cache *scsi_io_context_cache;
47 #endif
48
49 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
50 static void scst_check_internal_sense(struct scst_device *dev, int result,
51         uint8_t *sense, int sense_len);
52 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
53         int flags);
54 static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
55         const uint8_t *sense, int sense_len, int flags);
56 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
57         const uint8_t *sense, int sense_len, int flags);
58 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
59 static void scst_release_space(struct scst_cmd *cmd);
60 static void scst_unblock_cmds(struct scst_device *dev);
61 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
62 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
63         struct scst_acg_dev *acg_dev);
64 static void scst_tgt_retry_timer_fn(unsigned long arg);
65
66 #ifdef CONFIG_SCST_DEBUG_TM
67 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
68         struct scst_acg_dev *acg_dev);
69 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
70 #else
71 static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
72         struct scst_acg_dev *acg_dev) {}
73 static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
74 #endif /* CONFIG_SCST_DEBUG_TM */
75
76 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
77 {
78         int res = 0;
79         gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
80
81         TRACE_ENTRY();
82
83         if (cmd->sense != NULL)
84                 goto memzero;
85
86         cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
87         if (cmd->sense == NULL) {
88                 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
89                         "The sense data will be lost!!", cmd->cdb[0]);
90                 res = -ENOMEM;
91                 goto out;
92         }
93
94         cmd->sense_buflen = SCST_SENSE_BUFFERSIZE;
95
96 memzero:
97         cmd->sense_valid_len = 0;
98         memset(cmd->sense, 0, cmd->sense_buflen);
99
100 out:
101         TRACE_EXIT_RES(res);
102         return res;
103 }
104 EXPORT_SYMBOL(scst_alloc_sense);
105
106 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
107         const uint8_t *sense, unsigned int len)
108 {
109         int res;
110
111         TRACE_ENTRY();
112
113         res = scst_alloc_sense(cmd, atomic);
114         if (res != 0) {
115                 PRINT_BUFFER("Lost sense", sense, len);
116                 goto out;
117         }
118
119         cmd->sense_valid_len = len;
120         if (cmd->sense_buflen < len) {
121                 PRINT_WARNING("Sense truncated (needed %d), consider "
122                         "increasing SCST_SENSE_BUFFERSIZE (op %x)", len, cmd->cdb[0]);
123                 cmd->sense_valid_len = cmd->sense_buflen;
124         }
125
126         memcpy(cmd->sense, sense, cmd->sense_valid_len);
127         TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_valid_len);
128
129 out:
130         TRACE_EXIT_RES(res);
131         return res;
132 }
133 EXPORT_SYMBOL(scst_alloc_set_sense);
134
135 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
136 {
137         TRACE_ENTRY();
138
139         cmd->status = status;
140         cmd->host_status = DID_OK;
141
142         cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
143         cmd->dbl_ua_orig_data_direction = cmd->data_direction;
144
145         cmd->data_direction = SCST_DATA_NONE;
146         cmd->resp_data_len = 0;
147         cmd->is_send_status = 1;
148
149         cmd->completed = 1;
150
151         TRACE_EXIT();
152         return;
153 }
154 EXPORT_SYMBOL(scst_set_cmd_error_status);
155
156 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
157 {
158         int rc;
159
160         TRACE_ENTRY();
161
162         scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
163
164         rc = scst_alloc_sense(cmd, 1);
165         if (rc != 0) {
166                 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
167                         key, asc, ascq);
168                 goto out;
169         }
170
171         cmd->sense_valid_len = scst_set_sense(cmd->sense, cmd->sense_buflen,
172                 scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
173         TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_valid_len);
174
175 out:
176         TRACE_EXIT();
177         return;
178 }
179 EXPORT_SYMBOL(scst_set_cmd_error);
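/*
 * Minimal usage sketch, not from the original file: a dev handler's exec()
 * callback rejecting an unsupported opcode via scst_set_cmd_error(). The
 * my_* names are hypothetical and the completion call follows the SCST
 * version at hand.
 *
 *	static int my_exec(struct scst_cmd *cmd)
 *	{
 *		if (!my_opcode_supported(cmd->cdb[0])) {
 *			scst_set_cmd_error(cmd,
 *				SCST_LOAD_SENSE(scst_sense_invalid_opcode));
 *			cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
 *				SCST_CONTEXT_SAME);
 *			return SCST_EXEC_COMPLETED;
 *		}
 *		...
 *	}
 */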
180
181 int scst_set_sense(uint8_t *buffer, int len, bool d_sense,
182         int key, int asc, int ascq)
183 {
184         int res;
185
186         sBUG_ON(len == 0);
187
188         memset(buffer, 0, len);
189
190         if (d_sense) {
191                 /* Descriptor format */
192                 if (len < 8) {
193                         PRINT_ERROR("Length %d of sense buffer too small to "
194                                 "fit sense %x:%x:%x", len, key, asc, ascq);
195                 }
196
197                 buffer[0] = 0x72;               /* Response Code        */
198                 if (len > 1)
199                         buffer[1] = key;        /* Sense Key            */
200                 if (len > 2)
201                         buffer[2] = asc;        /* ASC                  */
202                 if (len > 3)
203                         buffer[3] = ascq;       /* ASCQ                 */
204                 res = 8;
205         } else {
206                 /* Fixed format */
207                 if (len < 18) {
208                         PRINT_ERROR("Length %d of sense buffer too small to "
209                                 "fit sense %x:%x:%x", len, key, asc, ascq);
210                 }
211
212                 buffer[0] = 0x70;               /* Response Code        */
213                 if (len > 2)
214                         buffer[2] = key;        /* Sense Key            */
215                 if (len > 7)
216                         buffer[7] = 0x0a;       /* Additional Sense Length */
217                 if (len > 12)
218                         buffer[12] = asc;       /* ASC                  */
219                 if (len > 13)
220                         buffer[13] = ascq;      /* ASCQ                 */
221                 res = 18;
222         }
223
224         TRACE_BUFFER("Sense set", buffer, res);
225         return res;
226 }
227 EXPORT_SYMBOL(scst_set_sense);
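/*
 * Usage sketch (values illustrative): encode MEDIUM ERROR / 0x11,0x00
 * ("unrecovered read error") in whichever format the device is configured
 * for; the return value is the number of valid sense bytes written (8 for
 * descriptor format, 18 for fixed).
 *
 *	uint8_t sb[SCST_STANDARD_SENSE_LEN];
 *	int sl = scst_set_sense(sb, sizeof(sb), dev->d_sense,
 *			MEDIUM_ERROR, 0x11, 0x00);
 */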
228
229 bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
230         int key, int asc, int ascq)
231 {
232         bool res = false;
233
234         /* Response Code */
235         if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
236                 /* Fixed format */
237
238                 /* Sense Key */
239                 if (valid_mask & SCST_SENSE_KEY_VALID) {
240                         if (len < 3)
241                                 goto out;
242                         if (sense[2] != key)
243                                 goto out;
244                 }
245
246                 /* ASC */
247                 if (valid_mask & SCST_SENSE_ASC_VALID) {
248                         if (len < 13)
249                                 goto out;
250                         if (sense[12] != asc)
251                                 goto out;
252                 }
253
254                 /* ASCQ */
255                 if (valid_mask & SCST_SENSE_ASCQ_VALID) {
256                         if (len < 14)
257                                 goto out;
258                         if (sense[13] != ascq)
259                                 goto out;
260                 }
261         } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
262                 /* Descriptor format */
263
264                 /* Sense Key */
265                 if (valid_mask & SCST_SENSE_KEY_VALID) {
266                         if (len < 2)
267                                 goto out;
268                         if (sense[1] != key)
269                                 goto out;
270                 }
271
272                 /* ASC */
273                 if (valid_mask & SCST_SENSE_ASC_VALID) {
274                         if (len < 3)
275                                 goto out;
276                         if (sense[2] != asc)
277                                 goto out;
278                 }
279
280                 /* ASCQ */
281                 if (valid_mask & SCST_SENSE_ASCQ_VALID) {
282                         if (len < 4)
283                                 goto out;
284                         if (sense[3] != ascq)
285                                 goto out;
286                 }
287         } else
288                 goto out;
289
290         res = true;
291
292 out:
293         TRACE_EXIT_RES((int)res);
294         return res;
295 }
296 EXPORT_SYMBOL(scst_analyze_sense);
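/*
 * Usage sketch: SCST_LOAD_SENSE() expands one of the predefined
 * key/asc/ascq triples from scst.h, so matching a complete triple (as done
 * repeatedly below) looks like:
 *
 *	if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
 *			SCST_LOAD_SENSE(scst_sense_reset_UA)))
 *		...
 */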
297
298 bool scst_is_ua_sense(const uint8_t *sense, int len)
299 {
300         if (SCST_SENSE_VALID(sense))
301                 return scst_analyze_sense(sense, len,
302                         SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
303         else
304                 return false;
305 }
306 EXPORT_SYMBOL(scst_is_ua_sense);
307
308 bool scst_is_ua_global(const uint8_t *sense, int len)
309 {
310         bool res;
311
312         /* Changing it don't forget to change scst_requeue_ua() as well!! */
313
314         if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
315                         SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed)))
316                 res = true;
317         else
318                 res = false;
319
320         return res;
321 }
322
323 void scst_check_convert_sense(struct scst_cmd *cmd)
324 {
325         bool d_sense;
326
327         TRACE_ENTRY();
328
329         if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
330                 goto out;
331
332         d_sense = scst_get_cmd_dev_d_sense(cmd);
333         if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
334                 TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
335                         cmd);
336                 if (cmd->sense_valid_len < 18) {
337                         PRINT_ERROR("Sense too small to convert (%d, "
338                                 "type: fixed)", cmd->sense_valid_len);
339                         goto out;
340                 }
341                 cmd->sense_valid_len = scst_set_sense(cmd->sense, cmd->sense_buflen,
342                         d_sense, cmd->sense[2], cmd->sense[12], cmd->sense[13]);
343         } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
344                                 (cmd->sense[0] == 0x73))) {
345                 TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
346                         cmd);
347                 if ((cmd->sense_buflen < 18) || (cmd->sense_valid_len < 8)) {
348                         PRINT_ERROR("Sense too small to convert (%d, "
349                                 "type: descriptor, valid %d)",
350                                 cmd->sense_buflen, cmd->sense_valid_len);
351                         goto out;
352                 }
353                 cmd->sense_valid_len = scst_set_sense(cmd->sense,
354                         cmd->sense_buflen, d_sense,
355                         cmd->sense[1], cmd->sense[2], cmd->sense[3]);
356         }
357
358 out:
359         TRACE_EXIT();
360         return;
361 }
362 EXPORT_SYMBOL(scst_check_convert_sense);
363
364 static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
365         unsigned int len)
366 {
367         TRACE_ENTRY();
368
369         scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
370         scst_alloc_set_sense(cmd, 1, sense, len);
371
372         TRACE_EXIT();
373         return;
374 }
375
376 void scst_set_busy(struct scst_cmd *cmd)
377 {
378         int c = atomic_read(&cmd->sess->sess_cmd_count);
379
380         TRACE_ENTRY();
381
382         if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
383                 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
384                 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
385                         "(cmds count %d, queue_type %x, sess->init_phase %d)",
386                         cmd->sess->initiator_name, c,
387                         cmd->queue_type, cmd->sess->init_phase);
388         } else {
389                 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
390                 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
391                         "initiator %s (cmds count %d, queue_type %x, "
392                         "sess->init_phase %d)", cmd->sess->initiator_name, c,
393                         cmd->queue_type, cmd->sess->init_phase);
394         }
395
396         TRACE_EXIT();
397         return;
398 }
399 EXPORT_SYMBOL(scst_set_busy);
400
401 void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
402 {
403         int i;
404
405         TRACE_ENTRY();
406
407         TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
408                 asc, ascq);
409
410         /* Protect sess_tgt_dev_list_hash */
411         mutex_lock(&scst_mutex);
412
413         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
414                 struct list_head *sess_tgt_dev_list_head =
415                         &sess->sess_tgt_dev_list_hash[i];
416                 struct scst_tgt_dev *tgt_dev;
417
418                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
419                                 sess_tgt_dev_list_entry) {
420                         spin_lock_bh(&tgt_dev->tgt_dev_lock);
421                         if (!list_empty(&tgt_dev->UA_list)) {
422                                 struct scst_tgt_dev_UA *ua;
423
424                                 ua = list_entry(tgt_dev->UA_list.next,
425                                         typeof(*ua), UA_list_entry);
426                                 if (scst_analyze_sense(ua->UA_sense_buffer,
427                                                 ua->UA_valid_sense_len,
428                                                 SCST_SENSE_ALL_VALID,
429                                                 SCST_LOAD_SENSE(scst_sense_reset_UA))) {
430                                         ua->UA_valid_sense_len = scst_set_sense(
431                                                 ua->UA_sense_buffer,
432                                                 sizeof(ua->UA_sense_buffer),
433                                                 tgt_dev->dev->d_sense,
434                                                 key, asc, ascq);
435                                 } else
436                                         PRINT_ERROR("%s",
437                                                 "The first UA isn't RESET UA");
438                         } else
439                                 PRINT_ERROR("%s", "There's no RESET UA to "
440                                         "replace");
441                         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
442                 }
443         }
444
445         mutex_unlock(&scst_mutex);
446
447         TRACE_EXIT();
448         return;
449 }
450 EXPORT_SYMBOL(scst_set_initial_UA);
451
452 static struct scst_aen *scst_alloc_aen(struct scst_session *sess,
453         uint64_t unpacked_lun)
454 {
455         struct scst_aen *aen;
456
457         TRACE_ENTRY();
458
459         aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
460         if (aen == NULL) {
461                 PRINT_ERROR("AEN memory allocation failed. Corresponding "
462                         "event notification will not be performed (initiator "
463                         "%s)", sess->initiator_name);
464                 goto out;
465         }
466         memset(aen, 0, sizeof(*aen));
467
468         aen->sess = sess;
469         scst_sess_get(sess);
470
471         aen->lun = scst_pack_lun(unpacked_lun);
472
473 out:
474         TRACE_EXIT_HRES((unsigned long)aen);
475         return aen;
476 }
477
478 static void scst_free_aen(struct scst_aen *aen)
479 {
480         TRACE_ENTRY();
481
482         scst_sess_put(aen->sess);
483         mempool_free(aen, scst_aen_mempool);
484
485         TRACE_EXIT();
486         return;
487 }
488
489 /* Must be called under scst_mutex */
490 void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
491         int key, int asc, int ascq)
492 {
493         struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
494         uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
495         int sl;
496
497         TRACE_ENTRY();
498
499         if (tgtt->report_aen != NULL) {
500                 struct scst_aen *aen;
501                 int rc;
502
503                 aen = scst_alloc_aen(tgt_dev->sess, tgt_dev->lun);
504                 if (aen == NULL)
505                         goto queue_ua;
506
507                 aen->event_fn = SCST_AEN_SCSI;
508                 aen->aen_sense_len = scst_set_sense(aen->aen_sense,
509                         sizeof(aen->aen_sense), tgt_dev->dev->d_sense,
510                         key, asc, ascq);
511
512                 TRACE_DBG("Calling target's %s report_aen(%p)",
513                         tgtt->name, aen);
514                 rc = tgtt->report_aen(aen);
515                 TRACE_DBG("Target's %s report_aen(%p) returned %d",
516                         tgtt->name, aen, rc);
517                 if (rc == SCST_AEN_RES_SUCCESS)
518                         goto out;
519
520                 scst_free_aen(aen);
521         }
522
523 queue_ua:
524         TRACE_MGMT_DBG("AEN not supported, queuing plain UA (tgt_dev %p)",
525                 tgt_dev);
526         sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
527                 tgt_dev->dev->d_sense, key, asc, ascq);
528         scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
529
530 out:
531         TRACE_EXIT();
532         return;
533 }
534
535 /* No locks */
536 void scst_capacity_data_changed(struct scst_device *dev)
537 {
538         struct scst_tgt_dev *tgt_dev;
539
540         TRACE_ENTRY();
541
542         if (dev->type != TYPE_DISK) {
543                 TRACE_MGMT_DBG("CAPACITY DATA CHANGED UA not applicable "
544                         "for device type %d", dev->type);
545                 goto out;
546         }
547
548         TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);
549
550         mutex_lock(&scst_mutex);
551
552         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
553                             dev_tgt_dev_list_entry) {
554                 scst_gen_aen_or_ua(tgt_dev,
555                         SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
556         }
557
558         mutex_unlock(&scst_mutex);
559
560 out:
561         TRACE_EXIT();
562         return;
563 }
564 EXPORT_SYMBOL(scst_capacity_data_changed);
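/*
 * Usage sketch (the resize path is hypothetical): a virtual disk handler
 * that has just resized its backing storage notifies all initiators:
 *
 *	my_vdisk_update_size(dev);
 *	scst_capacity_data_changed(dev);
 *
 * which, per the code above, delivers a CAPACITY DATA CHANGED AEN or UA to
 * every tgt_dev of the device.
 */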
565
566 static inline bool scst_is_report_luns_changed_type(int type)
567 {
568         switch (type) {
569         case TYPE_DISK:
570         case TYPE_TAPE:
571         case TYPE_PRINTER:
572         case TYPE_PROCESSOR:
573         case TYPE_WORM:
574         case TYPE_ROM:
575         case TYPE_SCANNER:
576         case TYPE_MOD:
577         case TYPE_MEDIUM_CHANGER:
578         case TYPE_RAID:
579         case TYPE_ENCLOSURE:
580                 return true;
581         default:
582                 return false;
583         }
584 }
585
586 /* scst_mutex is supposed to be held */
587 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
588                                               int flags)
589 {
590         uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
591         struct list_head *shead;
592         struct scst_tgt_dev *tgt_dev;
593         int i;
594
595         TRACE_ENTRY();
596
597         TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
598                 "(sess %p)", sess);
599
600         local_bh_disable();
601
602         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
603                 shead = &sess->sess_tgt_dev_list_hash[i];
604
605                 list_for_each_entry(tgt_dev, shead,
606                                 sess_tgt_dev_list_entry) {
607                         /* Lockdep triggers a false positive here (nested same-class locks) */
608                         spin_lock(&tgt_dev->tgt_dev_lock);
609                 }
610         }
611
612         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
613                 shead = &sess->sess_tgt_dev_list_hash[i];
614
615                 list_for_each_entry(tgt_dev, shead,
616                                 sess_tgt_dev_list_entry) {
617                         int sl;
618
619                         if (!scst_is_report_luns_changed_type(
620                                         tgt_dev->dev->type))
621                                 continue;
622
623                         sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
624                                 tgt_dev->dev->d_sense,
625                                 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
626
627                         __scst_check_set_UA(tgt_dev, sense_buffer,
628                                 sl, flags | SCST_SET_UA_FLAG_GLOBAL);
629                 }
630         }
631
632         for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
633                 shead = &sess->sess_tgt_dev_list_hash[i];
634
635                 list_for_each_entry_reverse(tgt_dev,
636                                 shead, sess_tgt_dev_list_entry) {
637                         spin_unlock(&tgt_dev->tgt_dev_lock);
638                 }
639         }
640
641         local_bh_enable();
642
643         TRACE_EXIT();
644         return;
645 }
646
647 /* Activity is supposed to be suspended and scst_mutex held */
648 static void scst_report_luns_changed_sess(struct scst_session *sess)
649 {
650         int i;
651         struct scst_tgt_template *tgtt = sess->tgt->tgtt;
652         int d_sense = 0;
653         uint64_t lun = 0;
654
655         TRACE_ENTRY();
656
657         TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
658
659         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
660                 struct list_head *shead;
661                 struct scst_tgt_dev *tgt_dev;
662
663                 shead = &sess->sess_tgt_dev_list_hash[i];
664
665                 list_for_each_entry(tgt_dev, shead,
666                                 sess_tgt_dev_list_entry) {
667                         if (scst_is_report_luns_changed_type(
668                                         tgt_dev->dev->type)) {
669                                 lun = tgt_dev->lun;
670                                 d_sense = tgt_dev->dev->d_sense;
671                                 goto found;
672                         }
673                 }
674         }
675
676 found:
677         if (tgtt->report_aen != NULL) {
678                 struct scst_aen *aen;
679                 int rc;
680
681                 aen = scst_alloc_aen(sess, lun);
682                 if (aen == NULL)
683                         goto queue_ua;
684
685                 aen->event_fn = SCST_AEN_SCSI;
686                 aen->aen_sense_len = scst_set_sense(aen->aen_sense,
687                         sizeof(aen->aen_sense), d_sense,
688                         SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
689
690                 TRACE_DBG("Calling target's %s report_aen(%p)",
691                         tgtt->name, aen);
692                 rc = tgtt->report_aen(aen);
693                 TRACE_DBG("Target's %s report_aen(%p) returned %d",
694                         tgtt->name, aen, rc);
695                 if (rc == SCST_AEN_RES_SUCCESS)
696                         goto out;
697
698                 scst_free_aen(aen);
699         }
700
701 queue_ua:
702         scst_queue_report_luns_changed_UA(sess, 0);
703
704 out:
705         TRACE_EXIT();
706         return;
707 }
708
709 /* Activity is supposed to be suspended and scst_mutex held */
710 void scst_report_luns_changed(struct scst_acg *acg)
711 {
712         struct scst_session *sess;
713
714         TRACE_ENTRY();
715
716         TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
717
718         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
719                 scst_report_luns_changed_sess(sess);
720         }
721
722         TRACE_EXIT();
723         return;
724 }
725
726 void scst_aen_done(struct scst_aen *aen)
727 {
728         TRACE_ENTRY();
729
730         TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
731                 aen->event_fn, aen->sess->initiator_name);
732
733         if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
734                 goto out_free;
735
736         if (aen->event_fn != SCST_AEN_SCSI)
737                 goto out_free;
738
739         TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
740                 aen->sess->initiator_name);
741
742         if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
743                         SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
744                                 scst_sense_reported_luns_data_changed))) {
745                 mutex_lock(&scst_mutex);
746                 scst_queue_report_luns_changed_UA(aen->sess,
747                         SCST_SET_UA_FLAG_AT_HEAD);
748                 mutex_unlock(&scst_mutex);
749         } else {
750                 struct list_head *shead;
751                 struct scst_tgt_dev *tgt_dev;
752                 uint64_t lun;
753
754                 lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
755
756                 mutex_lock(&scst_mutex);
757
758                 /* tgt_dev might have gone away, so we need to find it again */
759                 shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
760                 list_for_each_entry(tgt_dev, shead,
761                                 sess_tgt_dev_list_entry) {
762                         if (tgt_dev->lun == lun) {
763                                 TRACE_MGMT_DBG("Requeuing failed AEN UA for "
764                                         "tgt_dev %p", tgt_dev);
765                                 scst_check_set_UA(tgt_dev, aen->aen_sense,
766                                         aen->aen_sense_len,
767                                         SCST_SET_UA_FLAG_AT_HEAD);
768                                 break;
769                         }
770                 }
771
772                 mutex_unlock(&scst_mutex);
773         }
774
775 out_free:
776         scst_free_aen(aen);
777
778         TRACE_EXIT();
779         return;
780 }
781 EXPORT_SYMBOL(scst_aen_done);
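/*
 * Usage sketch, assuming the SCST_AEN_RES_* delivery codes from scst.h: a
 * target driver that accepted an AEN in report_aen() but later failed to
 * deliver it hands the AEN back, so SCST can requeue the event as a UA:
 *
 *	aen->delivery_status = SCST_AEN_RES_FAILED;
 *	scst_aen_done(aen);
 */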
782
783 void scst_requeue_ua(struct scst_cmd *cmd)
784 {
785         TRACE_ENTRY();
786
787         if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
788                         SCST_SENSE_ALL_VALID,
789                         SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
790                 TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
791                         "for delivery failed cmd %p", cmd);
792                 mutex_lock(&scst_mutex);
793                 scst_queue_report_luns_changed_UA(cmd->sess,
794                         SCST_SET_UA_FLAG_AT_HEAD);
795                 mutex_unlock(&scst_mutex);
796         } else {
797                 TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
798                 scst_check_set_UA(cmd->tgt_dev, cmd->sense,
799                         cmd->sense_valid_len, SCST_SET_UA_FLAG_AT_HEAD);
800         }
801
802         TRACE_EXIT();
803         return;
804 }
805
806 /* Activity is supposed to be suspended and scst_mutex held */
807 static void scst_check_reassign_sess(struct scst_session *sess)
808 {
809         struct scst_acg *acg, *old_acg;
810         struct scst_acg_dev *acg_dev;
811         int i;
812         struct list_head *shead;
813         struct scst_tgt_dev *tgt_dev;
814         bool luns_changed = false;
815         bool add_failed, something_freed, not_needed_freed = false;
816
817         TRACE_ENTRY();
818
819         TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
820                 sess, sess->initiator_name);
821
822         acg = scst_find_acg(sess);
823         if (acg == sess->acg) {
824                 TRACE_MGMT_DBG("No reassignment for sess %p", sess);
825                 goto out;
826         }
827
828         TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
829                 sess, sess->acg->acg_name, acg->acg_name);
830
831         old_acg = sess->acg;
832         sess->acg = NULL; /* to catch implicit dependencies earlier */
833
834 retry_add:
835         add_failed = false;
836         list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
837                 unsigned int inq_changed_ua_needed = 0;
838
839                 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
840                         shead = &sess->sess_tgt_dev_list_hash[i];
841
842                         list_for_each_entry(tgt_dev, shead,
843                                         sess_tgt_dev_list_entry) {
844                                 if ((tgt_dev->dev == acg_dev->dev) &&
845                                     (tgt_dev->lun == acg_dev->lun) &&
846                                     (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
847                                         TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
848                                                 "LUN %lld stays the same",
849                                                 sess, tgt_dev,
850                                                 (unsigned long long)tgt_dev->lun);
851                                         tgt_dev->acg_dev = acg_dev;
852                                         goto next;
853                                 } else if (tgt_dev->lun == acg_dev->lun)
854                                         inq_changed_ua_needed = 1;
855                         }
856                 }
857
858                 luns_changed = true;
859
860                 TRACE_MGMT_DBG("sess %p: Allocating new tgt_dev for LUN %lld",
861                         sess, (unsigned long long)acg_dev->lun);
862
863                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
864                 if (tgt_dev == NULL) {
865                         add_failed = true;
866                         break;
867                 }
868
869                 tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
870                                                  not_needed_freed;
871 next:
872                 continue;
873         }
874
875         something_freed = false;
876         not_needed_freed = true;
877         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
878                 struct scst_tgt_dev *t;
879                 shead = &sess->sess_tgt_dev_list_hash[i];
880
881                 list_for_each_entry_safe(tgt_dev, t, shead,
882                                         sess_tgt_dev_list_entry) {
883                         if (tgt_dev->acg_dev->acg != acg) {
884                                 TRACE_MGMT_DBG("sess %p: Deleting not used "
885                                         "tgt_dev %p for LUN %lld",
886                                         sess, tgt_dev,
887                                         (unsigned long long)tgt_dev->lun);
888                                 luns_changed = true;
889                                 something_freed = true;
890                                 scst_free_tgt_dev(tgt_dev);
891                         }
892                 }
893         }
894
895         if (add_failed && something_freed) {
896                 TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
897                 goto retry_add;
898         }
899
900         sess->acg = acg;
901
902         TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
903                 old_acg->acg_name, acg->acg_name);
904         list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
905
906         if (luns_changed) {
907                 scst_report_luns_changed_sess(sess);
908
909                 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
910                         shead = &sess->sess_tgt_dev_list_hash[i];
911
912                         list_for_each_entry(tgt_dev, shead,
913                                         sess_tgt_dev_list_entry) {
914                                 if (tgt_dev->inq_changed_ua_needed) {
915                                         TRACE_MGMT_DBG("sess %p: Setting "
916                                                 "INQUIRY DATA HAS CHANGED UA "
917                                                 "(tgt_dev %p)", sess, tgt_dev);
918
919                                         tgt_dev->inq_changed_ua_needed = 0;
920
921                                         scst_gen_aen_or_ua(tgt_dev,
922                                                 SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
923                                 }
924                         }
925                 }
926         }
927
928 out:
929         TRACE_EXIT();
930         return;
931 }
932
933 /* Activity is supposed to be suspended and scst_mutex held */
934 void scst_check_reassign_sessions(void)
935 {
936         struct scst_tgt_template *tgtt;
937
938         TRACE_ENTRY();
939
940         list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
941                 struct scst_tgt *tgt;
942                 list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
943                         struct scst_session *sess;
944                         list_for_each_entry(sess, &tgt->sess_list,
945                                                 sess_list_entry) {
946                                 scst_check_reassign_sess(sess);
947                         }
948                 }
949         }
950
951         TRACE_EXIT();
952         return;
953 }
954
955 int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
956 {
957         int res;
958
959         TRACE_ENTRY();
960
961         switch (cmd->state) {
962         case SCST_CMD_STATE_INIT_WAIT:
963         case SCST_CMD_STATE_INIT:
964         case SCST_CMD_STATE_PRE_PARSE:
965         case SCST_CMD_STATE_DEV_PARSE:
966         case SCST_CMD_STATE_DEV_DONE:
967                 if (cmd->internal)
968                         res = SCST_CMD_STATE_FINISHED_INTERNAL;
969                 else
970                         res = SCST_CMD_STATE_PRE_XMIT_RESP;
971                 break;
972
973         case SCST_CMD_STATE_PRE_DEV_DONE:
974         case SCST_CMD_STATE_MODE_SELECT_CHECKS:
975                 res = SCST_CMD_STATE_DEV_DONE;
976                 break;
977
978         case SCST_CMD_STATE_PRE_XMIT_RESP:
979                 res = SCST_CMD_STATE_XMIT_RESP;
980                 break;
981
982         case SCST_CMD_STATE_PREPROCESS_DONE:
983         case SCST_CMD_STATE_PREPARE_SPACE:
984         case SCST_CMD_STATE_RDY_TO_XFER:
985         case SCST_CMD_STATE_DATA_WAIT:
986         case SCST_CMD_STATE_TGT_PRE_EXEC:
987         case SCST_CMD_STATE_SEND_FOR_EXEC:
988         case SCST_CMD_STATE_LOCAL_EXEC:
989         case SCST_CMD_STATE_REAL_EXEC:
990         case SCST_CMD_STATE_REAL_EXECUTING:
991                 res = SCST_CMD_STATE_PRE_DEV_DONE;
992                 break;
993
994         default:
995                 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
996                         cmd->state, cmd, cmd->cdb[0]);
997                 sBUG();
998                 /* Invalid state to suppress a compiler warning */
999                 res = SCST_CMD_STATE_LAST_ACTIVE;
1000         }
1001
1002         TRACE_EXIT_RES(res);
1003         return res;
1004 }
1005 EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
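/*
 * In short: commands that fail before reaching the device (init/parse, or
 * already in dev_done) are routed straight to response delivery, while
 * commands that were transferring data or executing are first routed
 * through the dev_done path, so the handler can observe the failure.
 */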
1006
1007 void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
1008 {
1009         TRACE_ENTRY();
1010
1011 #ifdef CONFIG_SCST_EXTRACHECKS
1012         switch (cmd->state) {
1013         case SCST_CMD_STATE_XMIT_RESP:
1014         case SCST_CMD_STATE_FINISHED:
1015         case SCST_CMD_STATE_FINISHED_INTERNAL:
1016         case SCST_CMD_STATE_XMIT_WAIT:
1017                 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
1018                         cmd->state, cmd, cmd->cdb[0]);
1019                 sBUG();
1020         }
1021 #endif
1022
1023         cmd->state = scst_get_cmd_abnormal_done_state(cmd);
1024
1025 #ifdef CONFIG_SCST_EXTRACHECKS
1026         if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
1027                    (cmd->tgt_dev == NULL) && !cmd->internal) {
1028                 PRINT_CRIT_ERROR("Wrong state %d of a not inited cmd "
1029                         "(cmd %p, op %x)", cmd->state, cmd, cmd->cdb[0]);
1030                 sBUG();
1031         }
1032 #endif
1033
1034         TRACE_EXIT();
1035         return;
1036 }
1037 EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
1038
1039 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
1040 {
1041         int i, l;
1042
1043         TRACE_ENTRY();
1044
1045         scst_check_restore_sg_buff(cmd);
1046         cmd->resp_data_len = resp_data_len;
1047
1048         if (resp_data_len == cmd->bufflen)
1049                 goto out;
1050
1051         l = 0;
1052         for (i = 0; i < cmd->sg_cnt; i++) {
1053                 l += cmd->sg[i].length;
1054                 if (l >= resp_data_len) {
1055                         int left = resp_data_len - (l - cmd->sg[i].length);
1056 #ifdef CONFIG_SCST_DEBUG
1057                         TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
1058                                 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
1059                                 "left %d",
1060                                 cmd, (long long unsigned int)cmd->tag,
1061                                 resp_data_len, i,
1062                                 cmd->sg[i].length, left);
1063 #endif
1064                         cmd->orig_sg_cnt = cmd->sg_cnt;
1065                         cmd->orig_sg_entry = i;
1066                         cmd->orig_entry_len = cmd->sg[i].length;
1067                         cmd->sg_cnt = (left > 0) ? i+1 : i;
1068                         cmd->sg[i].length = left;
1069                         cmd->sg_buff_modified = 1;
1070                         break;
1071                 }
1072         }
1073
1074 out:
1075         TRACE_EXIT();
1076         return;
1077 }
1078 EXPORT_SYMBOL(scst_set_resp_data_len);
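/*
 * Usage sketch (alloc_len is illustrative): a dev handler honoring a
 * smaller ALLOCATION LENGTH than the buffer it filled truncates the
 * response; the SG chain is trimmed above and later restored by
 * scst_check_restore_sg_buff().
 *
 *	if (alloc_len < cmd->resp_data_len)
 *		scst_set_resp_data_len(cmd, alloc_len);
 */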
1079
1080 /* No locks */
1081 int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
1082 {
1083         struct scst_tgt *tgt = cmd->tgt;
1084         int res = 0;
1085         unsigned long flags;
1086
1087         TRACE_ENTRY();
1088
1089         spin_lock_irqsave(&tgt->tgt_lock, flags);
1090         tgt->retry_cmds++;
1091         /*
1092          * A memory barrier is needed here to enforce the exact ordering
1093          * between the write to retry_cmds above and the read of
1094          * finished_cmds below, so we don't miss a command that finished
1095          * while we were queuing this one for retry.
1096          */
1097         smp_mb();
1098         TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
1099               tgt->retry_cmds);
1100         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
1101                 /* At least one cmd finished, so try again */
1102                 tgt->retry_cmds--;
1103                 TRACE_RETRY("Some command(s) finished, direct retry "
1104                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
1105                       "retry_cmds=%d)", finished_cmds,
1106                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
1107                 res = -1;
1108                 goto out_unlock_tgt;
1109         }
1110
1111         TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
1112         list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
1113
1114         if (!tgt->retry_timer_active) {
1115                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
1116                 add_timer(&tgt->retry_timer);
1117                 tgt->retry_timer_active = 1;
1118         }
1119
1120 out_unlock_tgt:
1121         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1122
1123         TRACE_EXIT_RES(res);
1124         return res;
1125 }
1126
1127 /* Returns 0 to continue, >0 to restart, <0 to break */
1128 static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
1129         unsigned long cur_time, unsigned long max_time,
1130         struct scst_session *sess, unsigned long *flags,
1131         struct scst_tgt_template *tgtt)
1132 {
1133         int res = -1; /* break */
1134
1135         TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
1136                 "pending time %ld", cmd, cmd->cmd_hw_pending,
1137                 (long)(cur_time - cmd->start_time) / HZ,
1138                 (long)(cur_time - cmd->hw_pending_start) / HZ);
1139
1140         if (time_before_eq(cur_time, cmd->start_time + max_time)) {
1141                 /* Cmds are ordered, so no need to check more */
1142                 goto out;
1143         }
1144
1145         if (!cmd->cmd_hw_pending) {
1146                 res = 0; /* continue */
1147                 goto out;
1148         }
1149
1150         if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
1151                 /* Cmds are ordered, so no need to check more */
1152                 goto out;
1153         }
1154
1155         TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
1156                 cmd, (cur_time - cmd->hw_pending_start) / HZ,
1157                 cmd->state);
1158
1159         cmd->cmd_hw_pending = 0;
1160
1161         spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
1162         tgtt->on_hw_pending_cmd_timeout(cmd);
1163         spin_lock_irqsave(&sess->sess_list_lock, *flags);
1164
1165         res = 1; /* restart */
1166
1167 out:
1168         TRACE_EXIT_RES(res);
1169         return res;
1170 }
1171
1172 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
1173 static void scst_hw_pending_work_fn(void *p)
1174 #else
1175 static void scst_hw_pending_work_fn(struct delayed_work *work)
1176 #endif
1177 {
1178 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
1179         struct scst_session *sess = (struct scst_session *)p;
1180 #else
1181         struct scst_session *sess = container_of(work, struct scst_session,
1182                                         hw_pending_work);
1183 #endif
1184         struct scst_tgt_template *tgtt = sess->tgt->tgtt;
1185         struct scst_cmd *cmd;
1186         unsigned long cur_time = jiffies;
1187         unsigned long flags;
1188         unsigned long max_time = tgtt->max_hw_pending_time * HZ;
1189
1190         TRACE_ENTRY();
1191
1192         TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);
1193
1194         clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
1195
1196         spin_lock_irqsave(&sess->sess_list_lock, flags);
1197
1198 restart:
1199         list_for_each_entry(cmd, &sess->search_cmd_list,
1200                                 sess_cmd_list_entry) {
1201                 int rc;
1202
1203                 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
1204                                         &flags, tgtt);
1205                 if (rc < 0)
1206                         break;
1207                 else if (rc == 0)
1208                         continue;
1209                 else
1210                         goto restart;
1211         }
1212
1213 restart1:
1214         list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
1215                                 sess_cmd_list_entry) {
1216                 int rc;
1217
1218                 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
1219                                         &flags, tgtt);
1220                 if (rc < 0)
1221                         break;
1222                 else if (rc == 0)
1223                         continue;
1224                 else
1225                         goto restart1;
1226         }
1227
1228         if (!list_empty(&sess->search_cmd_list) ||
1229             !list_empty(&sess->after_pre_xmit_cmd_list)) {
1230                 /*
1231                  * If there is no activity, stuck cmds might need one more
1232                  * run to be released, so reschedule once again.
1233                  */
1234                 TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
1235                         sess, tgtt->max_hw_pending_time);
1236                 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
1237                 schedule_delayed_work(&sess->hw_pending_work,
1238                                 tgtt->max_hw_pending_time * HZ);
1239         }
1240
1241         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
1242
1243         TRACE_EXIT();
1244         return;
1245 }
1246
1247 struct scst_tgt *scst_alloc_tgt(struct scst_tgt_template *tgtt)
1248 {
1249         struct scst_tgt *tgt;
1250
1251         TRACE_ENTRY();
1252
1253         tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
1254         if (tgt == NULL) {
1255                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
1256                 goto out;
1257         }
1258
1259         INIT_LIST_HEAD(&tgt->sess_list);
1260         init_waitqueue_head(&tgt->unreg_waitQ);
1261         tgt->tgtt = tgtt;
1262         tgt->sg_tablesize = tgtt->sg_tablesize;
1263         spin_lock_init(&tgt->tgt_lock);
1264         INIT_LIST_HEAD(&tgt->retry_cmd_list);
1265         atomic_set(&tgt->finished_cmds, 0);
1266         init_timer(&tgt->retry_timer);
1267         tgt->retry_timer.data = (unsigned long)tgt;
1268         tgt->retry_timer.function = scst_tgt_retry_timer_fn;
1269
1270 out:
1271         TRACE_EXIT_HRES((unsigned long)tgt);
1272         return tgt;
1273 }
1274
1275 void scst_free_tgt(struct scst_tgt *tgt)
1276 {
1277         TRACE_ENTRY();
1278
1279         kfree(tgt);
1280
1281         TRACE_EXIT();
1282         return;
1283 }
1284
1285 /* Called under scst_mutex and suspended activity */
1286 int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
1287 {
1288         struct scst_device *dev;
1289         int res = 0;
1290         static int dev_num; /* protected by scst_mutex */
1291
1292         TRACE_ENTRY();
1293
1294         dev = kzalloc(sizeof(*dev), gfp_mask);
1295         if (dev == NULL) {
1296                 TRACE(TRACE_OUT_OF_MEM, "%s",
1297                         "Allocation of scst_device failed");
1298                 res = -ENOMEM;
1299                 goto out;
1300         }
1301
1302         dev->handler = &scst_null_devtype;
1303         dev->p_cmd_lists = &scst_main_cmd_lists;
1304         atomic_set(&dev->dev_cmd_count, 0);
1305         atomic_set(&dev->write_cmd_count, 0);
1306         scst_init_mem_lim(&dev->dev_mem_lim);
1307         spin_lock_init(&dev->dev_lock);
1308         atomic_set(&dev->on_dev_count, 0);
1309         INIT_LIST_HEAD(&dev->blocked_cmd_list);
1310         INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
1311         INIT_LIST_HEAD(&dev->dev_acg_dev_list);
1312         INIT_LIST_HEAD(&dev->threads_list);
1313         init_waitqueue_head(&dev->on_dev_waitQ);
1314         dev->dev_double_ua_possible = 1;
1315         dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
1316         dev->dev_num = dev_num++;
1317
1318 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && defined(SCST_IO_CONTEXT)
1319 #if defined(CONFIG_BLOCK)
1320         dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1321         if (dev->dev_io_ctx == NULL) {
1322                 TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
1323                 res = -ENOMEM;
1324                 kfree(dev);
1325                 goto out;
1326         }
1327 #endif
1328 #endif
1329
1330         *out_dev = dev;
1331
1332 out:
1333         TRACE_EXIT_RES(res);
1334         return res;
1335 }
1336
1337 /* Called under scst_mutex and suspended activity */
1338 void scst_free_device(struct scst_device *dev)
1339 {
1340         TRACE_ENTRY();
1341
1342 #ifdef CONFIG_SCST_EXTRACHECKS
1343         if (!list_empty(&dev->dev_tgt_dev_list) ||
1344             !list_empty(&dev->dev_acg_dev_list)) {
1345                 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
1346                         "is not empty!", __func__);
1347                 sBUG();
1348         }
1349 #endif
1350
1351         kfree(dev->virt_name);
1352         __exit_io_context(dev->dev_io_ctx);
1353
1354         kfree(dev);
1355
1356         TRACE_EXIT();
1357         return;
1358 }
1359
1360 void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
1361 {
1362         atomic_set(&mem_lim->alloced_pages, 0);
1363         mem_lim->max_allowed_pages =
1364                 ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
1365 }
1366 EXPORT_SYMBOL(scst_init_mem_lim);
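/*
 * The shift arithmetic above converts scst_max_dev_cmd_mem (a limit in MB)
 * into pages: "<< 10" turns MB into KB, ">> (PAGE_SHIFT - 10)" divides by
 * the page size in KB. E.g. with 4 KB pages (PAGE_SHIFT == 12) and a
 * 2048 MB limit: (2048 << 10) >> 2 == 524288 pages.
 */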
1367
1368 static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
1369                                         struct scst_device *dev, uint64_t lun)
1370 {
1371         struct scst_acg_dev *res;
1372
1373         TRACE_ENTRY();
1374
1375 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1376         res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
1377 #else
1378         res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
1379 #endif
1380         if (res == NULL) {
1381                 TRACE(TRACE_OUT_OF_MEM,
1382                       "%s", "Allocation of scst_acg_dev failed");
1383                 goto out;
1384         }
1385 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1386         memset(res, 0, sizeof(*res));
1387 #endif
1388
1389         res->dev = dev;
1390         res->acg = acg;
1391         res->lun = lun;
1392
1393 out:
1394         TRACE_EXIT_HRES(res);
1395         return res;
1396 }
1397
1398 void scst_acg_dev_destroy(struct scst_acg_dev *acg_dev)
1399 {
1400         TRACE_ENTRY();
1401
1402         kmem_cache_free(scst_acgd_cachep, acg_dev);
1403
1404         TRACE_EXIT();
1405         return;
1406 }
1407
1408 /* Activity is supposed to be suspended and scst_mutex held */
1409 static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
1410 {
1411         TRACE_ENTRY();
1412
1413         TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
1414                 acg_dev);
1415         list_del(&acg_dev->acg_dev_list_entry);
1416         list_del(&acg_dev->dev_acg_dev_list_entry);
1417
1418         if (acg_dev->acg_dev_kobj_initialized) {
1419                 kobject_del(&acg_dev->acg_dev_kobj);
1420                 kobject_put(&acg_dev->acg_dev_kobj);
1421         } else
1422                 scst_acg_dev_destroy(acg_dev);
1423
1424         TRACE_EXIT();
1425         return;
1426 }
1427
1428 /* Activity is supposed to be suspended and scst_mutex held */
1429 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
1430 {
1431         struct scst_acg *acg;
1432
1433         TRACE_ENTRY();
1434
1435         acg = kzalloc(sizeof(*acg), GFP_KERNEL);
1436         if (acg == NULL) {
1437                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
1438                 goto out;
1439         }
1440
1441         INIT_LIST_HEAD(&acg->acg_dev_list);
1442         INIT_LIST_HEAD(&acg->acg_sess_list);
1443         INIT_LIST_HEAD(&acg->acn_list);
1444         acg->acg_name = acg_name;
1445
1446         TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
1447         list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
1448
1449         scst_check_reassign_sessions();
1450
1451 out:
1452         TRACE_EXIT_HRES(acg);
1453         return acg;
1454 }
1455
1456 /* Activity is supposed to be suspended and scst_mutex held */
1457 int scst_destroy_acg(struct scst_acg *acg)
1458 {
1459         struct scst_acn *n, *nn;
1460         struct scst_acg_dev *acg_dev, *acg_dev_tmp;
1461         int res = 0;
1462
1463         TRACE_ENTRY();
1464
1465         if (!list_empty(&acg->acg_sess_list)) {
1466                 PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
1467                 res = -EBUSY;
1468                 goto out;
1469         }
1470
1471         TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
1472         list_del(&acg->scst_acg_list_entry);
1473
1474         /* Freeing acg_devs */
1475         list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
1476                         acg_dev_list_entry) {
1477                 struct scst_tgt_dev *tgt_dev, *tt;
1478                 list_for_each_entry_safe(tgt_dev, tt,
1479                                  &acg_dev->dev->dev_tgt_dev_list,
1480                                  dev_tgt_dev_list_entry) {
1481                         if (tgt_dev->acg_dev == acg_dev)
1482                                 scst_free_tgt_dev(tgt_dev);
1483                 }
1484                 scst_free_acg_dev(acg_dev);
1485         }
1486
1487         /* Freeing names */
1488         list_for_each_entry_safe(n, nn, &acg->acn_list,
1489                         acn_list_entry) {
1490                 list_del(&n->acn_list_entry);
1491                 kfree(n->name);
1492                 kfree(n);
1493         }
1494         INIT_LIST_HEAD(&acg->acn_list);
1495
1496         kfree(acg);
1497 out:
1498         TRACE_EXIT_RES(res);
1499         return res;
1500 }
1501
1502 /*
1503  * scst_mutex is supposed to be held; there must be no parallel activity in
1504  * this session.
1505  */
1506 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
1507         struct scst_acg_dev *acg_dev)
1508 {
1509         int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
1510         struct scst_tgt_dev *tgt_dev, *t = NULL;
1511         struct scst_device *dev = acg_dev->dev;
1512         struct list_head *sess_tgt_dev_list_head;
1513         struct scst_tgt_template *vtt = sess->tgt->tgtt;
1514         int rc, i, sl;
1515         bool share_io_ctx = false;
1516         uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1517
1518         TRACE_ENTRY();
1519
1520 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1521         tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
1522 #else
1523         tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
1524 #endif
1525         if (tgt_dev == NULL) {
1526                 TRACE(TRACE_OUT_OF_MEM, "%s",
1527                       "Allocation of scst_tgt_dev failed");
1528                 goto out;
1529         }
1530 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1531         memset(tgt_dev, 0, sizeof(*tgt_dev));
1532 #endif
1533
1534         tgt_dev->dev = dev;
1535         tgt_dev->lun = acg_dev->lun;
1536         tgt_dev->acg_dev = acg_dev;
1537         tgt_dev->sess = sess;
1538         atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
1539
1540         scst_sgv_pool_use_norm(tgt_dev);
1541
1542         if (dev->scsi_dev != NULL) {
1543                 ini_sg = dev->scsi_dev->host->sg_tablesize;
1544                 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
1545                 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
1546                                 ENABLE_CLUSTERING);
1547         } else {
1548                 ini_sg = (1 << 15) /* infinite */;
1549                 ini_unchecked_isa_dma = 0;
1550                 ini_use_clustering = 0;
1551         }
1552         tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
1553
1554         if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
1555             !sess->tgt->tgtt->no_clustering)
1556                 scst_sgv_pool_use_norm_clust(tgt_dev);
1557
1558         if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
1559                 scst_sgv_pool_use_dma(tgt_dev);
1560
1561         TRACE_MGMT_DBG("Device %s on SCST lun=%llu",
1562                dev->virt_name, (long long unsigned int)tgt_dev->lun);
1563
1564         spin_lock_init(&tgt_dev->tgt_dev_lock);
1565         INIT_LIST_HEAD(&tgt_dev->UA_list);
1566         spin_lock_init(&tgt_dev->thr_data_lock);
1567         INIT_LIST_HEAD(&tgt_dev->thr_data_list);
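        /*
         * Init the command ordering (SN) machinery: expected_sn starts at 1,
         * cur_sn_slot points at the first free slot, and the deferred and
         * skipped lists will hold commands arriving out of SN order.
         */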
1568         spin_lock_init(&tgt_dev->sn_lock);
1569         INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
1570         INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
1571         tgt_dev->expected_sn = 1;
1572         tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
1573         tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
1574         for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
1575                 atomic_set(&tgt_dev->sn_slots[i], 0);
1576
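        /*
         * Precompute at which processing stages atomic context is allowed,
         * based on the *_atomic capabilities of both the dev handler and
         * the target driver.
         */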
1577         if (dev->handler->parse_atomic &&
1578             (sess->tgt->tgtt->preprocessing_done == NULL)) {
1579                 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1580                         __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
1581                                 &tgt_dev->tgt_dev_flags);
1582                 if (dev->handler->exec_atomic)
1583                         __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
1584                                 &tgt_dev->tgt_dev_flags);
1585         }
1586         if (dev->handler->exec_atomic) {
1587                 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1588                         __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
1589                                 &tgt_dev->tgt_dev_flags);
1590                 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
1591                                 &tgt_dev->tgt_dev_flags);
1592                 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1593                         &tgt_dev->tgt_dev_flags);
1594         }
1595         if (dev->handler->dev_done_atomic &&
1596             sess->tgt->tgtt->xmit_response_atomic) {
1597                 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1598                         &tgt_dev->tgt_dev_flags);
1599         }
1600
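        /* Queue the initial POWER ON/RESET Unit Attention for this nexus */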
1601         sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
1602                 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
1603         scst_alloc_set_UA(tgt_dev, sense_buffer, sl, 0);
1604
1605         tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
1606
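        /*
         * Look for another tgt_dev of the same initiator on this device, so
         * both can share a single IO context (this lets IO schedulers such
         * as CFQ treat them as one stream).
         */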
1607         if (tgt_dev->sess->initiator_name != NULL) {
1608                 spin_lock_bh(&dev->dev_lock);
1609                 list_for_each_entry(t, &dev->dev_tgt_dev_list,
1610                                 dev_tgt_dev_list_entry) {
1611                         TRACE_DBG("t name %s (tgt_dev name %s)",
1612                                 t->sess->initiator_name,
1613                                 tgt_dev->sess->initiator_name);
1614                         if (t->sess->initiator_name == NULL)
1615                                 continue;
1616                         if (strcmp(t->sess->initiator_name,
1617                                         tgt_dev->sess->initiator_name) == 0) {
1618                                 share_io_ctx = true;
1619                                 break;
1620                         }
1621                 }
1622                 spin_unlock_bh(&dev->dev_lock);
1623         }
1624
1625         if (share_io_ctx) {
1626                 TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
1627                         t->tgt_dev_io_ctx, tgt_dev,
1628                         tgt_dev->sess->initiator_name);
1629                 tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
1630         } else {
1631 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && defined(SCST_IO_CONTEXT)
1632 #if defined(CONFIG_BLOCK)
1633                 tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1634                 if (tgt_dev->tgt_dev_io_ctx == NULL) {
1635                         TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
1636                                 "context for dev %s (initiator %s)",
1637                                 dev->virt_name, sess->initiator_name);
1638                         goto out_free;
1639                 }
1640 #endif
1641 #endif
1642         }
1643
1644         if (vtt->threads_num > 0) {
1645                 rc = 0;
1646                 if (dev->handler->threads_num > 0)
1647                         rc = scst_add_dev_threads(dev, vtt->threads_num);
1648                 else if (dev->handler->threads_num == 0)
1649                         rc = scst_add_global_threads(vtt->threads_num);
1650                 if (rc != 0)
1651                         goto out_free;
1652         }
1653
1654         if (dev->handler && dev->handler->attach_tgt) {
1655                 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1656                       tgt_dev);
1657                 rc = dev->handler->attach_tgt(tgt_dev);
1658                 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1659                 if (rc != 0) {
1660                         PRINT_ERROR("Device handler's %s attach_tgt() "
1661                             "failed: %d", dev->handler->name, rc);
1662                         goto out_thr_free;
1663                 }
1664         }
1665
1666         spin_lock_bh(&dev->dev_lock);
1667         list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1668         if (dev->dev_reserved)
1669                 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1670         spin_unlock_bh(&dev->dev_lock);
1671
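        /* Finally, make the new tgt_dev visible via the session's LUN hash */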
1672         sess_tgt_dev_list_head =
1673                 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1674         list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1675                       sess_tgt_dev_list_head);
1676
1677 out:
1678         TRACE_EXIT();
1679         return tgt_dev;
1680
1681 out_thr_free:
1682         if (vtt->threads_num > 0) {
1683                 if (dev->handler->threads_num > 0)
1684                         scst_del_dev_threads(dev, vtt->threads_num);
1685                 else if (dev->handler->threads_num == 0)
1686                         scst_del_global_threads(vtt->threads_num);
1687         }
1688
1689 out_free:
1690         scst_free_all_UA(tgt_dev);
1691         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1692
1693         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1694         tgt_dev = NULL;
1695         goto out;
1696 }
1697
1698 /* No locks are supposed to be held, except scst_mutex, which must be held */
1699 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1700 {
1701         TRACE_ENTRY();
1702
1703         scst_clear_reservation(tgt_dev);
1704
1705         /* With activity suspended the lock isn't needed, but let's be safe */
1706         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1707         scst_free_all_UA(tgt_dev);
1708         memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1709         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1710
1711         if (queue_UA) {
1712                 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1713                 int sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
1714                                 tgt_dev->dev->d_sense,
1715                                 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1716                 scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
1717         }
1718
1719         TRACE_EXIT();
1720         return;
1721 }
1722
1723 /*
1724  * scst_mutex is supposed to be held, and there must be no parallel
1725  * activity in this session.
1726  */
1727 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1728 {
1729         struct scst_device *dev = tgt_dev->dev;
1730         struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1731
1732         TRACE_ENTRY();
1733
1734         tm_dbg_deinit_tgt_dev(tgt_dev);
1735
1736         spin_lock_bh(&dev->dev_lock);
1737         list_del(&tgt_dev->dev_tgt_dev_list_entry);
1738         spin_unlock_bh(&dev->dev_lock);
1739
1740         list_del(&tgt_dev->sess_tgt_dev_list_entry);
1741
1742         scst_clear_reservation(tgt_dev);
1743         scst_free_all_UA(tgt_dev);
1744
1745         if (dev->handler && dev->handler->detach_tgt) {
1746                 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1747                       tgt_dev);
1748                 dev->handler->detach_tgt(tgt_dev);
1749                 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1750         }
1751
1752         if (vtt->threads_num > 0) {
1753                 if (dev->handler->threads_num > 0)
1754                         scst_del_dev_threads(dev, vtt->threads_num);
1755                 else if (dev->handler->threads_num == 0)
1756                         scst_del_global_threads(vtt->threads_num);
1757         }
1758
1759         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1760
1761         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1762
1763         TRACE_EXIT();
1764         return;
1765 }
1766
1767 /* scst_mutex is supposed to be held */
1768 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1769 {
1770         int res = 0;
1771         struct scst_acg_dev *acg_dev;
1772         struct scst_tgt_dev *tgt_dev;
1773
1774         TRACE_ENTRY();
1775
1776         list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1777                         acg_dev_list_entry) {
1778                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1779                 if (tgt_dev == NULL) {
1780                         res = -ENOMEM;
1781                         goto out_free;
1782                 }
1783         }
1784
1785 out:
1786         TRACE_EXIT();
1787         return res;
1788
1789 out_free:
1790         scst_sess_free_tgt_devs(sess);
1791         goto out;
1792 }
1793
1794 /*
1795  * scst_mutex is supposed to be held, and there must be no parallel
1796  * activity in this session.
1797  */
1798 void scst_sess_free_tgt_devs(struct scst_session *sess)
1799 {
1800         int i;
1801         struct scst_tgt_dev *tgt_dev, *t;
1802
1803         TRACE_ENTRY();
1804
1805         /* The session is going down, no users, so no locks */
1806         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1807                 struct list_head *sess_tgt_dev_list_head =
1808                         &sess->sess_tgt_dev_list_hash[i];
1809                 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1810                                 sess_tgt_dev_list_entry) {
1811                         scst_free_tgt_dev(tgt_dev);
1812                 }
1813                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1814         }
1815
1816         TRACE_EXIT();
1817         return;
1818 }
1819
1820 /* The activity is supposed to be suspended and scst_mutex held */
1821 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1822         uint64_t lun, int read_only, bool gen_scst_report_luns_changed)
1823 {
1824         int res = 0;
1825         struct scst_acg_dev *acg_dev;
1826         struct scst_tgt_dev *tgt_dev;
1827         struct scst_session *sess;
1828         LIST_HEAD(tmp_tgt_dev_list);
1829
1830         TRACE_ENTRY();
1831
1834         acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1835         if (acg_dev == NULL) {
1836                 res = -ENOMEM;
1837                 goto out;
1838         }
1839         acg_dev->rd_only = read_only;
1840
1841         TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1842                 acg_dev);
1843         list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1844         list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1845
1846         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1847                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1848                 if (tgt_dev == NULL) {
1849                         res = -ENOMEM;
1850                         goto out_free;
1851                 }
1852                 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1853                               &tmp_tgt_dev_list);
1854         }
1855
1856         if (gen_scst_report_luns_changed)
1857                 scst_report_luns_changed(acg);
1858
1859         PRINT_INFO("Added device %s to group %s (LUN %llu, "
1860                 "rd_only %d)", dev->virt_name, acg->acg_name,
1861                 (long long unsigned int)lun, read_only);
1862
1863 out:
1864         TRACE_EXIT_RES(res);
1865         return res;
1866
1867 out_free:
1868         list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1869                          extra_tgt_dev_list_entry) {
1870                 scst_free_tgt_dev(tgt_dev);
1871         }
1872         scst_free_acg_dev(acg_dev);
1873         goto out;
1874 }
1875
1876 /* The activity is supposed to be suspended and scst_mutex held */
1877 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev,
1878         bool gen_scst_report_luns_changed)
1879 {
1880         int res = 0;
1881         struct scst_acg_dev *acg_dev = NULL, *a;
1882         struct scst_tgt_dev *tgt_dev, *tt;
1883
1884         TRACE_ENTRY();
1885
1886         list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1887                 if (a->dev == dev) {
1888                         acg_dev = a;
1889                         break;
1890                 }
1891         }
1892
1893         if (acg_dev == NULL) {
1894                 PRINT_ERROR("Device not found in group %s", acg->acg_name);
1895                 res = -EINVAL;
1896                 goto out;
1897         }
1898
1899         list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1900                          dev_tgt_dev_list_entry) {
1901                 if (tgt_dev->acg_dev == acg_dev)
1902                         scst_free_tgt_dev(tgt_dev);
1903         }
1904         scst_free_acg_dev(acg_dev);
1905
1906         if (gen_scst_report_luns_changed)
1907                 scst_report_luns_changed(acg);
1908
1909         PRINT_INFO("Removed device %s from group %s", dev->virt_name,
1910                 acg->acg_name);
1911
1912 out:
1913         TRACE_EXIT_RES(res);
1914         return res;
1915 }
1916
1917 /* The activity is supposed to be suspended and scst_mutex held */
1918 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1919 {
1920         int res = 0;
1921         struct scst_acn *n;
1922         int len;
1923         char *nm;
1924
1925         TRACE_ENTRY();
1926
1927         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1928                 if (strcmp(n->name, name) == 0) {
1929                         PRINT_ERROR("Name %s already exists in group %s",
1930                                 name, acg->acg_name);
1931                         res = -EINVAL;
1932                         goto out;
1933                 }
1934         }
1935
1936         n = kmalloc(sizeof(*n), GFP_KERNEL);
1937         if (n == NULL) {
1938                 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1939                 res = -ENOMEM;
1940                 goto out;
1941         }
1942
1943         len = strlen(name);
1944         nm = kmalloc(len + 1, GFP_KERNEL);
1945         if (nm == NULL) {
1946                 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1947                 res = -ENOMEM;
1948                 goto out_free;
1949         }
1950
1951         strcpy(nm, name);
1952         n->name = nm;
1953
1954         list_add_tail(&n->acn_list_entry, &acg->acn_list);
1955
1956 out:
1957         if (res == 0) {
1958                 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1959                 scst_check_reassign_sessions();
1960         }
1961
1962         TRACE_EXIT_RES(res);
1963         return res;
1964
1965 out_free:
1966         kfree(n);
1967         goto out;
1968 }
1969
1970 /* scst_mutex is supposed to be held */
1971 void __scst_acg_remove_acn(struct scst_acn *n)
1972 {
1973         TRACE_ENTRY();
1974
1975         list_del(&n->acn_list_entry);
1976         kfree(n->name);
1977         kfree(n);
1978
1979         TRACE_EXIT();
1980         return;
1981 }
1982
1983 /* The activity is supposed to be suspended and scst_mutex held */
1984 int scst_acg_remove_name(struct scst_acg *acg, const char *name, bool reassign)
1985 {
1986         int res = -EINVAL;
1987         struct scst_acn *n;
1988
1989         TRACE_ENTRY();
1990
1991         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1992                 if (strcmp(n->name, name) == 0) {
1993                         __scst_acg_remove_acn(n);
1994                         res = 0;
1995                         break;
1996                 }
1997         }
1998
1999         if (res == 0) {
2000                 PRINT_INFO("Removed name %s from group %s", name,
2001                         acg->acg_name);
2002                 if (reassign)
2003                         scst_check_reassign_sessions();
2004         } else
2005                 PRINT_ERROR("Unable to find name %s in group %s", name,
2006                         acg->acg_name);
2007
2008         TRACE_EXIT_RES(res);
2009         return res;
2010 }
2011
2012 static struct scst_cmd *scst_create_prepare_internal_cmd(
2013         struct scst_cmd *orig_cmd, int bufsize)
2014 {
2015         struct scst_cmd *res;
2016         gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
2017
2018         TRACE_ENTRY();
2019
2020         res = scst_alloc_cmd(gfp_mask);
2021         if (res == NULL)
2022                 goto out;
2023
2024         res->cmd_lists = orig_cmd->cmd_lists;
2025         res->sess = orig_cmd->sess;
2026         res->atomic = scst_cmd_atomic(orig_cmd);
2027         res->internal = 1;
2028         res->tgtt = orig_cmd->tgtt;
2029         res->tgt = orig_cmd->tgt;
2030         res->dev = orig_cmd->dev;
2031         res->tgt_dev = orig_cmd->tgt_dev;
2032         res->lun = orig_cmd->lun;
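        /* Internal cmds go to the head of the queue, ahead of queued cmds */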
2033         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
2034         res->data_direction = SCST_DATA_UNKNOWN;
2035         res->orig_cmd = orig_cmd;
2036         res->bufflen = bufsize;
2037
2038         scst_sess_get(res->sess);
2039         if (res->tgt_dev != NULL)
2040                 __scst_get(0);
2041
2042         res->state = SCST_CMD_STATE_PRE_PARSE;
2043
2044 out:
2045         TRACE_EXIT_HRES((unsigned long)res);
2046         return res;
2047 }
2048
2049 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
2050 {
2051         int res = 0;
2052         static const uint8_t request_sense[6] =
2053             { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
2054         struct scst_cmd *rs_cmd;
2055
2056         TRACE_ENTRY();
2057
2058         if (orig_cmd->sense != NULL) {
2059                 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
2060                         orig_cmd->sense, orig_cmd);
2061                 mempool_free(orig_cmd->sense, scst_sense_mempool);
2062                 orig_cmd->sense = NULL;
2063         }
2064
2065         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
2066                         SCST_SENSE_BUFFERSIZE);
2067         if (rs_cmd == NULL)
2068                 goto out_error;
2069
2070         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
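        /* Byte 1, bit 0 is the DESC bit: request descriptor format sense */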
2071         rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
2072         rs_cmd->cdb_len = sizeof(request_sense);
2073         rs_cmd->data_direction = SCST_DATA_READ;
2074         rs_cmd->expected_data_direction = rs_cmd->data_direction;
2075         rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
2076         rs_cmd->expected_values_set = 1;
2077
2078         TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
2079                 "cmd list", rs_cmd);
2080         spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2081         list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
2082         wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
2083         spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2084
2085 out:
2086         TRACE_EXIT_RES(res);
2087         return res;
2088
2089 out_error:
2090         res = -1;
2091         goto out;
2092 }
2093
2094 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
2095 {
2096         struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
2097         uint8_t *buf;
2098         int len;
2099
2100         TRACE_ENTRY();
2101
2102         sBUG_ON(orig_cmd == NULL);
2103
2104         len = scst_get_buf_first(req_cmd, &buf);
2105
2106         if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
2107             SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
2108                 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
2109                         buf, len);
2110                 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
2111                         len);
2112         } else {
2113                 PRINT_ERROR("%s", "Unable to get the sense via "
2114                         "REQUEST SENSE, returning HARDWARE ERROR");
2115                 scst_set_cmd_error(orig_cmd,
2116                         SCST_LOAD_SENSE(scst_sense_hardw_error));
2117         }
2118
2119         if (len > 0)
2120                 scst_put_buf(req_cmd, buf);
2121
2122         TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
2123                 "cmd list", orig_cmd);
2124         spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2125         list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
2126         wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
2127         spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2128
2129         TRACE_EXIT();
2130         return;
2131 }
2132
2133 int scst_finish_internal_cmd(struct scst_cmd *cmd)
2134 {
2135         int res;
2136
2137         TRACE_ENTRY();
2138
2139         sBUG_ON(!cmd->internal);
2140
2141         if (cmd->cdb[0] == REQUEST_SENSE)
2142                 scst_complete_request_sense(cmd);
2143
2144         __scst_cmd_put(cmd);
2145
2146         res = SCST_CMD_STATE_RES_CONT_NEXT;
2147
2148         TRACE_EXIT_HRES(res);
2149         return res;
2150 }
2151
2152 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2153 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
2154 {
2155         struct scsi_request *req;
2156
2157         TRACE_ENTRY();
2158
2159         if (scsi_cmd) {
2160                 req = scsi_cmd->sc_request;
2161                 if (req) {
2162                         if (req->sr_bufflen)
2163                                 kfree(req->sr_buffer);
2164                         scsi_release_request(req);
2165                 }
2166         }
2167
2168         TRACE_EXIT();
2169         return;
2170 }
2171
2172 static void scst_send_release(struct scst_device *dev)
2173 {
2174         struct scsi_request *req;
2175         struct scsi_device *scsi_dev;
2176         uint8_t cdb[6];
2177
2178         TRACE_ENTRY();
2179
2180         if (dev->scsi_dev == NULL)
2181                 goto out;
2182
2183         scsi_dev = dev->scsi_dev;
2184
2185         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
2186         if (req == NULL) {
2187                 PRINT_ERROR("Allocation of scsi_request failed: unable "
2188                             "to RELEASE device %s", dev->virt_name);
2189                 goto out;
2190         }
2191
2192         memset(cdb, 0, sizeof(cdb));
2193         cdb[0] = RELEASE;
2194         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2195             ((scsi_dev->lun << 5) & 0xe0) : 0;
2196         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
2197         req->sr_cmd_len = sizeof(cdb);
2198         req->sr_data_direction = SCST_DATA_NONE;
2199         req->sr_use_sg = 0;
2200         req->sr_bufflen = 0;
2201         req->sr_buffer = NULL;
2202         req->sr_request->rq_disk = dev->rq_disk;
2203         req->sr_sense_buffer[0] = 0;
2204
2205         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
2206                 "mid-level", req);
2207         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
2208                     scst_req_done, 15, 3);
2209
2210 out:
2211         TRACE_EXIT();
2212         return;
2213 }
2214 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2215 static void scst_send_release(struct scst_device *dev)
2216 {
2217         struct scsi_device *scsi_dev;
2218         unsigned char cdb[6];
2219         uint8_t sense[SCSI_SENSE_BUFFERSIZE];
2220         int rc, i;
2221
2222         TRACE_ENTRY();
2223
2224         if (dev->scsi_dev == NULL)
2225                 goto out;
2226
2227         scsi_dev = dev->scsi_dev;
2228
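        /* Try RELEASE up to 5 times in case of transient failures */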
2229         for (i = 0; i < 5; i++) {
2230                 memset(cdb, 0, sizeof(cdb));
2231                 cdb[0] = RELEASE;
2232                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2233                     ((scsi_dev->lun << 5) & 0xe0) : 0;
2234
2235                 memset(sense, 0, sizeof(sense));
2236
2237                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
2238                         "SCSI mid-level");
2239                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
2240                                 sense, 15, 0, 0
2241 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
2242                                 , NULL
2243 #endif
2244                                 );
2245                 TRACE_DBG("RELEASE done: %x", rc);
2246
2247                 if (scsi_status_is_good(rc)) {
2248                         break;
2249                 } else {
2250                         PRINT_ERROR("RELEASE failed: %d", rc);
2251                         PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
2252                         scst_check_internal_sense(dev, rc, sense,
2253                                 sizeof(sense));
2254                 }
2255         }
2256
2257 out:
2258         TRACE_EXIT();
2259         return;
2260 }
2261 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2262
2263 /* scst_mutex is supposed to be held */
2264 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
2265 {
2266         struct scst_device *dev = tgt_dev->dev;
2267         int release = 0;
2268
2269         TRACE_ENTRY();
2270
2271         spin_lock_bh(&dev->dev_lock);
2272         if (dev->dev_reserved &&
2273             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
2274                 /* This is the one who holds the reservation */
2275                 struct scst_tgt_dev *tgt_dev_tmp;
2276                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2277                                     dev_tgt_dev_list_entry) {
2278                         clear_bit(SCST_TGT_DEV_RESERVED,
2279                                     &tgt_dev_tmp->tgt_dev_flags);
2280                 }
2281                 dev->dev_reserved = 0;
2282                 release = 1;
2283         }
2284         spin_unlock_bh(&dev->dev_lock);
2285
2286         if (release)
2287                 scst_send_release(dev);
2288
2289         TRACE_EXIT();
2290         return;
2291 }
2292
2293 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
2294         const char *initiator_name)
2295 {
2296         struct scst_session *sess;
2297         int i;
2298         int len;
2299         char *nm;
2300
2301         TRACE_ENTRY();
2302
2303 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2304         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2305 #else
2306         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2307 #endif
2308         if (sess == NULL) {
2309                 TRACE(TRACE_OUT_OF_MEM, "%s",
2310                       "Allocation of scst_session failed");
2311                 goto out;
2312         }
2313 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2314         memset(sess, 0, sizeof(*sess));
2315 #endif
2316
2317         sess->init_phase = SCST_SESS_IPH_INITING;
2318         sess->shut_phase = SCST_SESS_SPH_READY;
2319         atomic_set(&sess->refcnt, 0);
2320         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2321                 struct list_head *sess_tgt_dev_list_head =
2322                          &sess->sess_tgt_dev_list_hash[i];
2323                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2324         }
2325         spin_lock_init(&sess->sess_list_lock);
2326         INIT_LIST_HEAD(&sess->search_cmd_list);
2327         INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2328         sess->tgt = tgt;
2329         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2330         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2331 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2332         INIT_DELAYED_WORK(&sess->hw_pending_work,
2333                 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2334 #else
2335         INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2336 #endif
2337
2338 #ifdef CONFIG_SCST_MEASURE_LATENCY
2339         spin_lock_init(&sess->lat_lock);
2340 #endif
2341
2342         len = strlen(initiator_name);
2343         nm = kmalloc(len + 1, gfp_mask);
2344         if (nm == NULL) {
2345                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2346                 goto out_free;
2347         }
2348
2349         strcpy(nm, initiator_name);
2350         sess->initiator_name = nm;
2351
2352 out:
2353         TRACE_EXIT();
2354         return sess;
2355
2356 out_free:
2357         kmem_cache_free(scst_sess_cachep, sess);
2358         sess = NULL;
2359         goto out;
2360 }
2361
2362 void scst_free_session(struct scst_session *sess)
2363 {
2364         TRACE_ENTRY();
2365
2366         mutex_lock(&scst_mutex);
2367
2368         TRACE_DBG("Removing sess %p from the list", sess);
2369         list_del(&sess->sess_list_entry);
2370         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2371         list_del(&sess->acg_sess_list_entry);
2372
2373         scst_sess_free_tgt_devs(sess);
2374
2375         /* Called under lock to protect from too early tgt release */
2376         wake_up_all(&sess->tgt->unreg_waitQ);
2377
2378         mutex_unlock(&scst_mutex);
2379
2380         scst_sess_sysfs_put(sess);
2381
2382         TRACE_EXIT();
2383         return;
2384 }
2385
2386 void scst_release_session(struct scst_session *sess)
2387 {
2388         TRACE_ENTRY();
2389
2390         kfree(sess->initiator_name);
2391         kmem_cache_free(scst_sess_cachep, sess);
2392
2393         TRACE_EXIT();
2394         return;
2395 }
2396
2397 void scst_free_session_callback(struct scst_session *sess)
2398 {
2399         struct completion *c;
2400
2401         TRACE_ENTRY();
2402
2403         TRACE_DBG("Freeing session %p", sess);
2404
2405         cancel_delayed_work_sync(&sess->hw_pending_work);
2406
2407         c = sess->shutdown_compl;
2408
2409         if (sess->unreg_done_fn) {
2410                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2411                 sess->unreg_done_fn(sess);
2412                 TRACE_DBG("%s", "unreg_done_fn() returned");
2413         }
2414         scst_free_session(sess);
2415
2416         if (c)
2417                 complete_all(c);
2418
2419         TRACE_EXIT();
2420         return;
2421 }
2422
2423 void scst_sched_session_free(struct scst_session *sess)
2424 {
2425         unsigned long flags;
2426
2427         TRACE_ENTRY();
2428
2429         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2430                 PRINT_CRIT_ERROR("session %p is going to shut down with unknown "
2431                         "shut phase %lx", sess, sess->shut_phase);
2432                 sBUG();
2433         }
2434
2435         spin_lock_irqsave(&scst_mgmt_lock, flags);
2436         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2437         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2438         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2439
2440         wake_up(&scst_mgmt_waitQ);
2441
2442         TRACE_EXIT();
2443         return;
2444 }
2445
2446 void scst_cmd_get(struct scst_cmd *cmd)
2447 {
2448         __scst_cmd_get(cmd);
2449 }
2450 EXPORT_SYMBOL(scst_cmd_get);
2451
2452 void scst_cmd_put(struct scst_cmd *cmd)
2453 {
2454         __scst_cmd_put(cmd);
2455 }
2456 EXPORT_SYMBOL(scst_cmd_put);
2457
2458 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2459 {
2460         struct scst_cmd *cmd;
2461
2462         TRACE_ENTRY();
2463
2464 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2465         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2466 #else
2467         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2468 #endif
2469         if (cmd == NULL) {
2470                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2471                 goto out;
2472         }
2473 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2474         memset(cmd, 0, sizeof(*cmd));
2475 #endif
2476
2477         cmd->state = SCST_CMD_STATE_INIT_WAIT;
2478         cmd->start_time = jiffies;
2479         atomic_set(&cmd->cmd_ref, 1);
2480         cmd->cmd_lists = &scst_main_cmd_lists;
2481         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2482         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2483         cmd->timeout = SCST_DEFAULT_TIMEOUT;
2484         cmd->retries = 0;
2485         cmd->data_len = -1;
2486         cmd->is_send_status = 1;
2487         cmd->resp_data_len = -1;
2488
2489         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2490         cmd->dbl_ua_orig_resp_data_len = -1;
2491
2492 out:
2493         TRACE_EXIT();
2494         return cmd;
2495 }
2496
2497 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2498 {
2499         scst_sess_put(cmd->sess);
2500
2501         /*
2502          * At this point tgt_dev can be dead, but the pointer remains non-NULL
2503          */
2504         if (likely(cmd->tgt_dev != NULL))
2505                 __scst_put();
2506
2507         scst_destroy_cmd(cmd);
2508         return;
2509 }
2510
2511 /* No locks supposed to be held */
2512 void scst_free_cmd(struct scst_cmd *cmd)
2513 {
2514         int destroy = 1;
2515
2516         TRACE_ENTRY();
2517
2518         TRACE_DBG("Freeing cmd %p (tag %llu)",
2519                   cmd, (long long unsigned int)cmd->tag);
2520
2521         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2522                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2523                         cmd, atomic_read(&scst_cmd_count));
2524         }
2525
2526         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2527                 cmd->dec_on_dev_needed);
2528
2529 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2530 #if defined(CONFIG_SCST_EXTRACHECKS)
2531         if (cmd->scsi_req) {
2532                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2533                         "scsi_req!");
2534                 scst_release_request(cmd);
2535         }
2536 #endif
2537 #endif
2538
2539         /*
2540          * The target driver may have already freed the sg buffer before
2541          * calling scst_tgt_cmd_done(); e.g., scst_local has to do that.
2542          */
2543         if (!cmd->tgt_data_buf_alloced)
2544                 scst_check_restore_sg_buff(cmd);
2545
2546         if ((cmd->tgtt->on_free_cmd != NULL) && likely(!cmd->internal)) {
2547                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2548                 scst_set_cur_start(cmd);
2549                 cmd->tgtt->on_free_cmd(cmd);
2550                 scst_set_tgt_on_free_time(cmd);
2551                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2552         }
2553
2554         if (likely(cmd->dev != NULL)) {
2555                 struct scst_dev_type *handler = cmd->dev->handler;
2556                 if (handler->on_free_cmd != NULL) {
2557                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2558                                 handler->name, cmd);
2559                         scst_set_cur_start(cmd);
2560                         handler->on_free_cmd(cmd);
2561                         scst_set_dev_on_free_time(cmd);
2562                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
2563                                 handler->name);
2564                 }
2565         }
2566
2567         scst_release_space(cmd);
2568
2569         if (unlikely(cmd->sense != NULL)) {
2570                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2571                 mempool_free(cmd->sense, scst_sense_mempool);
2572                 cmd->sense = NULL;
2573         }
2574
2575         if (likely(cmd->tgt_dev != NULL)) {
2576 #ifdef CONFIG_SCST_EXTRACHECKS
2577                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2578                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
2579                             "%d, target %s, LUN %llu, sn %ld, expected_sn %ld)",
2580                             cmd, cmd->cdb[0], cmd->tgtt->name,
2581                             (long long unsigned int)cmd->lun,
2582                             cmd->sn, cmd->tgt_dev->expected_sn);
2583                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2584                 }
2585 #endif
2586
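                /*
                 * An out-of-SN cmd must outlive the SN accounting: whichever
                 * side sets SCST_CMD_CAN_BE_DESTROYED second (here or in the
                 * deferred SN processing) does the actual destruction.
                 */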
2587                 if (unlikely(cmd->out_of_sn)) {
2588                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2589                                 "destroy=%d", cmd,
2590                                 (long long unsigned int)cmd->tag,
2591                                 cmd->sn, destroy);
2592                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2593                                         &cmd->cmd_flags);
2594                 }
2595         }
2596
2597         if (likely(destroy))
2598                 scst_destroy_put_cmd(cmd);
2599
2600         TRACE_EXIT();
2601         return;
2602 }
2603
2604 /* No locks supposed to be held. */
2605 void scst_check_retries(struct scst_tgt *tgt)
2606 {
2607         int need_wake_up = 0;
2608
2609         TRACE_ENTRY();
2610
2611         /*
2612          * We don't worry about overflow of finished_cmds, because we check
2613          * only for its change.
2614          */
2615         atomic_inc(&tgt->finished_cmds);
2616         /* See comment in scst_queue_retry_cmd() */
2617         smp_mb__after_atomic_inc();
2618         if (unlikely(tgt->retry_cmds > 0)) {
2619                 struct scst_cmd *c, *tc;
2620                 unsigned long flags;
2621
2622                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2623                       tgt->retry_cmds);
2624
2625                 spin_lock_irqsave(&tgt->tgt_lock, flags);
2626                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2627                                 cmd_list_entry) {
2628                         tgt->retry_cmds--;
2629
2630                         TRACE_RETRY("Moving retry cmd %p to head of active "
2631                                 "cmd list (retry_cmds left %d)",
2632                                 c, tgt->retry_cmds);
2633                         spin_lock(&c->cmd_lists->cmd_list_lock);
2634                         list_move(&c->cmd_list_entry,
2635                                   &c->cmd_lists->active_cmd_list);
2636                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2637                         spin_unlock(&c->cmd_lists->cmd_list_lock);
2638
2639                         need_wake_up++;
2640                         if (need_wake_up >= 2) /* "slow start" */
2641                                 break;
2642                 }
2643                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2644         }
2645
2646         TRACE_EXIT();
2647         return;
2648 }
2649
2650 static void scst_tgt_retry_timer_fn(unsigned long arg)
2651 {
2652         struct scst_tgt *tgt = (struct scst_tgt *)arg;
2653         unsigned long flags;
2654
2655         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2656
2657         spin_lock_irqsave(&tgt->tgt_lock, flags);
2658         tgt->retry_timer_active = 0;
2659         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2660
2661         scst_check_retries(tgt);
2662
2663         TRACE_EXIT();
2664         return;
2665 }
2666
2667 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2668 {
2669         struct scst_mgmt_cmd *mcmd;
2670
2671         TRACE_ENTRY();
2672
2673         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2674         if (mcmd == NULL) {
2675                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2676                         "failed, some commands and their data could leak");
2677                 goto out;
2678         }
2679         memset(mcmd, 0, sizeof(*mcmd));
2680
2681 out:
2682         TRACE_EXIT();
2683         return mcmd;
2684 }
2685
2686 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2687 {
2688         unsigned long flags;
2689
2690         TRACE_ENTRY();
2691
2692         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2693         atomic_dec(&mcmd->sess->sess_cmd_count);
2694         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2695
2696         scst_sess_put(mcmd->sess);
2697
2698         if (mcmd->mcmd_tgt_dev != NULL)
2699                 __scst_put();
2700
2701         mempool_free(mcmd, scst_mgmt_mempool);
2702
2703         TRACE_EXIT();
2704         return;
2705 }
2706
2707 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2708 int scst_alloc_request(struct scst_cmd *cmd)
2709 {
2710         int res = 0;
2711         struct scsi_request *req;
2712         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2713
2714         TRACE_ENTRY();
2715
2716         /* cmd->dev->scsi_dev must be non-NULL here */
2717         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2718         if (req == NULL) {
2719                 TRACE(TRACE_OUT_OF_MEM, "%s",
2720                       "Allocation of scsi_request failed");
2721                 res = -ENOMEM;
2722                 goto out;
2723         }
2724
2725         cmd->scsi_req = req;
2726
2727         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2728         req->sr_cmd_len = cmd->cdb_len;
2729         req->sr_data_direction = cmd->data_direction;
2730         req->sr_use_sg = cmd->sg_cnt;
2731         req->sr_bufflen = cmd->bufflen;
2732         req->sr_buffer = cmd->sg;
2733         req->sr_request->rq_disk = cmd->dev->rq_disk;
2734         req->sr_sense_buffer[0] = 0;
2735
2736         cmd->scsi_req->upper_private_data = cmd;
2737
2738 out:
2739         TRACE_EXIT();
2740         return res;
2741 }
2742
2743 void scst_release_request(struct scst_cmd *cmd)
2744 {
2745         scsi_release_request(cmd->scsi_req);
2746         cmd->scsi_req = NULL;
2747 }
2748 #endif
2749
2750 static bool is_report_sg_limitation(void)
2751 {
2752 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2753         return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2754 #else
2755         return false;
2756 #endif
2757 }
2758
2759 int scst_alloc_space(struct scst_cmd *cmd)
2760 {
2761         gfp_t gfp_mask;
2762         int res = -ENOMEM;
2763         int atomic = scst_cmd_atomic(cmd);
2764         int flags;
2765         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2766         static int ll;
2767
2768         TRACE_ENTRY();
2769
2770         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2771
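        /*
         * In atomic context do not allocate on an SGV cache miss, use only
         * cached entries; no_sgv commands bypass the SGV cache entirely.
         */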
2772         flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2773         if (cmd->no_sgv)
2774                 flags |= SGV_POOL_ALLOC_NO_CACHED;
2775
2776         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2777                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2778         if (cmd->sg == NULL)
2779                 goto out;
2780
2781         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2782                 if ((ll < 10) || is_report_sg_limitation()) {
2783                         PRINT_INFO("Unable to complete command due to "
2784                                 "SG IO count limitation (requested %d, "
2785                                 "available %d, tgt lim %d)", cmd->sg_cnt,
2786                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2787                         ll++;
2788                 }
2789                 goto out_sg_free;
2790         }
2791
2792         if (cmd->data_direction != SCST_DATA_BIDI)
2793                 goto success;
2794
2795         cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2796                          flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2797                          &cmd->dev->dev_mem_lim, NULL);
2798         if (cmd->in_sg == NULL)
2799                 goto out_sg_free;
2800
2801         if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2802                 if ((ll < 10) || is_report_sg_limitation()) {
2803                         PRINT_INFO("Unable to complete command due to "
2804                                 "SG IO count limitation (IN buffer, requested "
2805                                 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2806                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2807                         ll++;
2808                 }
2809                 goto out_in_sg_free;
2810         }
2811
2812 success:
2813         res = 0;
2814
2815 out:
2816         TRACE_EXIT();
2817         return res;
2818
2819 out_in_sg_free:
2820         sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2821         cmd->in_sgv = NULL;
2822         cmd->in_sg = NULL;
2823         cmd->in_sg_cnt = 0;
2824
2825 out_sg_free:
2826         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2827         cmd->sgv = NULL;
2828         cmd->sg = NULL;
2829         cmd->sg_cnt = 0;
2830         goto out;
2831 }
2832
2833 static void scst_release_space(struct scst_cmd *cmd)
2834 {
2835         TRACE_ENTRY();
2836
2837         if (cmd->sgv == NULL)
2838                 goto out;
2839
2840         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2841                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2842                 goto out;
2843         }
2844
2845         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2846         cmd->sgv = NULL;
2847         cmd->sg_cnt = 0;
2848         cmd->sg = NULL;
2849         cmd->bufflen = 0;
2850         cmd->data_len = 0;
2851
2852         if (cmd->in_sgv != NULL) {
2853                 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2854                 cmd->in_sgv = NULL;
2855                 cmd->in_sg_cnt = 0;
2856                 cmd->in_sg = NULL;
2857                 cmd->in_bufflen = 0;
2858         }
2859
2860 out:
2861         TRACE_EXIT();
2862         return;
2863 }
2864
2865 #if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED))
2866
2867 /*
2868  * Can advance to the next dst_sg element. So, to copy into strictly one
2869  * dst_sg element, that element must either be the last in the chain, or
2870  * copy_len must equal dst_sg->length.
2871  */
2872 static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2873                         size_t *pdst_offs, struct scatterlist *src_sg,
2874                         size_t copy_len,
2875                         enum km_type d_km_type, enum km_type s_km_type)
2876 {
2877         int res = 0;
2878         struct scatterlist *dst_sg;
2879         size_t src_len, dst_len, src_offs, dst_offs;
2880         struct page *src_page, *dst_page;
2881
2882         dst_sg = *pdst_sg;
2883         dst_len = *pdst_len;
2884         dst_offs = *pdst_offs;
2885         dst_page = sg_page(dst_sg);
2886
2887         src_page = sg_page(src_sg);
2888         src_len = src_sg->length;
2889         src_offs = src_sg->offset;
2890
2891         do {
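                /*
                 * Copy at most one page per iteration: the chunk is capped
                 * by the page boundary of each side's offset and by the
                 * remaining src, dst and copy lengths; fully aligned whole
                 * pages take the faster copy_page() path.
                 */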
2892                 void *saddr, *daddr;
2893                 size_t n;
2894
2895                 saddr = kmap_atomic(src_page +
2896                                          (src_offs >> PAGE_SHIFT), s_km_type) +
2897                                     (src_offs & ~PAGE_MASK);
2898                 daddr = kmap_atomic(dst_page +
2899                                         (dst_offs >> PAGE_SHIFT), d_km_type) +
2900                                     (dst_offs & ~PAGE_MASK);
2901
2902                 if (((src_offs & ~PAGE_MASK) == 0) &&
2903                     ((dst_offs & ~PAGE_MASK) == 0) &&
2904                     (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2905                     (copy_len >= PAGE_SIZE)) {
2906                         copy_page(daddr, saddr);
2907                         n = PAGE_SIZE;
2908                 } else {
2909                         n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2910                                           PAGE_SIZE - (src_offs & ~PAGE_MASK));
2911                         n = min(n, src_len);
2912                         n = min(n, dst_len);
2913                         n = min_t(size_t, n, copy_len);
2914                         memcpy(daddr, saddr, n);
2915                 }
2916                 dst_offs += n;
2917                 src_offs += n;
2918
2919                 kunmap_atomic(saddr, s_km_type);
2920                 kunmap_atomic(daddr, d_km_type);
2921
2922                 res += n;
2923                 copy_len -= n;
2924                 if (copy_len == 0)
2925                         goto out;
2926
2927                 src_len -= n;
2928                 dst_len -= n;
2929                 if (dst_len == 0) {
2930                         dst_sg = sg_next(dst_sg);
2931                         if (dst_sg == NULL)
2932                                 goto out;
2933                         dst_page = sg_page(dst_sg);
2934                         dst_len = dst_sg->length;
2935                         dst_offs = dst_sg->offset;
2936                 }
2937         } while (src_len > 0);
2938
2939 out:
2940         *pdst_sg = dst_sg;
2941         *pdst_len = dst_len;
2942         *pdst_offs = dst_offs;
2943         return res;
2944 }
2945
2946 /**
2947  * sg_copy - copy one SG vector to another
2948  * @dst_sg:     destination SG
2949  * @src_sg:     source SG
2950  * @nents_to_copy: maximum number of entries to copy
2951  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2952  * @d_km_type:  kmap_atomic type for the destination SG
2953  * @s_km_type:  kmap_atomic type for the source SG
2954  *
2955  * Description:
2956  *    Data from the source SG vector will be copied to the destination SG
2957  *    vector. The end of each vector is determined by sg_next() returning
2958  *    NULL. Returns the number of bytes copied.
2959  */
2960 static int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2961             int nents_to_copy, size_t copy_len,
2962             enum km_type d_km_type, enum km_type s_km_type)
2963 {
2964         int res = 0;
2965         size_t dst_len, dst_offs;
2966
2967         if (copy_len == 0)
2968                 copy_len = 0x7FFFFFFF; /* copy all */
2969
2970         if (nents_to_copy == 0)
2971                 nents_to_copy = 0x7FFFFFFF; /* copy all */
2972
2973         dst_len = dst_sg->length;
2974         dst_offs = dst_sg->offset;
2975
2976         do {
2977                 int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2978                                 src_sg, copy_len, d_km_type, s_km_type);
2979                 copy_len -= copied;
2980                 res += copied;
2981                 if ((copy_len == 0) || (dst_sg == NULL))
2982                         goto out;
2983
2984                 nents_to_copy--;
2985                 if (nents_to_copy == 0)
2986                         goto out;
2987
2988                 src_sg = sg_next(src_sg);
2989         } while (src_sg != NULL);
2990
2991 out:
2992         return res;
2993 }
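/*
 * For example (a sketch), copying a whole response buffer from the
 * device-side SG to the target-side SG in process context boils down to:
 *
 *      sg_copy(cmd->tgt_sg, cmd->sg, 0, cmd->resp_data_len,
 *              KM_USER0, KM_USER1);
 *
 * which is essentially what the to-target branch of scst_copy_sg() below
 * does.
 */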
2994
2995 #endif /* !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
2996
2997 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2998 static void scsi_end_async(struct request *req, int error)
2999 {
3000         struct scsi_io_context *sioc = req->end_io_data;
3001
3002         TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
3003
3004         if (sioc->done)
3005 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
3006                 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3007 #else
3008                 sioc->done(sioc->data, sioc->sense, req->errors, req->resid_len);
3009 #endif
3010
3011         if (!sioc->full_cdb_used)
3012                 kmem_cache_free(scsi_io_context_cache, sioc);
3013         else
3014                 kfree(sioc);
3015
3016         __blk_put_request(req->q, req);
3017         return;
3018 }
3019
3020 /**
3021  * scst_scsi_exec_async - executes a SCSI command in pass-through mode
3022  * @cmd:        scst command
3023  * @done:       callback function when done
3024  */
3025 int scst_scsi_exec_async(struct scst_cmd *cmd,
3026                        void (*done)(void *, char *, int, int))
3027 {
3028         int res = 0;
3029         struct request_queue *q = cmd->dev->scsi_dev->request_queue;
3030         struct request *rq;
3031         struct scsi_io_context *sioc;
3032         int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
3033         gfp_t gfp = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
3034         int cmd_len = cmd->cdb_len;
3035
3036         if (cmd->ext_cdb_len == 0) {
3037                 TRACE_DBG("Simple CDB (cmd_len %d)", cmd_len);
3038                 sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
3039                 if (sioc == NULL) {
3040                         res = -ENOMEM;
3041                         goto out;
3042                 }
3043         } else {
3044                 cmd_len += cmd->ext_cdb_len;
3045
3046                 TRACE_DBG("Extended CDB (cmd_len %d)", cmd_len);
3047
3048                 sioc = kzalloc(sizeof(*sioc) + cmd_len, gfp);
3049                 if (sioc == NULL) {
3050                         res = -ENOMEM;
3051                         goto out;
3052                 }
3053
3054                 sioc->full_cdb_used = 1;
3055
3056                 memcpy(sioc->full_cdb, cmd->cdb, cmd->cdb_len);
3057                 memcpy(&sioc->full_cdb[cmd->cdb_len], cmd->ext_cdb,
3058                         cmd->ext_cdb_len);
3059         }
3060
3061         rq = blk_get_request(q, write, gfp);
3062         if (rq == NULL) {
3063                 res = -ENOMEM;
3064                 goto out_free_sioc;
3065         }
3066
3067         rq->cmd_type = REQ_TYPE_BLOCK_PC;
3068         rq->cmd_flags |= REQ_QUIET;
3069
3070         if (cmd->sg != NULL) {
3071                 res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
3072                 if (res) {
3073                         TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
3074                         goto out_free_rq;
3075                 }
3076         }
3077
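        /*
         * Bidirectional pass-through: the IN transfer travels on a linked
         * next_rq request, which requires QUEUE_FLAG_BIDI support in the
         * underlying request queue.
         */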
3078         if (cmd->data_direction == SCST_DATA_BIDI) {
3079                 struct request *next_rq;
3080
3081                 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
3082                         res = -EOPNOTSUPP;
3083                         goto out_free_unmap;
3084                 }
3085
3086                 next_rq = blk_get_request(q, READ, gfp);
3087                 if (next_rq == NULL) {
3088                         res = -ENOMEM;
3089                         goto out_free_unmap;
3090                 }
3091                 rq->next_rq = next_rq;
3092                 next_rq->cmd_type = rq->cmd_type;
3093
3094                 res = blk_rq_map_kern_sg(next_rq, cmd->in_sg,
3095                         cmd->in_sg_cnt, gfp);
3096                 if (res != 0)
3097                         goto out_free_unmap;
3098         }
3099
3100         TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
3101
3102         sioc->data = cmd;
3103         sioc->done = done;
3104
3105         rq->cmd_len = cmd_len;
3106         if (cmd->ext_cdb_len == 0) {
3107                 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3108                 memcpy(rq->cmd, cmd->cdb, cmd->cdb_len);
3109         } else
3110                 rq->cmd = sioc->full_cdb;
3111
3112         rq->sense = sioc->sense;
3113         rq->sense_len = sizeof(sioc->sense);
3114         rq->timeout = cmd->timeout;
3115         rq->retries = cmd->retries;
3116         rq->end_io_data = sioc;
3117
3118         blk_execute_rq_nowait(rq->q, NULL, rq,
3119                 (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
3120 out:
3121         return res;
3122
3123 out_free_unmap:
3124         if (rq->next_rq != NULL) {
3125                 blk_put_request(rq->next_rq);
3126                 rq->next_rq = NULL;
3127         }
3128         blk_rq_unmap_kern_sg(rq, res);
3129
3130 out_free_rq:
3131         blk_put_request(rq);
3132
3133 out_free_sioc:
3134         if (!sioc->full_cdb_used)
3135                 kmem_cache_free(scsi_io_context_cache, sioc);
3136         else
3137                 kfree(sioc);
3138         goto out;
3139 }
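
/*
 * Illustrative sketch (not compiled in): how a pass-through dev handler
 * might drive scst_scsi_exec_async(). The names my_exec_done() and
 * my_exec() are hypothetical; only the scst_scsi_exec_async() signature
 * above is taken from this file.
 */
#if 0
static void my_exec_done(void *data, char *sense, int result, int resid)
{
        struct scst_cmd *cmd = data;

        /*
         * result carries the raw request error status, resid the
         * residual byte count; inspect them (and sense, if set) and
         * complete cmd through the handler's normal completion path.
         */
}

static int my_exec(struct scst_cmd *cmd)
{
        int rc = scst_scsi_exec_async(cmd, my_exec_done);

        if (rc != 0) {
                /* Submission failed (e.g. -ENOMEM); fail the cmd here */
        }
        return rc;
}
#endif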
3140
3141 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
3142
3143 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3144 {
3145         struct scatterlist *src_sg, *dst_sg;
3146         unsigned int to_copy;
3147         int atomic = scst_cmd_atomic(cmd);
3148
3149         TRACE_ENTRY();
3150
3151         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3152                 if (cmd->data_direction != SCST_DATA_BIDI) {
3153                         src_sg = cmd->tgt_sg;
3154                         dst_sg = cmd->sg;
3155                         to_copy = cmd->bufflen;
3156                 } else {
3157                         TRACE_MEM("BIDI cmd %p", cmd);
3158                         src_sg = cmd->tgt_in_sg;
3159                         dst_sg = cmd->in_sg;
3160                         to_copy = cmd->in_bufflen;
3161                 }
3162         } else {
3163                 src_sg = cmd->sg;
3164                 dst_sg = cmd->tgt_sg;
3165                 to_copy = cmd->resp_data_len;
3166         }
3167
3168         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, to_copy %lld",
3169                 cmd, copy_dir, src_sg, dst_sg, (long long)to_copy);
3170
3171         if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3172                 /*
3173                  * It can happen, e.g., with scst_user for a cmd with
3174                  * delayed alloc that failed with a Check Condition.
3175                  */
3176                 goto out;
3177         }
3178
3179         sg_copy(dst_sg, src_sg, 0, to_copy,
3180                 atomic ? KM_SOFTIRQ0 : KM_USER0,
3181                 atomic ? KM_SOFTIRQ1 : KM_USER1);
3182
3183 out:
3184         TRACE_EXIT();
3185         return;
3186 }
3187 EXPORT_SYMBOL(scst_copy_sg);
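
/*
 * Illustrative use (hypothetical target driver code): after receiving
 * WRITE data into cmd->tgt_sg a driver would copy it towards the device
 * side with
 *
 *      scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);
 *
 * while for READ responses SCST_SG_COPY_TO_TARGET copies resp_data_len
 * bytes from cmd->sg into cmd->tgt_sg, as implemented above.
 */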
3188
3189 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3190
3191 #define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
3192 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3193
3194 int scst_get_cdb_len(const uint8_t *cdb)
3195 {
3196         return SCST_GET_CDB_LEN(cdb[0]);
3197 }
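
/*
 * Worked example: for READ(10) (opcode 0x28) the group code is
 * (0x28 >> 5) & 0x7 = 1, so scst_get_cdb_len() returns
 * SCST_CDB_LENGTH[1] = 10. The reserved and vendor specific groups
 * (3, 6 and 7) yield -1.
 */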
3198
3199 /* The get_trans_len_x helpers extract x bytes from the CDB as the transfer length, starting at offset off */
3200
3201 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3202 {
3203         cmd->cdb_len = 10;
3204         cmd->bufflen = 0;
3205         return 0;
3206 }
3207
3208 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3209 {
3210         cmd->bufflen = 6;
3211         return 0;
3212 }
3213
3214 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3215 {
3216         cmd->bufflen = READ_CAP_LEN;
3217         return 0;
3218 }
3219
3220 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3221 {
3222         int res = 0;
3223
3224         TRACE_ENTRY();
3225
3226         if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3227                 cmd->op_name = "READ CAPACITY(16)";
3228                 cmd->bufflen = READ_CAP16_LEN;
3229                 cmd->op_flags |= SCST_IMPLICIT_HQ;
3230         } else
3231                 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3232
3233         TRACE_EXIT_RES(res);
3234         return res;
3235 }
3236
3237 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3238 {
3239         cmd->bufflen = 1;
3240         return 0;
3241 }
3242
3243 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3244 {
3245         const uint8_t *p = cmd->cdb + off;
3246         int res = 0;
3247
3248         cmd->bufflen = 0;
3249         cmd->bufflen |= ((u32)p[0]) << 8;
3250         cmd->bufflen |= ((u32)p[1]);
3251
3252         switch (cmd->cdb[1] & 0x1f) {
3253         case 0:
3254         case 1:
3255         case 6:
3256                 if (cmd->bufflen != 0) {
3257                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3258                                 "allocation length for service action %x",
3259                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
3260                         goto out_inval;
3261                 }
3262                 break;
3263         }
3264
3265         switch (cmd->cdb[1] & 0x1f) {
3266         case 0:
3267         case 1:
3268                 cmd->bufflen = 20;
3269                 break;
3270         case 6:
3271                 cmd->bufflen = 32;
3272                 break;
3273         case 8:
3274                 cmd->bufflen = max(28, cmd->bufflen);
3275                 break;
3276         default:
3277                 PRINT_ERROR("READ POSITION: Invalid service action %x",
3278                         cmd->cdb[1] & 0x1f);
3279                 goto out_inval;
3280         }
3281
3282 out:
3283         return res;
3284
3285 out_inval:
3286         scst_set_cmd_error(cmd,
3287                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3288         res = 1;
3289         goto out;
3290 }
3291
3292 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3293 {
3294         cmd->bufflen = (u32)cmd->cdb[off];
3295         return 0;
3296 }
3297
3298 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3299 {
3300         cmd->bufflen = (u32)cmd->cdb[off];
3301         if (cmd->bufflen == 0)
3302                 cmd->bufflen = 256;
3303         return 0;
3304 }
3305
3306 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3307 {
3308         const uint8_t *p = cmd->cdb + off;
3309
3310         cmd->bufflen = 0;
3311         cmd->bufflen |= ((u32)p[0]) << 8;
3312         cmd->bufflen |= ((u32)p[1]);
3313
3314         return 0;
3315 }
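
/*
 * Worked example: READ(10) carries its transfer length in CDB bytes
 * 7-8, so it is decoded with get_trans_len_2() and off = 7; e.g.
 * cdb[7] = 0x00, cdb[8] = 0x10 gives bufflen = 16 (in blocks for
 * fixed-length commands, scaled later by the parse routines below).
 */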
3316
3317 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3318 {
3319         const uint8_t *p = cmd->cdb + off;
3320
3321         cmd->bufflen = 0;
3322         cmd->bufflen |= ((u32)p[0]) << 16;
3323         cmd->bufflen |= ((u32)p[1]) << 8;
3324         cmd->bufflen |= ((u32)p[2]);
3325
3326         return 0;
3327 }
3328
3329 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3330 {
3331         const uint8_t *p = cmd->cdb + off;
3332
3333         cmd->bufflen = 0;
3334         cmd->bufflen |= ((u32)p[0]) << 24;
3335         cmd->bufflen |= ((u32)p[1]) << 16;
3336         cmd->bufflen |= ((u32)p[2]) << 8;
3337         cmd->bufflen |= ((u32)p[3]);
3338
3339         return 0;
3340 }
3341
3342 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3343 {
3344         cmd->bufflen = 0;
3345         return 0;
3346 }
3347
3348 int scst_get_cdb_info(struct scst_cmd *cmd)
3349 {
3350         int dev_type = cmd->dev->type;
3351         int i, res = 0;
3352         uint8_t op;
3353         const struct scst_sdbops *ptr = NULL;
3354
3355         TRACE_ENTRY();
3356
3357         op = cmd->cdb[0];       /* get the opcode */
3358
3359         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3360                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3361                 dev_type);
3362
3363         i = scst_scsi_op_list[op];
3364         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3365                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3366                         ptr = &scst_scsi_op_table[i];
3367                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3368                               ptr->ops, ptr->devkey[0], /* disk     */
3369                               ptr->devkey[1],   /* tape     */
3370                               ptr->devkey[2],   /* printer */
3371                               ptr->devkey[3],   /* cpu      */
3372                               ptr->devkey[4],   /* cdr      */
3373                               ptr->devkey[5],   /* cdrom    */
3374                               ptr->devkey[6],   /* scanner */
3375                               ptr->devkey[7],   /* worm     */
3376                               ptr->devkey[8],   /* changer */
3377                               ptr->devkey[9],   /* commdev */
3378                               ptr->op_name);
3379                         TRACE_DBG("direction=%d flags=%d off=%d",
3380                               ptr->direction,
3381                               ptr->flags,
3382                               ptr->off);
3383                         break;
3384                 }
3385                 i++;
3386         }
3387
3388         if (unlikely(ptr == NULL)) {
3389                 /* opcode not found, or not supported for this device type */
3390                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3391                       dev_type);
3392                 res = -1;
3393                 cmd->op_flags = SCST_INFO_NOT_FOUND;
3394                 goto out;
3395         }
3396
3397         cmd->cdb_len = SCST_GET_CDB_LEN(op);
3398         cmd->op_name = ptr->op_name;
3399         cmd->data_direction = ptr->direction;
3400         cmd->op_flags = ptr->flags;
3401         res = (*ptr->get_trans_len)(cmd, ptr->off);
3402
3403 out:
3404         TRACE_EXIT_RES(res);
3405         return res;
3406 }
3407 EXPORT_SYMBOL(scst_get_cdb_info);
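
/*
 * Worked example: for a disk (dev_type 0) READ(10) CDB
 * 28 00 00 00 08 00 00 00 10 00 the table lookup sets cdb_len = 10
 * and data_direction = SCST_DATA_READ, and the op's get_trans_len()
 * callback decodes bytes 7-8 into bufflen = 0x10 (16 blocks).
 */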
3408
3409 /* Packs SCST LUN back to SCSI form using peripheral device addressing method */
3410 uint64_t scst_pack_lun(const uint64_t lun)
3411 {
3412         uint64_t res;
3413         uint16_t *p = (uint16_t *)&res;
3414
3415         res = lun;
3416         *p = cpu_to_be16(*p);
3417
3418         TRACE_EXIT_HRES((unsigned long)res);
3419         return res;
3420 }
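
/*
 * Worked example (little-endian host): for lun = 5 the byte swap turns
 * the low 16 bits into 0x00 0x05 in memory order, i.e. the SAM
 * peripheral device addressing form of LUN 5; the upper six bytes of
 * the result stay zero.
 */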
3421
3422 /*
3423  * Routine to extract a lun number from an 8-byte LUN structure
3424  * in network byte order (BE).
3425  * (see SAM-2, Section 4.12.3 page 40)
3426  * Supports the peripheral, flat space and logical unit addressing methods.
3427  */
3428 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3429 {
3430         uint64_t res = NO_SUCH_LUN;
3431         int address_method;
3432
3433         TRACE_ENTRY();
3434
3435         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3436
3437         if (unlikely(len < 2)) {
3438                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3439                         "more", len);
3440                 goto out;
3441         }
3442
3443         if (len > 2) {
3444                 switch (len) {
3445                 case 8:
3446                         if ((*((uint64_t *)lun) &
3447                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3448                                 goto out_err;
3449                         break;
3450                 case 4:
3451                         if (*((uint16_t *)&lun[2]) != 0)
3452                                 goto out_err;
3453                         break;
3454                 case 6:
3455                         if (*((uint32_t *)&lun[2]) != 0)
3456                                 goto out_err;
3457                         break;
3458                 default:
3459                         goto out_err;
3460                 }
3461         }
3462
3463         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
3464         switch (address_method) {
3465         case 0: /* peripheral device addressing method */
3466 #if 0
3467                 if (*lun) {
3468                         PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
3469                              "peripheral device addressing method 0x%02x, "
3470                              "expected 0", *lun);
3471                         break;
3472                 }
3473                 res = *(lun + 1);
3474                 break;
3475 #else
3476                 /*
3477                  * It appears to be legal to treat this as the flat space
3478                  * addressing method as well.
3479                  */
3480
3481                 /* fall through */
3482 #endif
3483
3484         case 1: /* flat space addressing method */
3485                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3486                 break;
3487
3488         case 2: /* logical unit addressing method */
3489                 if (*lun & 0x3f) {
3490                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3491                                     "addressing method 0x%02x, expected 0",
3492                                     *lun & 0x3f);
3493                         break;
3494                 }
3495                 if (*(lun + 1) & 0xe0) {
3496                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
3497                                     "addressing method 0x%02x, expected 0",
3498                                     (*(lun + 1) & 0xe0) >> 5);
3499                         break;
3500                 }
3501                 res = *(lun + 1) & 0x1f;
3502                 break;
3503
3504         case 3: /* extended logical unit addressing method */
3505         default:
3506                 PRINT_ERROR("Unimplemented LUN addressing method %u",
3507                             address_method);
3508                 break;
3509         }
3510
3511 out:
3512         TRACE_EXIT_RES((int)res);
3513         return res;
3514
3515 out_err:
3516         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
3517         goto out;
3518 }
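
/*
 * Worked example: the 8-byte LUN 00 05 00 00 00 00 00 00 passes the
 * trailing-zero check, has address_method 0 (peripheral) and falls
 * through to the flat space decode, yielding 5; the flat space form
 * 40 05 00 00 00 00 00 00 decodes to the same value.
 */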
3519
3520 int scst_calc_block_shift(int sector_size)
3521 {
3522         int block_shift = 0;
3523         int t;
3524
3525         if (sector_size == 0)
3526                 sector_size = 512;
3527
3528         t = sector_size;
3529         while (1) {
3530                 if ((t & 1) != 0)
3531                         break;
3532                 t >>= 1;
3533                 block_shift++;
3534         }
3535         if (block_shift < 9) {
3536                 PRINT_ERROR("Wrong sector size %d", sector_size);
3537                 block_shift = -1;
3538         }
3539
3540         TRACE_EXIT_RES(block_shift);
3541         return block_shift;
3542 }
3543 EXPORT_SYMBOL(scst_calc_block_shift);
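
/*
 * Worked examples: 512 -> 9, 4096 -> 12. A sector size whose largest
 * power-of-two factor is below 512, e.g. 520 = 8 * 65, stops at
 * block_shift 3 and is rejected with -1.
 */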
3544
3545 int scst_sbc_generic_parse(struct scst_cmd *cmd,
3546         int (*get_block_shift)(struct scst_cmd *cmd))
3547 {
3548         int res = 0;
3549
3550         TRACE_ENTRY();
3551
3552         /*
3553          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3554          * therefore change them only if necessary
3555          */
3556
3557         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3558               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3559
3560         switch (cmd->cdb[0]) {
3561         case VERIFY_6:
3562         case VERIFY:
3563         case VERIFY_12:
3564         case VERIFY_16:
3565                 if ((cmd->cdb[1] & BYTCHK) == 0) {
3566                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3567                         cmd->bufflen = 0;
3568                         goto set_timeout;
3569                 } else
3570                         cmd->data_len = 0;
3571                 break;
3572         default:
3573                 /* It's all good */
3574                 break;
3575         }
3576
3577         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
3578                 /*
3579                  * No need for locks here, since *_detach() cannot be
3580                  * called while there are outstanding commands.
3581                  */
3582                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3583         }
3584
3585 set_timeout:
3586         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3587                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
3588         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3589                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
3590         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3591                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
3592
3593         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
3594               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
3595
3596         TRACE_EXIT_RES(res);
3597         return res;
3598 }
3599 EXPORT_SYMBOL(scst_sbc_generic_parse);
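
/*
 * Illustrative sketch (not compiled in): a block-device dev handler's
 * parse() callback would typically just delegate here. The type my_dev,
 * the dh_priv usage and my_get_block_shift() are hypothetical.
 */
#if 0
static int my_get_block_shift(struct scst_cmd *cmd)
{
        struct my_dev *d = cmd->dev->dh_priv;

        return d->block_shift;  /* e.g. 9 for 512-byte sectors */
}

static int my_parse(struct scst_cmd *cmd)
{
        return scst_sbc_generic_parse(cmd, my_get_block_shift);
}
#endif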
3600
3601 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
3602         int (*get_block_shift)(struct scst_cmd *cmd))
3603 {
3604         int res = 0;
3605
3606         TRACE_ENTRY();
3607
3608         /*
3609          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3610          * therefore change them only if necessary
3611          */
3612
3613         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3614               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3615
3616         cmd->cdb[1] &= 0x1f;
3617
3618         switch (cmd->cdb[0]) {
3619         case VERIFY_6:
3620         case VERIFY:
3621         case VERIFY_12:
3622         case VERIFY_16:
3623                 if ((cmd->cdb[1] & BYTCHK) == 0) {
3624                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3625                         cmd->bufflen = 0;
3626                         goto set_timeout;
3627                 }
3628                 break;
3629         default:
3630                 /* It's all good */
3631                 break;
3632         }
3633
3634         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
3635                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3636
3637 set_timeout:
3638         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3639                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
3640         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3641                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
3642         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3643                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
3644
3645         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
3646                 cmd->data_direction);
3647
3648         TRACE_EXIT_RES(res);
3649         return res;
3650 }
3651 EXPORT_SYMBOL(scst_cdrom_generic_parse);
3652
3653 int scst_modisk_generic_parse(struct scst_cmd *cmd,
3654         int (*get_block_shift)(struct scst_cmd *cmd))
3655 {
3656         int res = 0;
3657
3658         TRACE_ENTRY();
3659
3660         /*
3661          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3662          * therefore change them only if necessary
3663          */
3664
3665         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3666               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3667
3668         cmd->cdb[1] &= 0x1f;
3669
3670         switch (cmd->cdb[0]) {
3671         case VERIFY_6:
3672         case VERIFY:
3673         case VERIFY_12:
3674         case VERIFY_16:
3675                 if ((cmd->cdb[1] & BYTCHK) == 0) {
3676                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3677                         cmd->bufflen = 0;
3678                         goto set_timeout;
3679                 }
3680                 break;
3681         default:
3682                 /* It's all good */
3683                 break;
3684         }
3685
3686         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
3687                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3688
3689 set_timeout:
3690         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3691                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
3692         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3693                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
3694         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3695                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
3696
3697         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
3698                 cmd->data_direction);
3699
3700         TRACE_EXIT_RES(res);
3701         return res;
3702 }
3703 EXPORT_SYMBOL(scst_modisk_generic_parse);
3704
3705 int scst_tape_generic_parse(struct scst_cmd *cmd,
3706         int (*get_block_size)(struct scst_cmd *cmd))
3707 {
3708         int res = 0;
3709
3710         TRACE_ENTRY();
3711
3712         /*
3713          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3714          * therefore change them only if necessary
3715          */
3716
3717         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3718               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3719
3720         if (cmd->cdb[0] == READ_POSITION) {
3721                 int tclp = cmd->cdb[1] & 4;
3722                 int long_bit = cmd->cdb[1] & 2;
3723                 int bt = cmd->cdb[1] & 1;
3724
3725                 if ((tclp == long_bit) && (!bt || !long_bit)) {
3726                         cmd->bufflen =
3727                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
3728                         cmd->data_direction = SCST_DATA_READ;
3729                 } else {
3730                         cmd->bufflen = 0;
3731                         cmd->data_direction = SCST_DATA_NONE;
3732                 }
3733         }
3734
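        /*
         * Note: the triple AND below works only because
         * SCST_TRANSFER_LEN_TYPE_FIXED shares its bit position with the
         * tape Fixed bit (bit 0 of CDB byte 1), so the op flag doubles
         * as the mask applied to cdb[1].
         */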
3735         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
3736                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
3737
3738         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3739                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
3740         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3741                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
3742         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3743                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
3744
3745         TRACE_EXIT_RES(res);
3746         return res;
3747 }
3748 EXPORT_SYMBOL(scst_tape_generic_parse);
3749
3750 static int scst_null_parse(struct scst_cmd *cmd)
3751 {
3752         int res = 0;
3753
3754         TRACE_ENTRY();
3755
3756         /*
3757          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3758          * therefore change them only if necessary
3759          */
3760
3761         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3762               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3763 #if 0
3764         switch (cmd->cdb[0]) {
3765         default:
3766                 /* It's all good */
3767                 break;
3768         }
3769 #endif
3770         TRACE_DBG("res %d bufflen %d direct %d",
3771               res, cmd->bufflen, cmd->data_direction);
3772
3773         TRACE_EXIT_RES(res);
3774         return res;
3775 }
3776
3777 int scst_changer_generic_parse(struct scst_cmd *cmd,
3778         int (*nothing)(struct scst_cmd *cmd))
3779 {
3780         int res = scst_null_parse(cmd);
3781
3782         if (cmd->op_flags & SCST_LONG_TIMEOUT)
3783                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
3784         else
3785                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
3786
3787         return res;
3788 }
3789 EXPORT_SYMBOL(scst_changer_generic_parse);
3790
3791 int scst_processor_generic_parse(struct scst_cmd *cmd,
3792         int (*nothing)(struct scst_cmd *cmd))
3793 {
3794         int res = scst_null_parse(cmd);
3795
3796         if (cmd->op_flags & SCST_LONG_TIMEOUT)
3797                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
3798         else
3799                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
3800
3801         return res;
3802 }
3803 EXPORT_SYMBOL(scst_processor_generic_parse);
3804
3805 int scst_raid_generic_parse(struct scst_cmd *cmd,
3806         int (*nothing)(struct scst_cmd *cmd))
3807 {
3808         int res = scst_null_parse(cmd);
3809
3810         if (cmd->op_flags & SCST_LONG_TIMEOUT)
3811                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
3812         else
3813                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
3814
3815         return res;
3816 }
3817 EXPORT_SYMBOL(scst_raid_generic_parse);
3818
3819 int scst_block_generic_dev_done(struct scst_cmd *cmd,
3820         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
3821 {
3822         int opcode = cmd->cdb[0];
3823         int status = cmd->status;
3824         int res = SCST_CMD_STATE_DEFAULT;
3825
3826         TRACE_ENTRY();
3827
3828         /*
3829          * SCST sets good defaults for cmd->is_send_status and
3830          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
3831          * therefore change them only if necessary
3832          */
3833
3834         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
3835                 switch (opcode) {
3836                 case READ_CAPACITY:
3837                 {
3838                         /* Always keep track of disk capacity */
3839                         int buffer_size, sector_size, sh;
3840                         uint8_t *buffer;
3841
3842                         buffer_size = scst_get_buf_first(cmd, &buffer);
3843                         if (unlikely(buffer_size <= 0)) {
3844                                 if (buffer_size < 0) {
3845                                         PRINT_ERROR("%s: Unable to get the"
3846                                         " buffer (%d)", __func__, buffer_size);
3847