Whitespace-only change: fixed a recently introduced checkpatch error
/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <asm/kmap_types.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
        int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        if (cmd->sense != NULL)
                goto memzero;

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

memzero:
        cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
        TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, cmd->sense_bufflen,
                scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);

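/*
 * Illustrative sketch, not part of the original file: a typical error
 * path in a dev handler or target driver using the helpers above.
 * unsupported_cdb_field() is a hypothetical predicate; the sense
 * definition is assumed to come from scst.h, where SCST_LOAD_SENSE()
 * expands a scst_sense_* triple into key, asc and ascq.
 *
 *      if (unsupported_cdb_field(cmd)) {
 *              scst_set_cmd_error(cmd,
 *                      SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
 *              scst_set_cmd_abnormal_done_state(cmd);
 *              return;
 *      }
 */
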
void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
        int key, int asc, int ascq)
{
        sBUG_ON(len == 0);

        memset(buffer, 0, len);

        if (d_sense) {
                /* Descriptor format */
                if (len < 4) {
                        PRINT_ERROR("Length %d of sense buffer too small to "
                                "fit sense %x:%x:%x", len, key, asc, ascq);
                }

                buffer[0] = 0x72;               /* Response Code        */
                if (len > 1)
                        buffer[1] = key;        /* Sense Key            */
                if (len > 2)
                        buffer[2] = asc;        /* ASC                  */
                if (len > 3)
                        buffer[3] = ascq;       /* ASCQ                 */
        } else {
                /* Fixed format */
                if (len < 14) {
                        PRINT_ERROR("Length %d of sense buffer too small to "
                                "fit sense %x:%x:%x", len, key, asc, ascq);
                }

                buffer[0] = 0x70;               /* Response Code        */
                if (len > 2)
                        buffer[2] = key;        /* Sense Key            */
                if (len > 7)
                        buffer[7] = 0x0a;       /* Additional Sense Length */
                if (len > 12)
                        buffer[12] = asc;       /* ASC                  */
                if (len > 13)
                        buffer[13] = ascq;      /* ASCQ                 */
        }

        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);

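/*
 * Illustrative sketch, not part of the original file: filling a local
 * buffer with fixed-format sense. Assuming the usual definition of
 * scst_sense_medium_changed_UA in scst.h, the call below yields
 * buffer[0] = 0x70 (response code), buffer[2] = key, buffer[7] = 0x0a
 * (additional sense length), buffer[12] = asc and buffer[13] = ascq,
 * exactly as written by the fixed-format branch above.
 *
 *      uint8_t sb[SCST_STANDARD_SENSE_LEN];
 *
 *      scst_set_sense(sb, sizeof(sb), false,
 *              SCST_LOAD_SENSE(scst_sense_medium_changed_UA));
 */
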
bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
        int key, int asc, int ascq)
{
        bool res = false;

        /* Response Code */
        if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
                /* Fixed format */

                if (len < 14) {
                        PRINT_ERROR("Sense too small to analyze (%d, "
                                "type fixed)", len);
                        goto out;
                }

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
                        goto out;
        } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
                /* Descriptor format */

                if (len < 4) {
                        PRINT_ERROR("Sense too small to analyze (%d, "
                                "type descriptor)", len);
                        goto out;
                }

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
                        goto out;
        } else
                goto out;

        res = true;

out:
        TRACE_EXIT_RES((int)res);
        return res;
}
EXPORT_SYMBOL(scst_analyze_sense);

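/*
 * Illustrative sketch, not part of the original file: matching returned
 * sense against a known pattern, the same way scst_set_initial_UA()
 * below does for queued UAs. handle_reset_ua() is a hypothetical
 * handler used only for this example.
 *
 *      if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
 *                      SCST_SENSE_ALL_VALID,
 *                      SCST_LOAD_SENSE(scst_sense_reset_UA)))
 *              handle_reset_ua(cmd);
 */
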
void scst_check_convert_sense(struct scst_cmd *cmd)
{
        bool d_sense;

        TRACE_ENTRY();

        if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
                goto out;

        d_sense = scst_get_cmd_dev_d_sense(cmd);
        if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
                TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
                        cmd);
                if (cmd->sense_bufflen < 14) {
                        PRINT_ERROR("Sense too small to convert (%d, "
                                "type fixed)", cmd->sense_bufflen);
                        goto out;
                }
                scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
                        cmd->sense[2], cmd->sense[12], cmd->sense[13]);
        } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
                                (cmd->sense[0] == 0x73))) {
                TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
                        cmd);
                if (cmd->sense_bufflen < 4) {
                        PRINT_ERROR("Sense too small to convert (%d, "
                                "type descriptor)", cmd->sense_bufflen);
                        goto out;
                }
                scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
                        cmd->sense[1], cmd->sense[2], cmd->sense[3]);
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_check_convert_sense);

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

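/*
 * Illustrative sketch, not part of the original file: a target driver
 * that cannot accept a command right now can report it as busy and
 * finish it through the usual done path. out_of_resources is a
 * hypothetical condition; scst_set_busy() itself picks BUSY vs
 * TASK SET FULL based on the session's outstanding command count.
 *
 *      if (out_of_resources) {
 *              scst_set_busy(cmd);
 *              scst_set_cmd_abnormal_done_state(cmd);
 *      }
 */
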
void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
                asc, ascq);

        /* Protect sess_tgt_dev_list_hash */
        mutex_lock(&scst_mutex);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                struct scst_tgt_dev *tgt_dev;

                list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        spin_lock_bh(&tgt_dev->tgt_dev_lock);
                        if (!list_empty(&tgt_dev->UA_list)) {
                                struct scst_tgt_dev_UA *ua;

                                ua = list_entry(tgt_dev->UA_list.next,
                                        typeof(*ua), UA_list_entry);
                                if (scst_analyze_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                SCST_SENSE_ALL_VALID,
                                                SCST_LOAD_SENSE(scst_sense_reset_UA))) {
                                        scst_set_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                tgt_dev->dev->d_sense,
                                                key, asc, ascq);
                                } else
                                        PRINT_ERROR("%s",
                                                "The first UA isn't RESET UA");
                        } else
                                PRINT_ERROR("%s", "There's no RESET UA to "
                                        "replace");
                        spin_unlock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
{
        struct scst_aen *aen;

        TRACE_ENTRY();

        aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
        if (aen == NULL) {
                PRINT_ERROR("AEN memory allocation failed. Corresponding "
                        "event notification will not be performed (initiator "
                        "%s)", tgt_dev->sess->initiator_name);
                goto out;
        }
        memset(aen, 0, sizeof(*aen));

        aen->sess = tgt_dev->sess;
        scst_sess_get(aen->sess);

        aen->lun = scst_pack_lun(tgt_dev->lun);

out:
        TRACE_EXIT_HRES((unsigned long)aen);
        return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
        TRACE_ENTRY();

        scst_sess_put(aen->sess);
        mempool_free(aen, scst_aen_mempool);

        TRACE_EXIT();
        return;
}

/* No locks */
void scst_capacity_data_changed(struct scst_device *dev)
{
        struct scst_tgt_dev *tgt_dev;
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

        TRACE_ENTRY();

        if (dev->type != TYPE_DISK) {
                TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
                        "CHANGED UA", dev->type);
                goto out;
        }

        TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

        mutex_lock(&scst_mutex);

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                            dev_tgt_dev_list_entry) {
                struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;

                if (tgtt->report_aen != NULL) {
                        struct scst_aen *aen;
                        int rc;

                        aen = scst_alloc_aen(tgt_dev);
                        if (aen == NULL)
                                goto queue_ua;

                        aen->event_fn = SCST_AEN_SCSI;
                        aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                        scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_capacity_data_changed));

                        TRACE_DBG("Calling target's %s report_aen(%p)",
                                tgtt->name, aen);
                        rc = tgtt->report_aen(aen);
                        TRACE_DBG("Target's %s report_aen(%p) returned %d",
                                tgtt->name, aen, rc);
                        if (rc == SCST_AEN_RES_SUCCESS)
                                continue;

                        scst_free_aen(aen);
                }
queue_ua:
                TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
                        tgt_dev);
                scst_set_sense(sense_buffer, sizeof(sense_buffer),
                        tgt_dev->dev->d_sense,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
                scst_check_set_UA(tgt_dev, sense_buffer,
                        sizeof(sense_buffer), 0);
        }

        mutex_unlock(&scst_mutex);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);

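/*
 * Illustrative sketch, not part of the original file: a virtual disk
 * handler that has just been resized would notify initiators as below;
 * resize_backing_store() and my_resize() are hypothetical helpers.
 * Each initiator then either receives an AEN or, as the fallback
 * above, a CAPACITY DATA CHANGED unit attention on its next command.
 *
 *      static void my_resize(struct scst_device *dev, loff_t new_size)
 *      {
 *              resize_backing_store(dev, new_size);
 *              scst_capacity_data_changed(dev);
 *      }
 */
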
static inline bool scst_is_report_luns_changed_type(int type)
{
        switch (type) {
        case TYPE_DISK:
        case TYPE_TAPE:
        case TYPE_PRINTER:
        case TYPE_PROCESSOR:
        case TYPE_WORM:
        case TYPE_ROM:
        case TYPE_SCANNER:
        case TYPE_MOD:
        case TYPE_MEDIUM_CHANGER:
        case TYPE_RAID:
        case TYPE_ENCLOSURE:
                return true;
        default:
                return false;
        }
}

/* scst_mutex supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
                                              int flags)
{
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
                "(sess %p)", sess);

        local_bh_disable();

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        /* Lockdep triggers a false positive here */
                        spin_lock(&tgt_dev->tgt_dev_lock);
                }
        }

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (!scst_is_report_luns_changed_type(
                                        tgt_dev->dev->type))
                                continue;

                        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                        __scst_check_set_UA(tgt_dev, sense_buffer,
                                sizeof(sense_buffer),
                                flags | SCST_SET_UA_FLAG_GLOBAL);
                }
        }

        for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry_reverse(tgt_dev,
                                shead, sess_tgt_dev_list_entry) {
                        spin_unlock(&tgt_dev->tgt_dev_lock);
                }
        }

        local_bh_enable();

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
        struct scst_session *sess;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                int i;
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;
                struct scst_tgt_template *tgtt = sess->tgt->tgtt;

                for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                        shead = &sess->sess_tgt_dev_list_hash[i];

                        list_for_each_entry(tgt_dev, shead,
                                        sess_tgt_dev_list_entry) {
                                if (scst_is_report_luns_changed_type(
                                                tgt_dev->dev->type))
                                        goto found;
                        }
                }
                TRACE_MGMT_DBG("No device found capable of REPORTED "
                        "LUNS DATA CHANGED UA (sess %p)", sess);
                continue;
found:
                if (tgtt->report_aen != NULL) {
                        struct scst_aen *aen;
                        int rc;

                        aen = scst_alloc_aen(tgt_dev);
                        if (aen == NULL)
                                goto queue_ua;

                        aen->event_fn = SCST_AEN_SCSI;
                        aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                        scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                        TRACE_DBG("Calling target's %s report_aen(%p)",
                                tgtt->name, aen);
                        rc = tgtt->report_aen(aen);
                        TRACE_DBG("Target's %s report_aen(%p) returned %d",
                                tgtt->name, aen, rc);
                        if (rc == SCST_AEN_RES_SUCCESS)
                                continue;

                        scst_free_aen(aen);
                }

queue_ua:
                scst_queue_report_luns_changed_UA(sess, 0);
        }

        TRACE_EXIT();
        return;
}

void scst_aen_done(struct scst_aen *aen)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
                aen->event_fn, aen->sess->initiator_name);

        if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
                goto out_free;

        if (aen->event_fn != SCST_AEN_SCSI)
                goto out_free;

        TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
                aen->sess->initiator_name);

        if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
                                scst_sense_reported_luns_data_changed))) {
                mutex_lock(&scst_mutex);
                scst_queue_report_luns_changed_UA(aen->sess,
                        SCST_SET_UA_FLAG_AT_HEAD);
                mutex_unlock(&scst_mutex);
        } else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
                /* The tgt_dev might be gone by now, so look it up again */
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;
                uint64_t lun;

                lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

                mutex_lock(&scst_mutex);

                shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (tgt_dev->lun == lun) {
                                TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
                                        "UA (tgt_dev %p)", tgt_dev);
                                scst_check_set_UA(tgt_dev, aen->aen_sense,
                                        aen->aen_sense_len,
                                        SCST_SET_UA_FLAG_AT_HEAD);
                                break;
                        }
                }

                mutex_unlock(&scst_mutex);
        } else
                PRINT_ERROR("%s", "Unknown SCSI AEN");

out_free:
        scst_free_aen(aen);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_aen_done);

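/*
 * Illustrative sketch, not part of the original file: the expected
 * hand-back from a target driver's side. After its report_aen()
 * callback has accepted the AEN and the transport later finishes (or
 * fails) the delivery, the driver records the outcome and returns the
 * AEN; transport_ok is a hypothetical flag.
 *
 *      aen->delivery_status = transport_ok ?
 *              SCST_AEN_RES_SUCCESS : SCST_AEN_RES_FAILED;
 *      scst_aen_done(aen);
 *
 * On a failed delivery scst_aen_done() falls back to queuing the
 * corresponding unit attention, as implemented above.
 */
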
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_DEV_DONE:
                if (cmd->internal)
                        res = SCST_CMD_STATE_FINISHED_INTERNAL;
                else
                        res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PRE_DEV_DONE:
        case SCST_CMD_STATE_MODE_SELECT_CHECKS:
                res = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_CMD_STATE_PRE_XMIT_RESP:
                res = SCST_CMD_STATE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PREPROCESS_DONE:
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_DATA_WAIT:
        case SCST_CMD_STATE_TGT_PRE_EXEC:
        case SCST_CMD_STATE_SEND_FOR_EXEC:
        case SCST_CMD_STATE_LOCAL_EXEC:
        case SCST_CMD_STATE_REAL_EXEC:
        case SCST_CMD_STATE_REAL_EXECUTING:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;

        default:
                PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
                /* Invalid state to suppress a compiler warning */
                res = SCST_CMD_STATE_LAST_ACTIVE;
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_FINISHED_INTERNAL:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

#ifdef CONFIG_SCST_EXTRACHECKS
        if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                   (cmd->tgt_dev == NULL) && !cmd->internal) {
                PRINT_CRIT_ERROR("Wrong state %d of a not yet initialized "
                        "cmd (cmd %p, op %x)", cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

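/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above are normally used together on an error path, for example in a
 * dev handler's parse() callback; cdb_is_malformed() is a hypothetical
 * check.
 *
 *      if (cdb_is_malformed(cmd)) {
 *              scst_set_cmd_error(cmd,
 *                      SCST_LOAD_SENSE(scst_sense_invalid_opcode));
 *              scst_set_cmd_abnormal_done_state(cmd);
 *              return SCST_CMD_STATE_DEFAULT;
 *      }
 *
 * The command then skips further processing and proceeds straight to
 * the response-transmission states computed above.
 */
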
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);

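/*
 * Illustrative sketch, not part of the original file: a dev handler
 * that produced fewer valid bytes than the allocated buffer trims the
 * response so only those bytes are sent; valid_len is a hypothetical
 * variable holding the number of bytes actually filled in.
 *
 *      if (valid_len < cmd->bufflen)
 *              scst_set_resp_data_len(cmd, valid_len);
 *
 * The helper shortens the SG list in place; the original entry length
 * and count are saved so scst_check_restore_sg_buff() can undo the
 * truncation later.
 */
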
/* No locks */
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
        struct scst_tgt *tgt = cmd->tgt;
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_cmds++;
        /*
         * A memory barrier is needed here to enforce strict ordering between
         * the write to retry_cmds and the read of finished_cmds, so we don't
         * miss a command that finished while we were queuing this one for
         * retry after the finished_cmds check.
         */
        smp_mb();
        TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
              tgt->retry_cmds);
        if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
                /* At least one cmd finished, so try again */
                tgt->retry_cmds--;
                TRACE_RETRY("Some command(s) finished, direct retry "
                      "(finished_cmds=%d, tgt->finished_cmds=%d, "
                      "retry_cmds=%d)", finished_cmds,
                      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
                res = -1;
                goto out_unlock_tgt;
        }

        TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
        list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

        if (!tgt->retry_timer_active) {
                tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
                add_timer(&tgt->retry_timer);
                tgt->retry_timer_active = 1;
        }

out_unlock_tgt:
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        TRACE_EXIT_RES(res);
        return res;
}

/* Returns 0 to continue, >0 to restart, <0 to break */
static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
        unsigned long cur_time, unsigned long max_time,
        struct scst_session *sess, unsigned long *flags,
        struct scst_tgt_template *tgtt)
{
        int res = -1; /* break */

        TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
                "pending time %ld", cmd, cmd->cmd_hw_pending,
                (long)(cur_time - cmd->start_time) / HZ,
                (long)(cur_time - cmd->hw_pending_start) / HZ);

        if (time_before_eq(cur_time, cmd->start_time + max_time)) {
                /* Cmds are ordered, so no need to check more */
                goto out;
        }

        if (!cmd->cmd_hw_pending) {
                res = 0; /* continue */
                goto out;
        }

        if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
                /* Cmds are ordered, so no need to check more */
                goto out;
        }

        TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
                cmd, (cur_time - cmd->hw_pending_start) / HZ,
                cmd->state);

        cmd->cmd_hw_pending = 0;

        spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
        tgtt->on_hw_pending_cmd_timeout(cmd);
        spin_lock_irqsave(&sess->sess_list_lock, *flags);

        res = 1; /* restart */

out:
        TRACE_EXIT_RES(res);
        return res;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void scst_hw_pending_work_fn(void *p)
#else
static void scst_hw_pending_work_fn(struct delayed_work *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
        struct scst_session *sess = (struct scst_session *)p;
#else
        struct scst_session *sess = container_of(work, struct scst_session,
                                        hw_pending_work);
#endif
        struct scst_tgt_template *tgtt = sess->tgt->tgtt;
        struct scst_cmd *cmd;
        unsigned long cur_time = jiffies;
        unsigned long flags;
        unsigned long max_time = tgtt->max_hw_pending_time * HZ;

        TRACE_ENTRY();

        TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);

        clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);

        spin_lock_irqsave(&sess->sess_list_lock, flags);

restart:
        list_for_each_entry(cmd, &sess->search_cmd_list,
                                sess_cmd_list_entry) {
                int rc;

                rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
                                        &flags, tgtt);
                if (rc < 0)
                        break;
                else if (rc == 0)
                        continue;
                else
                        goto restart;
        }

restart1:
        list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
                                sess_cmd_list_entry) {
                int rc;

                rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
                                        &flags, tgtt);
                if (rc < 0)
                        break;
                else if (rc == 0)
                        continue;
                else
                        goto restart1;
        }

        if (!list_empty(&sess->search_cmd_list) ||
            !list_empty(&sess->after_pre_xmit_cmd_list)) {
                /*
                 * If there is no activity, stuck cmds might need one more run
                 * to be released, so reschedule once again.
                 */
                TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
                        sess, tgtt->max_hw_pending_time);
                set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
                schedule_delayed_work(&sess->hw_pending_work,
                                tgtt->max_hw_pending_time * HZ);
        }

        spin_unlock_irqrestore(&sess->sess_list_lock, flags);

        TRACE_EXIT();
        return;
}

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
        if (dev->dev_io_ctx == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
                res = -ENOMEM;
                kfree(dev);
                goto out;
        }
#endif
#endif

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

        __exit_io_context(dev->dev_io_ctx);

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);

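/*
 * Worked example, not part of the original file: scst_max_dev_cmd_mem
 * is in megabytes, so the expression above converts it first to
 * kilobytes (<< 10) and then to pages (>> (PAGE_SHIFT - 10), i.e.
 * division by the page size in KB). With 4 KB pages (PAGE_SHIFT = 12)
 * and scst_max_dev_cmd_mem = 64:
 *
 *      (64 << 10) >> (12 - 10) = 65536 KB >> 2 = 16384 pages
 */
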
static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev, *t = NULL;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;
        bool share_io_ctx = false;
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                       dev->virt_name, (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

        if (tgt_dev->sess->initiator_name != NULL) {
                spin_lock_bh(&dev->dev_lock);
                list_for_each_entry(t, &dev->dev_tgt_dev_list,
                                dev_tgt_dev_list_entry) {
                        TRACE_DBG("t name %s (tgt_dev name %s)",
                                t->sess->initiator_name,
                                tgt_dev->sess->initiator_name);
                        if (t->sess->initiator_name == NULL)
                                continue;
                        if (strcmp(t->sess->initiator_name,
                                        tgt_dev->sess->initiator_name) == 0) {
                                share_io_ctx = true;
                                break;
                        }
                }
                spin_unlock_bh(&dev->dev_lock);
        }

        if (share_io_ctx) {
                TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
                        t->tgt_dev_io_ctx, tgt_dev,
                        tgt_dev->sess->initiator_name);
                tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
        } else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
                tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
                if (tgt_dev->tgt_dev_io_ctx == NULL) {
                        TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
                                "context for dev %s (initiator %s)",
                                dev->virt_name, sess->initiator_name);
                        goto out_free;
                }
#endif
#endif
        }

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_global_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_global_threads(vtt->threads_num);
        }

out_free:
        scst_free_all_UA(tgt_dev);
        __exit_io_context(tgt_dev->tgt_dev_io_ctx);

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No other locks supposed to be held, but scst_mutex must be held */
1386 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1387 {
1388         TRACE_ENTRY();
1389
1390         scst_clear_reservation(tgt_dev);
1391
1392         /* With activity suspended the lock isn't needed, but let's be safe */
1393         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1394         scst_free_all_UA(tgt_dev);
1395         memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1396         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1397
1398         if (queue_UA) {
1399                 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1400                 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1401                         tgt_dev->dev->d_sense,
1402                         SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1403                 scst_check_set_UA(tgt_dev, sense_buffer,
1404                         sizeof(sense_buffer), 0);
1405         }
1406
1407         TRACE_EXIT();
1408         return;
1409 }
1410
1411 /*
1412  * scst_mutex supposed to be held, there must not be parallel activity in this
1413  * session.
1414  */
1415 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1416 {
1417         struct scst_device *dev = tgt_dev->dev;
1418         struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1419
1420         TRACE_ENTRY();
1421
1422         tm_dbg_deinit_tgt_dev(tgt_dev);
1423
1424         spin_lock_bh(&dev->dev_lock);
1425         list_del(&tgt_dev->dev_tgt_dev_list_entry);
1426         spin_unlock_bh(&dev->dev_lock);
1427
1428         list_del(&tgt_dev->sess_tgt_dev_list_entry);
1429
1430         scst_clear_reservation(tgt_dev);
1431         scst_free_all_UA(tgt_dev);
1432
1433         if (dev->handler && dev->handler->detach_tgt) {
1434                 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1435                       tgt_dev);
1436                 dev->handler->detach_tgt(tgt_dev);
1437                 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1438         }
1439
1440         if (vtt->threads_num > 0) {
1441                 if (dev->handler->threads_num > 0)
1442                         scst_del_dev_threads(dev, vtt->threads_num);
1443                 else if (dev->handler->threads_num == 0)
1444                         scst_del_global_threads(vtt->threads_num);
1445         }
1446
1447         __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1448
1449         kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1450
1451         TRACE_EXIT();
1452         return;
1453 }
1454
1455 /* scst_mutex supposed to be held */
1456 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1457 {
1458         int res = 0;
1459         struct scst_acg_dev *acg_dev;
1460         struct scst_tgt_dev *tgt_dev;
1461
1462         TRACE_ENTRY();
1463
1464         list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1465                         acg_dev_list_entry) {
1466                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1467                 if (tgt_dev == NULL) {
1468                         res = -ENOMEM;
1469                         goto out_free;
1470                 }
1471         }
1472
1473 out:
1474         TRACE_EXIT();
1475         return res;
1476
1477 out_free:
1478         scst_sess_free_tgt_devs(sess);
1479         goto out;
1480 }
1481
1482 /*
1483  * scst_mutex supposed to be held, there must not be parallel activity in this
1484  * session.
1485  */
1486 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1487 {
1488         int i;
1489         struct scst_tgt_dev *tgt_dev, *t;
1490
1491         TRACE_ENTRY();
1492
1493         /* The session is going down, no users, so no locks */
1494         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1495                 struct list_head *sess_tgt_dev_list_head =
1496                         &sess->sess_tgt_dev_list_hash[i];
1497                 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1498                                 sess_tgt_dev_list_entry) {
1499                         scst_free_tgt_dev(tgt_dev);
1500                 }
1501                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1502         }
1503
1504         TRACE_EXIT();
1505         return;
1506 }
1507
1508 /* Activity is supposed to be suspended and the scst_mutex held */
1509 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1510                      uint64_t lun, int read_only)
1511 {
1512         int res = 0;
1513         struct scst_acg_dev *acg_dev;
1514         struct scst_tgt_dev *tgt_dev;
1515         struct scst_session *sess;
1516         LIST_HEAD(tmp_tgt_dev_list);
1517
1518         TRACE_ENTRY();
1519
1521
1522 #ifdef CONFIG_SCST_EXTRACHECKS
1523         list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
1524                 if (acg_dev->dev == dev) {
1525                         PRINT_ERROR("Device is already in group %s",
1526                                 acg->acg_name);
1527                         res = -EINVAL;
1528                         goto out;
1529                 }
1530         }
1531 #endif
1532
1533         acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1534         if (acg_dev == NULL) {
1535                 res = -ENOMEM;
1536                 goto out;
1537         }
1538         acg_dev->rd_only = read_only;
1539
1540         TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1541                 acg_dev);
1542         list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1543         list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1544
1545         list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1546                 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1547                 if (tgt_dev == NULL) {
1548                         res = -ENOMEM;
1549                         goto out_free;
1550                 }
1551                 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1552                               &tmp_tgt_dev_list);
1553         }
1554
1555         scst_report_luns_changed(acg);
1556
1557         if (dev->virt_name != NULL) {
1558                 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1559                         "rd_only %d)", dev->virt_name, acg->acg_name,
1560                         (long long unsigned int)lun,
1561                         read_only);
1562         } else {
1563                 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1564                         "%lld, rd_only %d)",
1565                         dev->scsi_dev->host->host_no,
1566                         dev->scsi_dev->channel, dev->scsi_dev->id,
1567                         dev->scsi_dev->lun, acg->acg_name,
1568                         (long long unsigned int)lun,
1569                         read_only);
1570         }
1571
1572 out:
1573         TRACE_EXIT_RES(res);
1574         return res;
1575
1576 out_free:
1577         list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1578                          extra_tgt_dev_list_entry) {
1579                 scst_free_tgt_dev(tgt_dev);
1580         }
1581         scst_free_acg_dev(acg_dev);
1582         goto out;
1583 }
1584
1585 /* Activity is supposed to be suspended and the scst_mutex held */
1586 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
1587 {
1588         int res = 0;
1589         struct scst_acg_dev *acg_dev = NULL, *a;
1590         struct scst_tgt_dev *tgt_dev, *tt;
1591
1592         TRACE_ENTRY();
1593
1594         list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1595                 if (a->dev == dev) {
1596                         acg_dev = a;
1597                         break;
1598                 }
1599         }
1600
1601         if (acg_dev == NULL) {
1602                 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
1603                 res = -EINVAL;
1604                 goto out;
1605         }
1606
1607         list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1608                          dev_tgt_dev_list_entry) {
1609                 if (tgt_dev->acg_dev == acg_dev)
1610                         scst_free_tgt_dev(tgt_dev);
1611         }
1612         scst_free_acg_dev(acg_dev);
1613
1614         scst_report_luns_changed(acg);
1615
1616         if (dev->virt_name != NULL) {
1617                 PRINT_INFO("Removed device %s from group %s",
1618                         dev->virt_name, acg->acg_name);
1619         } else {
1620                 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1621                         dev->scsi_dev->host->host_no,
1622                         dev->scsi_dev->channel, dev->scsi_dev->id,
1623                         dev->scsi_dev->lun, acg->acg_name);
1624         }
1625
1626 out:
1627         TRACE_EXIT_RES(res);
1628         return res;
1629 }
1630
1631 /* The scst_mutex is supposed to be held */
1632 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1633 {
1634         int res = 0;
1635         struct scst_acn *n;
1636         int len;
1637         char *nm;
1638
1639         TRACE_ENTRY();
1640
1641         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1643                 if (strcmp(n->name, name) == 0) {
1644                         PRINT_ERROR("Name %s already exists in group %s",
1645                                 name, acg->acg_name);
1646                         res = -EINVAL;
1647                         goto out;
1648                 }
1649         }
1650
1651         n = kmalloc(sizeof(*n), GFP_KERNEL);
1652         if (n == NULL) {
1653                 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1654                 res = -ENOMEM;
1655                 goto out;
1656         }
1657
1658         len = strlen(name);
1659         nm = kmalloc(len + 1, GFP_KERNEL);
1660         if (nm == NULL) {
1661                 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1662                 res = -ENOMEM;
1663                 goto out_free;
1664         }
1665
1666         strcpy(nm, name);
1667         n->name = nm;
1668
1669         list_add_tail(&n->acn_list_entry, &acg->acn_list);
1670
1671 out:
1672         if (res == 0)
1673                 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1674
1675         TRACE_EXIT_RES(res);
1676         return res;
1677
1678 out_free:
1679         kfree(n);
1680         goto out;
1681 }
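
     /*
      * Usage sketch, illustrative only and not called anywhere: how a
      * management path could register an initiator name while honoring
      * the locking rule above. The function name is hypothetical.
      */
     #if 0
     static int example_register_initiator(struct scst_acg *acg,
             const char *name)
     {
             int rc;

             mutex_lock(&scst_mutex);
             rc = scst_acg_add_name(acg, name);
             mutex_unlock(&scst_mutex);
             return rc;
     }
     #endif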
1682
1683 /* The scst_mutex is supposed to be held */
1684 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
1685 {
1686         int res = -EINVAL;
1687         struct scst_acn *n;
1688
1689         TRACE_ENTRY();
1690
1691         list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1693                 if (strcmp(n->name, name) == 0) {
1694                         list_del(&n->acn_list_entry);
1695                         kfree(n->name);
1696                         kfree(n);
1697                         res = 0;
1698                         break;
1699                 }
1700         }
1701
1702         if (res == 0) {
1703                 PRINT_INFO("Removed name %s from group %s", name,
1704                         acg->acg_name);
1705         } else {
1706                 PRINT_ERROR("Unable to find name %s in group %s", name,
1707                         acg->acg_name);
1708         }
1709
1710         TRACE_EXIT_RES(res);
1711         return res;
1712 }
1713
1714 static struct scst_cmd *scst_create_prepare_internal_cmd(
1715         struct scst_cmd *orig_cmd, int bufsize)
1716 {
1717         struct scst_cmd *res;
1718         gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1719
1720         TRACE_ENTRY();
1721
1722         res = scst_alloc_cmd(gfp_mask);
1723         if (res == NULL)
1724                 goto out;
1725
1726         res->cmd_lists = orig_cmd->cmd_lists;
1727         res->sess = orig_cmd->sess;
1728         res->atomic = scst_cmd_atomic(orig_cmd);
1729         res->internal = 1;
1730         res->tgtt = orig_cmd->tgtt;
1731         res->tgt = orig_cmd->tgt;
1732         res->dev = orig_cmd->dev;
1733         res->tgt_dev = orig_cmd->tgt_dev;
1734         res->lun = orig_cmd->lun;
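             /*
              * Queued at the head, so this internal command (e.g. REQUEST
              * SENSE) overtakes commands already queued for the device
              */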
1735         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1736         res->data_direction = SCST_DATA_UNKNOWN;
1737         res->orig_cmd = orig_cmd;
1738         res->bufflen = bufsize;
1739
1740         scst_sess_get(res->sess);
1741         if (res->tgt_dev != NULL)
1742                 __scst_get(0);
1743
1744         res->state = SCST_CMD_STATE_PRE_PARSE;
1745
1746 out:
1747         TRACE_EXIT_HRES((unsigned long)res);
1748         return res;
1749 }
1750
1751 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1752 {
1753         int res = 0;
1754         static const uint8_t request_sense[6] =
1755             { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1756         struct scst_cmd *rs_cmd;
1757
1758         TRACE_ENTRY();
1759
1760         if (orig_cmd->sense != NULL) {
1761                 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
1762                         orig_cmd->sense, orig_cmd);
1763                 mempool_free(orig_cmd->sense, scst_sense_mempool);
1764                 orig_cmd->sense = NULL;
1765         }
1766
1767         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
1768                         SCST_SENSE_BUFFERSIZE);
1769         if (rs_cmd == NULL)
1770                 goto out_error;
1771
1772         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1773         rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
1774         rs_cmd->cdb_len = sizeof(request_sense);
1775         rs_cmd->data_direction = SCST_DATA_READ;
1776         rs_cmd->expected_data_direction = rs_cmd->data_direction;
1777         rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
1778         rs_cmd->expected_values_set = 1;
1779
1780         TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
1781                 "cmd list", rs_cmd);
1782         spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1783         list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
1784         wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
1785         spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1786
1787 out:
1788         TRACE_EXIT_RES(res);
1789         return res;
1790
1791 out_error:
1792         res = -ENOMEM;
1793         goto out;
1794 }
1795
1796 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
1797 {
1798         struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
1799         uint8_t *buf;
1800         int len;
1801
1802         TRACE_ENTRY();
1803
1804         sBUG_ON(orig_cmd == NULL);
1805
1806         len = scst_get_buf_first(req_cmd, &buf);
1807
1808         if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
1809             SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
1810                 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
1811                         buf, len);
1812                 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
1813                         len);
1814         } else {
1815                 PRINT_ERROR("%s", "Unable to get the sense via "
1816                         "REQUEST SENSE, returning HARDWARE ERROR");
1817                 scst_set_cmd_error(orig_cmd,
1818                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1819         }
1820
1821         if (len > 0)
1822                 scst_put_buf(req_cmd, buf);
1823
1824         TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
1825                 "cmd list", orig_cmd);
1826         spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
1827         list_add(&orig_cmd->cmd_list_entry,
                      &orig_cmd->cmd_lists->active_cmd_list);
1828         wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
1829         spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
1830
1831         TRACE_EXIT();
1832         return;
1833 }
1834
1835 int scst_finish_internal_cmd(struct scst_cmd *cmd)
1836 {
1837         int res;
1838
1839         TRACE_ENTRY();
1840
1841         sBUG_ON(!cmd->internal);
1842
1843         if (cmd->cdb[0] == REQUEST_SENSE)
1844                 scst_complete_request_sense(cmd);
1845
1846         __scst_cmd_put(cmd);
1847
1848         res = SCST_CMD_STATE_RES_CONT_NEXT;
1849
1850         TRACE_EXIT_HRES(res);
1851         return res;
1852 }
1853
1854 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1855 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1856 {
1857         struct scsi_request *req = NULL;
1858
1859         TRACE_ENTRY();
1860
1861         if (scsi_cmd != NULL)
1862                 req = scsi_cmd->sc_request;
1863         if (req != NULL) {
1864                 if (req->sr_bufflen)
1865                         kfree(req->sr_buffer);
1866                 scsi_release_request(req);
1867         }
1868
1869         TRACE_EXIT();
1870         return;
1871 }
1872
1873 static void scst_send_release(struct scst_device *dev)
1874 {
1875         struct scsi_request *req;
1876         struct scsi_device *scsi_dev;
1877         uint8_t cdb[6];
1878
1879         TRACE_ENTRY();
1880
1881         if (dev->scsi_dev == NULL)
1882                 goto out;
1883
1884         scsi_dev = dev->scsi_dev;
1885
1886         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1887         if (req == NULL) {
1888                 PRINT_ERROR("Allocation of scsi_request failed: unable "
1889                             "to RELEASE device %d:%d:%d:%d",
1890                             scsi_dev->host->host_no, scsi_dev->channel,
1891                             scsi_dev->id, scsi_dev->lun);
1892                 goto out;
1893         }
1894
1895         memset(cdb, 0, sizeof(cdb));
1896         cdb[0] = RELEASE;
1897         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1898             ((scsi_dev->lun << 5) & 0xe0) : 0;
1899         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1900         req->sr_cmd_len = sizeof(cdb);
1901         req->sr_data_direction = SCST_DATA_NONE;
1902         req->sr_use_sg = 0;
1903         req->sr_bufflen = 0;
1904         req->sr_buffer = NULL;
1905         req->sr_request->rq_disk = dev->rq_disk;
1906         req->sr_sense_buffer[0] = 0;
1907
1908         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1909                 "mid-level", req);
1910         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1911                     scst_req_done, 15, 3);
1912
1913 out:
1914         TRACE_EXIT();
1915         return;
1916 }
1917 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1918 static void scst_send_release(struct scst_device *dev)
1919 {
1920         struct scsi_device *scsi_dev;
1921         unsigned char cdb[6];
1922         uint8_t sense[SCSI_SENSE_BUFFERSIZE];
1923         int rc, i;
1924
1925         TRACE_ENTRY();
1926
1927         if (dev->scsi_dev == NULL)
1928                 goto out;
1929
1930         scsi_dev = dev->scsi_dev;
1931
1932         for (i = 0; i < 5; i++) {
1933                 memset(cdb, 0, sizeof(cdb));
1934                 cdb[0] = RELEASE;
1935                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1936                     ((scsi_dev->lun << 5) & 0xe0) : 0;
1937
1938                 memset(sense, 0, sizeof(sense));
1939
1940                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1941                         "SCSI mid-level");
1942                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1943                                 sense, 15, 0, 0
1944 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
1945                                 , NULL
1946 #endif
1947                                 );
1948                 TRACE_DBG("MODE_SENSE done: %x", rc);
1949
1950                 if (scsi_status_is_good(rc)) {
1951                         break;
1952                 } else {
1953                         PRINT_ERROR("RELEASE failed: %d", rc);
1954                         PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
1955                         scst_check_internal_sense(dev, rc, sense,
1956                                 sizeof(sense));
1957                 }
1958         }
1959
1960 out:
1961         TRACE_EXIT();
1962         return;
1963 }
1964 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1965
1966 /* The scst_mutex is supposed to be held */
1967 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1968 {
1969         struct scst_device *dev = tgt_dev->dev;
1970         int release = 0;
1971
1972         TRACE_ENTRY();
1973
1974         spin_lock_bh(&dev->dev_lock);
1975         if (dev->dev_reserved &&
1976             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1977                 /* This tgt_dev is the one that holds the reservation */
1978                 struct scst_tgt_dev *tgt_dev_tmp;
1979                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1980                                     dev_tgt_dev_list_entry) {
1981                         clear_bit(SCST_TGT_DEV_RESERVED,
1982                                     &tgt_dev_tmp->tgt_dev_flags);
1983                 }
1984                 dev->dev_reserved = 0;
1985                 release = 1;
1986         }
1987         spin_unlock_bh(&dev->dev_lock);
1988
1989         if (release)
1990                 scst_send_release(dev);
1991
1992         TRACE_EXIT();
1993         return;
1994 }
1995
1996 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
1997         const char *initiator_name)
1998 {
1999         struct scst_session *sess;
2000         int i;
2001         int len;
2002         char *nm;
2003
2004         TRACE_ENTRY();
2005
2006 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2007         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2008 #else
2009         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2010 #endif
2011         if (sess == NULL) {
2012                 TRACE(TRACE_OUT_OF_MEM, "%s",
2013                       "Allocation of scst_session failed");
2014                 goto out;
2015         }
2016 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2017         memset(sess, 0, sizeof(*sess));
2018 #endif
2019
2020         sess->init_phase = SCST_SESS_IPH_INITING;
2021         sess->shut_phase = SCST_SESS_SPH_READY;
2022         atomic_set(&sess->refcnt, 0);
2023         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2024                 struct list_head *sess_tgt_dev_list_head =
2025                          &sess->sess_tgt_dev_list_hash[i];
2026                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2027         }
2028         spin_lock_init(&sess->sess_list_lock);
2029         INIT_LIST_HEAD(&sess->search_cmd_list);
2030         INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2031         sess->tgt = tgt;
2032         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2033         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2034 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2035         INIT_DELAYED_WORK(&sess->hw_pending_work,
2036                 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2037 #else
2038         INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2039 #endif
2040
2041 #ifdef CONFIG_SCST_MEASURE_LATENCY
2042         spin_lock_init(&sess->meas_lock);
2043 #endif
2044
2045         len = strlen(initiator_name);
2046         nm = kmalloc(len + 1, gfp_mask);
2047         if (nm == NULL) {
2048                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2049                 goto out_free;
2050         }
2051
2052         strcpy(nm, initiator_name);
2053         sess->initiator_name = nm;
2054
2055 out:
2056         TRACE_EXIT();
2057         return sess;
2058
2059 out_free:
2060         kmem_cache_free(scst_sess_cachep, sess);
2061         sess = NULL;
2062         goto out;
2063 }
2064
2065 void scst_free_session(struct scst_session *sess)
2066 {
2067         TRACE_ENTRY();
2068
2069         mutex_lock(&scst_mutex);
2070
2071         TRACE_DBG("Removing sess %p from the list", sess);
2072         list_del(&sess->sess_list_entry);
2073         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2074         list_del(&sess->acg_sess_list_entry);
2075
2076         scst_sess_free_tgt_devs(sess);
2077
2078         wake_up_all(&sess->tgt->unreg_waitQ);
2079
2080         mutex_unlock(&scst_mutex);
2081
2082         kfree(sess->initiator_name);
2083         kmem_cache_free(scst_sess_cachep, sess);
2084
2085         TRACE_EXIT();
2086         return;
2087 }
2088
2089 void scst_free_session_callback(struct scst_session *sess)
2090 {
2091         struct completion *c;
2092
2093         TRACE_ENTRY();
2094
2095         TRACE_DBG("Freeing session %p", sess);
2096
2097         cancel_delayed_work_sync(&sess->hw_pending_work);
2098
2099         c = sess->shutdown_compl;
2100
2101         if (sess->unreg_done_fn) {
2102                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2103                 sess->unreg_done_fn(sess);
2104                 TRACE_DBG("%s", "unreg_done_fn() returned");
2105         }
2106         scst_free_session(sess);
2107
2108         if (c)
2109                 complete_all(c);
2110
2111         TRACE_EXIT();
2112         return;
2113 }
2114
2115 void scst_sched_session_free(struct scst_session *sess)
2116 {
2117         unsigned long flags;
2118
2119         TRACE_ENTRY();
2120
2121         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2122                 PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
2123                         "shut phase %lx", sess, sess->shut_phase);
2124                 sBUG();
2125         }
2126
2127         spin_lock_irqsave(&scst_mgmt_lock, flags);
2128         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2129         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2130         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2131
2132         wake_up(&scst_mgmt_waitQ);
2133
2134         TRACE_EXIT();
2135         return;
2136 }
2137
2138 void scst_cmd_get(struct scst_cmd *cmd)
2139 {
2140         __scst_cmd_get(cmd);
2141 }
2142 EXPORT_SYMBOL(scst_cmd_get);
2143
2144 void scst_cmd_put(struct scst_cmd *cmd)
2145 {
2146         __scst_cmd_put(cmd);
2147 }
2148 EXPORT_SYMBOL(scst_cmd_put);
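
     /*
      * Usage sketch, illustrative only: a target driver holding an extra
      * command reference across an asynchronous callout. cmd is assumed
      * to be a valid command; queue_driver_work() is a hypothetical
      * driver function.
      */
     #if 0
             scst_cmd_get(cmd);      /* keep cmd alive until the work runs */
             queue_driver_work(cmd);
             /* ... and in the driver's completion path: */
             scst_cmd_put(cmd);
     #endif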
2149
2150 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2151 {
2152         struct scst_cmd *cmd;
2153
2154         TRACE_ENTRY();
2155
2156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2157         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2158 #else
2159         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2160 #endif
2161         if (cmd == NULL) {
2162                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2163                 goto out;
2164         }
2165 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2166         memset(cmd, 0, sizeof(*cmd));
2167 #endif
2168
2169         cmd->state = SCST_CMD_STATE_INIT_WAIT;
2170         cmd->start_time = jiffies;
2171         atomic_set(&cmd->cmd_ref, 1);
2172         cmd->cmd_lists = &scst_main_cmd_lists;
2173         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2174         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2175         cmd->timeout = SCST_DEFAULT_TIMEOUT;
2176         cmd->retries = 0;
2177         cmd->data_len = -1;
2178         cmd->is_send_status = 1;
2179         cmd->resp_data_len = -1;
2180
2181         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2182         cmd->dbl_ua_orig_resp_data_len = -1;
2183
2184 out:
2185         TRACE_EXIT();
2186         return cmd;
2187 }
2188
2189 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2190 {
2191         scst_sess_put(cmd->sess);
2192
2193         /*
2194          * At this point tgt_dev can be dead, but the pointer remains non-NULL
2195          */
2196         if (likely(cmd->tgt_dev != NULL))
2197                 __scst_put();
2198
2199         scst_destroy_cmd(cmd);
2200         return;
2201 }
2202
2203 /* No locks supposed to be held */
2204 void scst_free_cmd(struct scst_cmd *cmd)
2205 {
2206         int destroy = 1;
2207
2208         TRACE_ENTRY();
2209
2210         TRACE_DBG("Freeing cmd %p (tag %llu)",
2211                   cmd, (long long unsigned int)cmd->tag);
2212
2213         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2214                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2215                         cmd, atomic_read(&scst_cmd_count));
2216         }
2217
2218         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2219                 cmd->dec_on_dev_needed);
2220
2221 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2222 #if defined(CONFIG_SCST_EXTRACHECKS)
2223         if (cmd->scsi_req) {
2224                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2225                         "scsi_req!");
2226                 scst_release_request(cmd);
2227         }
2228 #endif
2229 #endif
2230
2231         /*
2232          * The target driver may have already freed the sg buffer before
2233          * calling scst_tgt_cmd_done(); e.g., scst_local has to do that.
2234          */
2235         if (!cmd->tgt_data_buf_alloced)
2236                 scst_check_restore_sg_buff(cmd);
2237
2238         if (cmd->tgtt->on_free_cmd != NULL) {
2239                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2240                 cmd->tgtt->on_free_cmd(cmd);
2241                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2242         }
2243
2244         if (likely(cmd->dev != NULL)) {
2245                 struct scst_dev_type *handler = cmd->dev->handler;
2246                 if (handler->on_free_cmd != NULL) {
2247                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2248                               handler->name, cmd);
2249                         handler->on_free_cmd(cmd);
2250                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
2251                                 handler->name);
2252                 }
2253         }
2254
2255         scst_release_space(cmd);
2256
2257         if (unlikely(cmd->sense != NULL)) {
2258                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2259                 mempool_free(cmd->sense, scst_sense_mempool);
2260                 cmd->sense = NULL;
2261         }
2262
2263         if (likely(cmd->tgt_dev != NULL)) {
2264 #ifdef CONFIG_SCST_EXTRACHECKS
2265                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2266                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
2267                             "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2268                             cmd, cmd->cdb[0], cmd->tgtt->name,
2269                             (long long unsigned int)cmd->lun,
2270                             cmd->sn, cmd->tgt_dev->expected_sn);
2271                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2272                 }
2273 #endif
2274
2275                 if (unlikely(cmd->out_of_sn)) {
2276                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2277                                 "destroy=%d", cmd,
2278                                 (long long unsigned int)cmd->tag,
2279                                 cmd->sn, destroy);
2280                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2281                                         &cmd->cmd_flags);
2282                 }
2283         }
2284
2285         if (likely(destroy))
2286                 scst_destroy_put_cmd(cmd);
2287
2288         TRACE_EXIT();
2289         return;
2290 }
2291
2292 /* No locks supposed to be held. */
2293 void scst_check_retries(struct scst_tgt *tgt)
2294 {
2295         int need_wake_up = 0;
2296
2297         TRACE_ENTRY();
2298
2299         /*
2300          * We don't worry about overflow of finished_cmds, because we only
2301          * check whether it has changed.
2302          */
2303         atomic_inc(&tgt->finished_cmds);
2304         /* See comment in scst_queue_retry_cmd() */
2305         smp_mb__after_atomic_inc();
2306         if (unlikely(tgt->retry_cmds > 0)) {
2307                 struct scst_cmd *c, *tc;
2308                 unsigned long flags;
2309
2310                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2311                       tgt->retry_cmds);
2312
2313                 spin_lock_irqsave(&tgt->tgt_lock, flags);
2314                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2315                                 cmd_list_entry) {
2316                         tgt->retry_cmds--;
2317
2318                         TRACE_RETRY("Moving retry cmd %p to head of active "
2319                                 "cmd list (retry_cmds left %d)",
2320                                 c, tgt->retry_cmds);
2321                         spin_lock(&c->cmd_lists->cmd_list_lock);
2322                         list_move(&c->cmd_list_entry,
2323                                   &c->cmd_lists->active_cmd_list);
2324                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2325                         spin_unlock(&c->cmd_lists->cmd_list_lock);
2326
2327                         need_wake_up++;
2328                         if (need_wake_up >= 2) /* "slow start" */
2329                                 break;
2330                 }
2331                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2332         }
2333
2334         TRACE_EXIT();
2335         return;
2336 }
2337
2338 void scst_tgt_retry_timer_fn(unsigned long arg)
2339 {
2340         struct scst_tgt *tgt = (struct scst_tgt *)arg;
2341         unsigned long flags;
2342
2343         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2344
2345         spin_lock_irqsave(&tgt->tgt_lock, flags);
2346         tgt->retry_timer_active = 0;
2347         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2348
2349         scst_check_retries(tgt);
2350
2351         TRACE_EXIT();
2352         return;
2353 }
2354
2355 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2356 {
2357         struct scst_mgmt_cmd *mcmd;
2358
2359         TRACE_ENTRY();
2360
2361         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2362         if (mcmd == NULL) {
2363                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2364                         "failed, some commands and their data could leak");
2365                 goto out;
2366         }
2367         memset(mcmd, 0, sizeof(*mcmd));
2368
2369 out:
2370         TRACE_EXIT();
2371         return mcmd;
2372 }
2373
2374 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2375 {
2376         unsigned long flags;
2377
2378         TRACE_ENTRY();
2379
2380         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2381         atomic_dec(&mcmd->sess->sess_cmd_count);
2382         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2383
2384         scst_sess_put(mcmd->sess);
2385
2386         if (mcmd->mcmd_tgt_dev != NULL)
2387                 __scst_put();
2388
2389         mempool_free(mcmd, scst_mgmt_mempool);
2390
2391         TRACE_EXIT();
2392         return;
2393 }
2394
2395 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2396 int scst_alloc_request(struct scst_cmd *cmd)
2397 {
2398         int res = 0;
2399         struct scsi_request *req;
2400         gfp_t gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2401
2402         TRACE_ENTRY();
2403
2404         /* cmd->dev->scsi_dev must be non-NULL here */
2405         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2406         if (req == NULL) {
2407                 TRACE(TRACE_OUT_OF_MEM, "%s",
2408                       "Allocation of scsi_request failed");
2409                 res = -ENOMEM;
2410                 goto out;
2411         }
2412
2413         cmd->scsi_req = req;
2414
2415         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2416         req->sr_cmd_len = cmd->cdb_len;
2417         req->sr_data_direction = cmd->data_direction;
2418         req->sr_use_sg = cmd->sg_cnt;
2419         req->sr_bufflen = cmd->bufflen;
2420         req->sr_buffer = cmd->sg;
2421         req->sr_request->rq_disk = cmd->dev->rq_disk;
2422         req->sr_sense_buffer[0] = 0;
2423
2424         cmd->scsi_req->upper_private_data = cmd;
2425
2426 out:
2427         TRACE_EXIT();
2428         return res;
2429 }
2430
2431 void scst_release_request(struct scst_cmd *cmd)
2432 {
2433         scsi_release_request(cmd->scsi_req);
2434         cmd->scsi_req = NULL;
2435 }
2436 #endif
2437
2438 static bool is_report_sg_limitation(void)
2439 {
2440 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2441         return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2442 #else
2443         return false;
2444 #endif
2445 }
2446
2447 int scst_alloc_space(struct scst_cmd *cmd)
2448 {
2449         gfp_t gfp_mask;
2450         int res = -ENOMEM;
2451         int atomic = scst_cmd_atomic(cmd);
2452         int flags;
2453         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2454         static int ll; /* crude rate limiting for the messages below */
2455
2456         TRACE_ENTRY();
2457
2458         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2459
2460         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2461         if (cmd->no_sgv)
2462                 flags |= SCST_POOL_ALLOC_NO_CACHED;
2463
2464         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2465                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2466         if (cmd->sg == NULL)
2467                 goto out;
2468
2469         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2470                 if ((ll < 10) || is_report_sg_limitation()) {
2471                         PRINT_INFO("Unable to complete command due to "
2472                                 "SG IO count limitation (requested %d, "
2473                                 "available %d, tgt lim %d)", cmd->sg_cnt,
2474                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2475                         ll++;
2476                 }
2477                 goto out_sg_free;
2478         }
2479
2480         if (cmd->data_direction != SCST_DATA_BIDI)
2481                 goto success;
2482
2483         cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2484                          flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2485                          &cmd->dev->dev_mem_lim, NULL);
2486         if (cmd->in_sg == NULL)
2487                 goto out_sg_free;
2488
2489         if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2490                 if ((ll < 10) || is_report_sg_limitation()) {
2491                         PRINT_INFO("Unable to complete command due to "
2492                                 "SG IO count limitation (IN buffer, requested "
2493                                 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2494                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2495                         ll++;
2496                 }
2497                 goto out_in_sg_free;
2498         }
2499
2500 success:
2501         res = 0;
2502
2503 out:
2504         TRACE_EXIT();
2505         return res;
2506
2507 out_in_sg_free:
2508         sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2509         cmd->in_sgv = NULL;
2510         cmd->in_sg = NULL;
2511         cmd->in_sg_cnt = 0;
2512
2513 out_sg_free:
2514         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2515         cmd->sgv = NULL;
2516         cmd->sg = NULL;
2517         cmd->sg_cnt = 0;
2518         goto out;
2519 }
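
     /*
      * Sketch of the sgv_pool_alloc()/sgv_pool_free() pairing used above,
      * illustrative only. pool, bufflen and mem_lim are assumed to come
      * from the caller's context; type names follow scst_mem.h.
      */
     #if 0
             struct sgv_pool_obj *sgv;
             struct scatterlist *sg;
             int cnt;

             /* Flags 0: allow a real allocation on an sgv cache miss */
             sg = sgv_pool_alloc(pool, bufflen, GFP_KERNEL, 0, &cnt, &sgv,
                             mem_lim, NULL);
             if (sg == NULL)
                     return -ENOMEM;
             /* ... use sg/cnt, then return the entry to the pool: */
             sgv_pool_free(sgv, mem_lim);
     #endif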
2520
2521 static void scst_release_space(struct scst_cmd *cmd)
2522 {
2523         TRACE_ENTRY();
2524
2525         if (cmd->sgv == NULL)
2526                 goto out;
2527
2528         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2529                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2530                 goto out;
2531         }
2532
2533         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2534         cmd->sgv = NULL;
2535         cmd->sg_cnt = 0;
2536         cmd->sg = NULL;
2537         cmd->bufflen = 0;
2538         cmd->data_len = 0;
2539
2540         if (cmd->in_sgv != NULL) {
2541                 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2542                 cmd->in_sgv = NULL;
2543                 cmd->in_sg_cnt = 0;
2544                 cmd->in_sg = NULL;
2545                 cmd->in_bufflen = 0;
2546         }
2547
2548 out:
2549         TRACE_EXIT();
2550         return;
2551 }
2552
2553 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2554
2555 /*
2556  * May advance to the next dst_sg element, so to copy into strictly one
2557  * dst_sg element, it must either be last in the chain, or
2558  * copy_len == dst_sg->length.
2559  */
2560 static int __sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2561                           size_t *pdst_offs, struct scatterlist *src_sg,
2562                           size_t copy_len,
2563                           enum km_type d_km_type, enum km_type s_km_type)
2564 {
2565         int res = 0;
2566         struct scatterlist *dst_sg;
2567         size_t src_len, dst_len, src_offs, dst_offs;
2568         struct page *src_page, *dst_page;
2569
2570         if (copy_len == 0)
2571                 copy_len = 0x7FFFFFFF; /* copy all */
2572
2573         dst_sg = *pdst_sg;
2574         dst_len = *pdst_len;
2575         dst_offs = *pdst_offs;
2576         dst_page = sg_page(dst_sg);
2577
2578         src_page = sg_page(src_sg);
2579         src_len = src_sg->length;
2580         src_offs = src_sg->offset;
2581
2582         do {
2583                 void *saddr, *daddr;
2584                 size_t n;
2585
2586                 saddr = kmap_atomic(src_page +
2587                                          (src_offs >> PAGE_SHIFT), s_km_type) +
2588                                     (src_offs & ~PAGE_MASK);
2589                 daddr = kmap_atomic(dst_page +
2590                                         (dst_offs >> PAGE_SHIFT), d_km_type) +
2591                                     (dst_offs & ~PAGE_MASK);
2592
2593                 if (((src_offs & ~PAGE_MASK) == 0) &&
2594                     ((dst_offs & ~PAGE_MASK) == 0) &&
2595                     (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2596                     (copy_len >= PAGE_SIZE)) {
2597                         copy_page(daddr, saddr);
2598                         n = PAGE_SIZE;
2599                 } else {
2600                         n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2601                                           PAGE_SIZE - (src_offs & ~PAGE_MASK));
2602                         n = min(n, src_len);
2603                         n = min(n, dst_len);
2604                         n = min_t(size_t, n, copy_len);
2605                         memcpy(daddr, saddr, n);
2606                 }
2607                 dst_offs += n;
2608                 src_offs += n;
2609
2610                 kunmap_atomic(saddr, s_km_type);
2611                 kunmap_atomic(daddr, d_km_type);
2612
2613                 res += n;
2614                 copy_len -= n;
2615                 if (copy_len == 0)
2616                         goto out;
2617
2618                 src_len -= n;
2619                 dst_len -= n;
2620                 if (dst_len == 0) {
2621                         dst_sg = sg_next(dst_sg);
2622                         if (dst_sg == NULL)
2623                                 goto out;
2624                         dst_page = sg_page(dst_sg);
2625                         dst_len = dst_sg->length;
2626                         dst_offs = dst_sg->offset;
2627                 }
2628         } while (src_len > 0);
2629
2630 out:
2631         *pdst_sg = dst_sg;
2632         *pdst_len = dst_len;
2633         *pdst_offs = dst_offs;
2634         return res;
2635 }
2636
2637 /**
2638  * sg_copy_elem - copy one SG element to another
2639  * @dst_sg:     destination SG element
2640  * @src_sg:     source SG element
2641  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2642  * @d_km_type:  kmap_atomic type for the destination SG
2643  * @s_km_type:  kmap_atomic type for the source SG
2644  *
2645  * Description:
2646  *    Data from the source SG element will be copied to the destination
2647  *    SG element. Returns the number of bytes copied. May advance to the
2648  *    next dst_sg element, so to copy into strictly one dst_sg element,
2649  *    it must either be last in the chain, or copy_len == dst_sg->length.
2650  */
2651 int sg_copy_elem(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2652                  size_t copy_len, enum km_type d_km_type,
2653                  enum km_type s_km_type)
2654 {
2655         size_t dst_len = dst_sg->length, dst_offs = dst_sg->offset;
2656
2657         return __sg_copy_elem(&dst_sg, &dst_len, &dst_offs, src_sg,
2658                 copy_len, d_km_type, s_km_type);
2659 }
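
     /*
      * Usage sketch, illustrative only: copying a single 512-byte element.
      * src_buf and dst_buf are hypothetical kernel buffers.
      */
     #if 0
             struct scatterlist src, dst;
             int copied;

             sg_init_one(&src, src_buf, 512);
             sg_init_one(&dst, dst_buf, 512);
             /* copy_len == 0 means "copy as much as fits" */
             copied = sg_copy_elem(&dst, &src, 0, KM_USER0, KM_USER1);
     #endif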
2660
2662 /**
2663  * sg_copy - copy one SG vector to another
2664  * @dst_sg:     destination SG
2665  * @src_sg:     source SG
2666  * @copy_len:   maximum amount of data to copy. If 0, then copy all.
2667  * @d_km_type:  kmap_atomic type for the destination SG
2668  * @s_km_type:  kmap_atomic type for the source SG
2669  *
2670  * Description:
2671  *    Data from the source SG vector will be copied to the destination SG
2672  *    vector. End of the vectors will be determined by sg_next() returning
2673  *    NULL. Returns the number of bytes copied.
2674  */
2675 int sg_copy(struct scatterlist *dst_sg,
2676             struct scatterlist *src_sg, size_t copy_len,
2677             enum km_type d_km_type, enum km_type s_km_type)
2678 {
2679         int res = 0, copied;
2680         size_t dst_len, dst_offs;
2681
2682         if (copy_len == 0)
2683                 copy_len = 0x7FFFFFFF; /* copy all */
2684
2685         dst_len = dst_sg->length;
2686         dst_offs = dst_sg->offset;
2687
2688         do {
2689                 copied = __sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2690                                 src_sg, copy_len, d_km_type, s_km_type);
                     res += copied;
                     copy_len -= copied;
2691                 if ((copy_len == 0) || (dst_sg == NULL))
2692                         goto out;
2693
2694                 src_sg = sg_next(src_sg);
2695         } while (src_sg != NULL);
2696
2697 out:
2698         return res;
2699 }
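
     /*
      * Usage sketch, illustrative only: duplicating a whole SG vector.
      * dst_sgl and src_sgl are assumed, already built vectors. With
      * copy_len == 0 the copy runs until either vector ends, i.e. until
      * sg_next() returns NULL.
      */
     #if 0
             int copied = sg_copy(dst_sgl, src_sgl, 0, KM_USER0, KM_USER1);
     #endif
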
2700 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
2701
2702 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2703 #include <linux/pfn.h>
2704
2705 struct blk_kern_sg_hdr {
2706         struct scatterlist *orig_sgp;
2707         union {
2708                 struct sg_table new_sg_table;
2709                 struct scatterlist *saved_sg;
2710         };
2711         bool tail_only;
2712 };
2713
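     /*
      * Number of scatterlist entries needed to embed a struct
      * blk_kern_sg_hdr at the head of an SG table, i.e.
      * DIV_ROUND_UP(sizeof(hdr), sizeof(struct scatterlist))
      */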
2714 #define BLK_KERN_SG_HDR_ENTRIES (1 + (sizeof(struct blk_kern_sg_hdr) - 1) / \
2715                                  sizeof(struct scatterlist))
2716
2717 /**
2718  * blk_rq_unmap_kern_sg - "unmaps" data buffers in the request
2719  * @req:        request to unmap
2720  * @do_copy:    sets copy data between buffers, if needed, or not
2721  *
2722  * Description:
2723  *    It frees all additional buffers allocated for SG->BIO mapping.
2724  */
2725 void blk_rq_unmap_kern_sg(struct request *req, int do_copy)
2726 {
2727         struct blk_kern_sg_hdr *hdr = req->end_io_data;
2728
2729         if (hdr == NULL)
2730                 goto out;
2731
2732         if (hdr->tail_only) {
2733                 /* Tail element only was copied */
2734                 struct scatterlist *saved_sg = hdr->saved_sg;
2735                 struct scatterlist *tail_sg = hdr->orig_sgp;
2736
2737                 if ((rq_data_dir(req) == READ) && do_copy)
2738                         sg_copy_elem(saved_sg, tail_sg, tail_sg->length,
2739                                 KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2740
2741                 __free_pages(sg_page(tail_sg), get_order(tail_sg->length));
2742                 *tail_sg = *saved_sg;
2743                 kfree(hdr);
2744         } else {
2745                 /* The whole SG was copied */
2746                 struct sg_table new_sg_table = hdr->new_sg_table;
2747                 struct scatterlist *new_sgl = new_sg_table.sgl +
2748                                                 BLK_KERN_SG_HDR_ENTRIES;
2749                 struct scatterlist *orig_sgl = hdr->orig_sgp;
2750
2751                 if ((rq_data_dir(req) == READ) && do_copy)
2752                         sg_copy(orig_sgl, new_sgl, 0, KM_BIO_DST_IRQ,
2753                                 KM_BIO_SRC_IRQ);
2754
2755                 sg_free_table(&new_sg_table);
2756         }
2757
2758 out:
2759         return;
2760 }
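
     /*
      * Note on blk_rq_unmap_kern_sg(): scsi_end_async() below shows the
      * intended calling sequence - restore req->end_io_data first, then
      * unmap with do_copy set only on success.
      */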
2761
2762 static int blk_rq_handle_align_tail_only(struct request *rq,
2763                                          struct scatterlist *sg_to_copy,
2764                                          gfp_t gfp, gfp_t page_gfp)
2765 {
2766         int res = 0;
2767         struct scatterlist *tail_sg = sg_to_copy;
2768         struct scatterlist *saved_sg;
2769         struct blk_kern_sg_hdr *hdr;
2770         int saved_sg_nents;
2771         struct page *pg;
2772
2773         saved_sg_nents = 1 + BLK_KERN_SG_HDR_ENTRIES;
2774
2775         saved_sg = kmalloc(sizeof(*saved_sg) * saved_sg_nents, gfp);
2776         if (saved_sg == NULL)
2777                 goto out_nomem;
2778
2779         sg_init_table(saved_sg, saved_sg_nents);
2780
2781         hdr = (struct blk_kern_sg_hdr *)saved_sg;
2782         saved_sg += BLK_KERN_SG_HDR_ENTRIES;
2783         saved_sg_nents -= BLK_KERN_SG_HDR_ENTRIES;
2784
2785         hdr->tail_only = true;
2786         hdr->orig_sgp = tail_sg;
2787         hdr->saved_sg = saved_sg;
2788
2789         *saved_sg = *tail_sg;
2790
2791         pg = alloc_pages(page_gfp, get_order(tail_sg->length));
2792         if (pg == NULL)
2793                 goto err_free_saved_sg;
2794
2795         sg_assign_page(tail_sg, pg);
2796         tail_sg->offset = 0;
2797
2798         if (rq_data_dir(rq) == WRITE)
2799                 sg_copy_elem(tail_sg, saved_sg, saved_sg->length,
2800                                 KM_USER1, KM_USER0);
2801
2802         rq->end_io_data = hdr;
2803         rq->cmd_flags |= REQ_COPY_USER;
2804
2805 out:
2806         return res;
2807
2808 err_free_saved_sg:
2809         kfree(saved_sg);
2810
2811 out_nomem:
2812         res = -ENOMEM;
2813         goto out;
2814 }
2815
2816 static int blk_rq_handle_align(struct request *rq, struct scatterlist **psgl,
2817                                int *pnents, struct scatterlist *sgl_to_copy,
2818                                int nents_to_copy, gfp_t gfp, gfp_t page_gfp)
2819 {
2820         int res = 0, i;
2821         struct scatterlist *sgl = *psgl;
2822         int nents = *pnents;
2823         struct sg_table sg_table;
2824         struct scatterlist *sg;
2825         struct scatterlist *new_sgl;
2826         size_t len = 0, to_copy;
2827         int new_sgl_nents;
2828         struct blk_kern_sg_hdr *hdr;
2829
2830         if (sgl != sgl_to_copy) {
2831                 /* copy only the last element */
2832                 res = blk_rq_handle_align_tail_only(rq, sgl_to_copy,
2833                                 gfp, page_gfp);
2834                 if (res == 0)
2835                         goto out;
2836                 /* else go through */
2837         }
2838
2839         for_each_sg(sgl, sg, nents, i)
2840                 len += sg->length;
2841         to_copy = len;
2842
2843         new_sgl_nents = PFN_UP(len) + BLK_KERN_SG_HDR_ENTRIES;
2844
2845         res = sg_alloc_table(&sg_table, new_sgl_nents, gfp);
2846         if (res != 0)
2847                 goto out;
2848
2849         new_sgl = sg_table.sgl;
2850         hdr = (struct blk_kern_sg_hdr *)new_sgl;
2851         new_sgl += BLK_KERN_SG_HDR_ENTRIES;
2852         new_sgl_nents -= BLK_KERN_SG_HDR_ENTRIES;
2853
2854         hdr->tail_only = false;
2855         hdr->orig_sgp = sgl;
2856         hdr->new_sg_table = sg_table;
2857
2858         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
2859                 struct page *pg;
2860
2861                 pg = alloc_page(page_gfp);
2862                 if (pg == NULL)
2863                         goto err_free_new_sgl;
2864
2865                 sg_assign_page(sg, pg);
2866                 sg->length = min_t(size_t, PAGE_SIZE, len);
2867
2868                 len -= PAGE_SIZE;
2869         }
2870
2871         if (rq_data_dir(rq) == WRITE) {
2872                 /*
2873                  * We must limit the amount of copied data to to_copy, because
2874                  * the last element of sgl might not be marked as the last one
2875                  * in the SG chain.
2876                  */
2877                 sg_copy(new_sgl, sgl, to_copy, KM_USER0, KM_USER1);
2878         }
2879
2880         rq->end_io_data = hdr;
2881         rq->cmd_flags |= REQ_COPY_USER;
2882
2883         *psgl = new_sgl;
2884         *pnents = new_sgl_nents;
2885
2886 out:
2887         return res;
2888
2889 err_free_new_sgl:
2890         for_each_sg(new_sgl, sg, new_sgl_nents, i) {
2891                 struct page *pg = sg_page(sg);
2892                 if (pg == NULL)
2893                         break;
2894                 __free_page(pg);
2895         }
2896         sg_free_table(&sg_table);
2897
2898         res = -ENOMEM;
2899         goto out;
2900 }
2901
2902 static void bio_map_kern_endio(struct bio *bio, int err)
2903 {
2904         bio_put(bio);
2905 }
2906
2907 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
2908         int nents, gfp_t gfp, struct scatterlist **sgl_to_copy,
2909         int *nents_to_copy)
2910 {
2911         int res;
2912         struct request_queue *q = rq->q;
2913         int rw = rq_data_dir(rq);
2914         int max_nr_vecs, i;
2915         size_t tot_len;
2916         bool need_new_bio;
2917         struct scatterlist *sg, *prev_sg = NULL;
2918         struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
2919
2920         *sgl_to_copy = NULL;
2921
2922         if (unlikely((sgl == NULL) || (nents <= 0))) {
2923                 WARN_ON(1);
2924                 res = -EINVAL;
2925                 goto out;
2926         }
2927
2928         /*
2929          * Let's keep each bio allocation inside a single page to decrease
2930          * probability of failure.
2931          */
2932         max_nr_vecs = min_t(size_t,
2933                 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
2934                 BIO_MAX_PAGES);
2935
2936         need_new_bio = true;
2937         tot_len = 0;
2938         for_each_sg(sgl, sg, nents, i) {
2939                 struct page *page = sg_page(sg);
2940                 void *page_addr = page_address(page);
2941                 size_t len = sg->length, l;
2942                 size_t offset = sg->offset;
2943
2944                 tot_len += len;
2945                 prev_sg = sg;
2946
2947                 /*
2948                  * Each segment must be aligned on DMA boundary and
2949                  * not on stack. The last one may have unaligned
2950                  * length as long as the total length is aligned to
2951                  * DMA padding alignment.
2952                  */
2953                 if (i == nents - 1)
2954                         l = 0;
2955                 else
2956                         l = len;
2957                 if (((sg->offset | l) & queue_dma_alignment(q)) ||
2958                     (page_addr && object_is_on_stack(page_addr + sg->offset))) {
2959                         res = -EINVAL;
2960                         goto out_need_copy;
2961                 }
2962
2963                 while (len > 0) {
2964                         size_t bytes;
2965                         int rc;
2966
2967                         if (need_new_bio) {
2968                                 bio = bio_kmalloc(gfp, max_nr_vecs);
2969                                 if (bio == NULL) {
2970                                         res = -ENOMEM;
2971                                         goto out_free_bios;
2972                                 }
2973
2974                                 if (rw == WRITE)
2975                                         bio->bi_rw |= 1 << BIO_RW;
2976
2977                                 bio->bi_end_io = bio_map_kern_endio;
2978
2979                                 if (hbio == NULL)
2980                                         hbio = tbio = bio;
2981                                 else
2982                                         tbio = tbio->bi_next = bio;
2983                         }
2984
2985                         bytes = min_t(size_t, len, PAGE_SIZE - offset);
2986
2987                         rc = bio_add_pc_page(q, bio, page, bytes, offset);
2988                         if (rc < bytes) {
2989                                 if (unlikely(need_new_bio || (rc < 0))) {
2990                                         if (rc < 0)
2991                                                 res = rc;
2992                                         else
2993                                                 res = -EIO;
2994                                         goto out_need_copy;
2995                                 } else {
2996                                         need_new_bio = true;
2997                                         len -= rc;
2998                                         offset += rc;
2999                                         continue;
3000                                 }
3001                         }
3002
3003                         need_new_bio = false;
3004                         offset = 0;
3005                         len -= bytes;
3006                         page = nth_page(page, 1);
3007                 }
3008         }
3009
3010         if (hbio == NULL) {
3011                 res = -EINVAL;
3012                 goto out_free_bios;
3013         }
3014
3015         /* Total length must be aligned on DMA padding alignment */
3016         if ((tot_len & q->dma_pad_mask) &&
3017             !(rq->cmd_flags & REQ_COPY_USER)) {
3018                 res = -EINVAL;
3019                 if (sgl->offset == 0) {
3020                         *sgl_to_copy = prev_sg;
3021                         *nents_to_copy = 1;
3022                         goto out_free_bios;
3023                 } else {
3024                         goto out_need_copy;
                      }
3025         }
3026
3027         while (hbio != NULL) {
3028                 bio = hbio;
3029                 hbio = hbio->bi_next;
3030                 bio->bi_next = NULL;
3031
3032                 blk_queue_bounce(q, &bio);
3033
3034                 res = blk_rq_append_bio(q, rq, bio);
3035                 if (unlikely(res != 0)) {
3036                         bio->bi_next = hbio;
3037                         hbio = bio;
3038                         goto out_free_bios;
3039                 }
3040         }
3041
3042         rq->buffer = rq->data = NULL;
3043
3044 out:
3045         return res;
3046
3047 out_need_copy:
3048         *sgl_to_copy = sgl;
3049         *nents_to_copy = nents;
3050
3051 out_free_bios:
3052         while (hbio != NULL) {
3053                 bio = hbio;
3054                 hbio = hbio->bi_next;
3055                 bio_put(bio);
3056         }
3057         goto out;
3058 }
3059
3060 /**
3061  * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3062  * @rq:         request to fill
3063  * @sgl:        area to map
3064  * @nents:      number of elements in @sgl
3065  * @gfp:        memory allocation flags
3066  *
3067  * Description:
3068  *    Data will be mapped directly if possible. Otherwise a bounce
3069  *    buffer will be used.
3070  */
3071 int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3072                        int nents, gfp_t gfp)
3073 {
3074         int res;
3075         struct scatterlist *sg_to_copy = NULL;
3076         int nents_to_copy = 0;
3077
3078         if (unlikely((sgl == NULL) || (sgl->length == 0) ||
3079                      (nents <= 0) || (rq->end_io_data != NULL))) {
3080                 WARN_ON(1);
3081                 res = -EINVAL;
3082                 goto out;
3083         }
3084
3085         res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3086                                 &nents_to_copy);
3087         if (unlikely(res != 0)) {
3088                 if (sg_to_copy == NULL)
3089                         goto out;
3090
3091                 res = blk_rq_handle_align(rq, &sgl, &nents, sg_to_copy,
3092                                 nents_to_copy, gfp, rq->q->bounce_gfp | gfp);
3093                 if (unlikely(res != 0))
3094                         goto out;
3095
3096                 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3097                                                 &nents_to_copy);
3098                 if (res != 0) {
3099                         blk_rq_unmap_kern_sg(rq, 0);
3100                         goto out;
3101                 }
3102         }
3103
3104         rq->buffer = rq->data = NULL;
3105
3106 out:
3107         return res;
3108 }
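
/*
 * Illustrative sketch only (not part of this file): a caller that already
 * owns a scatterlist maps it onto a REQ_TYPE_BLOCK_PC request roughly as
 * below. example_map_sg() is a hypothetical name.
 */
#if 0
static int example_map_sg(struct request_queue *q, struct scatterlist *sgl,
	int nents)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* Maps directly if possible, otherwise sets up a bounce copy */
	err = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (err != 0) {
		blk_put_request(rq);
		return err;
	}

	/*
	 * ... fill rq->cmd / rq->cmd_len and issue rq, then unmap via
	 * blk_rq_unmap_kern_sg() in the completion path ...
	 */
	return 0;
}
#endif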
3109
3110 struct scsi_io_context {
3111         void *blk_data;
3112         void *data;
3113         void (*done)(void *data, char *sense, int result, int resid);
3114         char sense[SCSI_SENSE_BUFFERSIZE];
3115 };
3116
3117 static void scsi_end_async(struct request *req, int error)
3118 {
3119         struct scsi_io_context *sioc = req->end_io_data;
3120
3121         req->end_io_data = sioc->blk_data;
3122         blk_rq_unmap_kern_sg(req, (error == 0));
3123
3124         if (sioc->done)
3125                 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3126
3127         kfree(sioc);
3128         __blk_put_request(req->q, req);
3129 }
3130
3131 /**
3132  * scsi_execute_async - insert request
3133  * @sdev:       scsi device
3134  * @cmd:        scsi command
3135  * @cmd_len:    length of scsi cdb
3136  * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
3137  * @sgl:        data buffer scatterlist
3138  * @nents:      number of elements in the sgl
3139  * @timeout:    request timeout in jiffies
3140  * @retries:    number of times to retry request
3141  * @privdata:   data passed to done()
3142  * @done:       callback function when done
3143  * @gfp:        memory allocation flags
3144  * @flags:      one or more SCSI_ASYNC_EXEC_FLAG_* flags
3145  */
3146 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
3147                        int cmd_len, int data_direction, struct scatterlist *sgl,
3148                        int nents, int timeout, int retries, void *privdata,
3149                        void (*done)(void *, char *, int, int), gfp_t gfp,
3150                        int flags)
3151 {
3152         struct request *req;
3153         struct scsi_io_context *sioc;
3154         int err = 0;
3155         int write = (data_direction == DMA_TO_DEVICE);
3156
3157         sioc = kzalloc(sizeof(*sioc), gfp);
3158         if (sioc == NULL)
3159                 return DRIVER_ERROR << 24;
3160
3161         req = blk_get_request(sdev->request_queue, write, gfp);
3162         if (req == NULL)
3163                 goto free_sense;
3164         req->cmd_type = REQ_TYPE_BLOCK_PC;
3165         req->cmd_flags |= REQ_QUIET;
3166
3167         if (flags & SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING)
3168                 req->cmd_flags |= REQ_COPY_USER;
3169
3170         if (sgl != NULL) {
3171                 err = blk_rq_map_kern_sg(req, sgl, nents, gfp);
3172                 if (err)
3173                         goto free_req;
3174         }
3175
3176         sioc->blk_data = req->end_io_data;
3177         sioc->data = privdata;
3178         sioc->done = done;
3179
3180         req->cmd_len = cmd_len;
3181         memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3182         memcpy(req->cmd, cmd, req->cmd_len);
3183         req->sense = sioc->sense;
3184         req->sense_len = 0;
3185         req->timeout = timeout;
3186         req->retries = retries;
3187         req->end_io_data = sioc;
3188
3189         blk_execute_rq_nowait(req->q, NULL, req,
3190                 flags & SCSI_ASYNC_EXEC_FLAG_AT_HEAD, scsi_end_async);
3191         return 0;
3192
3193 free_req:
3194         blk_put_request(req);
3195
3196 free_sense:
3197         kfree(sioc);
3198         return DRIVER_ERROR << 24;
3199 }
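
/*
 * Illustrative sketch only: issuing an asynchronous READ CAPACITY(10)
 * via scsi_execute_async(). example_done() and example_read_capacity()
 * are hypothetical names; the sgl must describe a READ_CAP_LEN buffer.
 */
#if 0
static void example_done(void *data, char *sense, int result, int resid)
{
	if (result != 0)
		printk(KERN_ERR "READ CAPACITY(10) failed: %x\n", result);
}

static int example_read_capacity(struct scsi_device *sdev,
	struct scatterlist *sgl, int nents)
{
	unsigned char cdb[10] = { READ_CAPACITY, };

	return scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
			sgl, nents, 60 * HZ, 3, NULL, example_done,
			GFP_KERNEL, 0);
}
#endif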
3200 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
3201
3202 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3203 {
3204         struct scatterlist *src_sg, *dst_sg;
3205         unsigned int to_copy;
3206         int atomic = scst_cmd_atomic(cmd);
3207
3208         TRACE_ENTRY();
3209
3210         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3211                 if (cmd->data_direction != SCST_DATA_BIDI) {
3212                         src_sg = cmd->tgt_sg;
3213                         dst_sg = cmd->sg;
3214                         to_copy = cmd->bufflen;
3215                 } else {
3216                         TRACE_MEM("BIDI cmd %p", cmd);
3217                         src_sg = cmd->tgt_in_sg;
3218                         dst_sg = cmd->in_sg;
3219                         to_copy = cmd->in_bufflen;
3220                 }
3221         } else {
3222                 src_sg = cmd->sg;
3223                 dst_sg = cmd->tgt_sg;
3224                 to_copy = cmd->resp_data_len;
3225         }
3226
3227         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, "
3228                 "to_copy %d", cmd, copy_dir, src_sg, dst_sg, to_copy);
3229
3230         if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3231                 /*
3232                  * It can happen, e.g., with scst_user for a cmd with delayed
3233                  * alloc that failed with a Check Condition.
3234                  */
3235                 goto out;
3236         }
3237
3238         sg_copy(dst_sg, src_sg, to_copy, atomic ? KM_SOFTIRQ0 : KM_USER0,
3239                                          atomic ? KM_SOFTIRQ1 : KM_USER1);
3240
3241 out:
3242         TRACE_EXIT();
3243         return;
3244 }
3245
3246 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3247
3248 #define SCST_CDB_GROUP(opcode)   (((opcode) >> 5) & 0x7)
3249 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3250
3251 int scst_get_cdb_len(const uint8_t *cdb)
3252 {
3253         return SCST_GET_CDB_LEN(cdb[0]);
3254 }
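
/*
 * For example, READ(10) has opcode 0x28: its group is (0x28 >> 5) & 0x7 = 1,
 * so SCST_GET_CDB_LEN(0x28) = SCST_CDB_LENGTH[1] = 10 bytes. The reserved
 * group 3 and the vendor-specific groups 6-7 yield -1.
 */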
3255
3256 /* get_trans_len_x extracts x bytes from the CDB as a length, starting at off */
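/*
 * E.g., READ(10) carries a two-byte TRANSFER LENGTH in CDB bytes 7-8, so
 * such an op pairs get_trans_len_2 with off = 7, giving
 * bufflen = (cdb[7] << 8) | cdb[8] (in blocks; the generic parse routines
 * scale it by the block size later).
 */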
3257
3258 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3259 {
3260         cmd->cdb_len = 10;
3261         cmd->bufflen = 0;
3262         return 0;
3263 }
3264
3265 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3266 {
3267         cmd->bufflen = 6;
3268         return 0;
3269 }
3270
3271 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3272 {
3273         cmd->bufflen = READ_CAP_LEN;
3274         return 0;
3275 }
3276
3277 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3278 {
3279         int res = 0;
3280
3281         TRACE_ENTRY();
3282
3283         if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3284                 cmd->op_name = "READ CAPACITY(16)";
3285                 cmd->bufflen = READ_CAP16_LEN;
3286                 cmd->op_flags |= SCST_IMPLICIT_HQ;
3287         } else
3288                 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3289
3290         TRACE_EXIT_RES(res);
3291         return res;
3292 }
3293
3294 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3295 {
3296         cmd->bufflen = 1;
3297         return 0;
3298 }
3299
3300 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3301 {
3302         const uint8_t *p = cmd->cdb + off;
3303         int res = 0;
3304
3305         cmd->bufflen = 0;
3306         cmd->bufflen |= ((u32)p[0]) << 8;
3307         cmd->bufflen |= ((u32)p[1]);
3308
3309         switch (cmd->cdb[1] & 0x1f) {
3310         case 0:
3311         case 1:
3312         case 6:
3313                 if (cmd->bufflen != 0) {
3314                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3315                                 "allocation length for service action %x",
3316                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
3317                         goto out_inval;
3318                 }
3319                 break;
3320         }
3321
3322         switch (cmd->cdb[1] & 0x1f) {
3323         case 0:
3324         case 1:
3325                 cmd->bufflen = 20;
3326                 break;
3327         case 6:
3328                 cmd->bufflen = 32;
3329                 break;
3330         case 8:
3331                 cmd->bufflen = max(28, cmd->bufflen);
3332                 break;
3333         default:
3334                 PRINT_ERROR("READ POSITION: Invalid service action %x",
3335                         cmd->cdb[1] & 0x1f);
3336                 goto out_inval;
3337         }
3338
3339 out:
3340         return res;
3341
3342 out_inval:
3343         scst_set_cmd_error(cmd,
3344                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3345         res = 1;
3346         goto out;
3347 }
3348
3349 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3350 {
3351         cmd->bufflen = (u32)cmd->cdb[off];
3352         return 0;
3353 }
3354
3355 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3356 {
3357         cmd->bufflen = (u32)cmd->cdb[off];
3358         if (cmd->bufflen == 0)
3359                 cmd->bufflen = 256;
3360         return 0;
3361 }
3362
3363 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3364 {
3365         const uint8_t *p = cmd->cdb + off;
3366
3367         cmd->bufflen = 0;
3368         cmd->bufflen |= ((u32)p[0]) << 8;
3369         cmd->bufflen |= ((u32)p[1]);
3370
3371         return 0;
3372 }
3373
3374 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3375 {
3376         const uint8_t *p = cmd->cdb + off;
3377
3378         cmd->bufflen = 0;
3379         cmd->bufflen |= ((u32)p[0]) << 16;
3380         cmd->bufflen |= ((u32)p[1]) << 8;
3381         cmd->bufflen |= ((u32)p[2]);
3382
3383         return 0;
3384 }
3385
3386 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3387 {
3388         const uint8_t *p = cmd->cdb + off;
3389
3390         cmd->bufflen = 0;
3391         cmd->bufflen |= ((u32)p[0]) << 24;
3392         cmd->bufflen |= ((u32)p[1]) << 16;
3393         cmd->bufflen |= ((u32)p[2]) << 8;
3394         cmd->bufflen |= ((u32)p[3]);
3395
3396         return 0;
3397 }
3398
3399 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3400 {
3401         cmd->bufflen = 0;
3402         return 0;
3403 }
3404
3405 int scst_get_cdb_info(struct scst_cmd *cmd)
3406 {
3407         int dev_type = cmd->dev->type;
3408         int i, res = 0;
3409         uint8_t op;
3410         const struct scst_sdbops *ptr = NULL;
3411
3412         TRACE_ENTRY();
3413
3414         op = cmd->cdb[0];       /* get the opcode */
3415
3416         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3417                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3418                 dev_type);
3419
3420         i = scst_scsi_op_list[op];
3421         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3422                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3423                         ptr = &scst_scsi_op_table[i];
3424                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3425                               ptr->ops, ptr->devkey[0], /* disk     */
3426                               ptr->devkey[1],   /* tape     */
3427                               ptr->devkey[2],   /* printer */
3428                               ptr->devkey[3],   /* cpu      */
3429                               ptr->devkey[4],   /* cdr      */
3430                               ptr->devkey[5],   /* cdrom    */
3431                               ptr->devkey[6],   /* scanner */
3432                               ptr->devkey[7],   /* worm     */
3433                               ptr->devkey[8],   /* changer */
3434                               ptr->devkey[9],   /* commdev */
3435                               ptr->op_name);
3436                         TRACE_DBG("direction=%d flags=%d off=%d",
3437                               ptr->direction,
3438                               ptr->flags,
3439                               ptr->off);
3440                         break;
3441                 }
3442                 i++;
3443         }
3444
3445         if (unlikely(ptr == NULL)) {
3446                 /* opcode not found or not supported for this device type */
3447                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3448                       dev_type);
3449                 res = -1;
3450                 cmd->op_flags = SCST_INFO_NOT_FOUND;
3451                 goto out;
3452         }
3453
3454         cmd->cdb_len = SCST_GET_CDB_LEN(op);
3455         cmd->op_name = ptr->op_name;
3456         cmd->data_direction = ptr->direction;
3457         cmd->op_flags = ptr->flags;
3458         res = (*ptr->get_trans_len)(cmd, ptr->off);
3459
3460 out:
3461         TRACE_EXIT_RES(res);
3462         return res;
3463 }
3464 EXPORT_SYMBOL(scst_get_cdb_info);
3465
3466 /* Packs SCST LUN back to SCSI form using peripheral device addressing method */
3467 uint64_t scst_pack_lun(const uint64_t lun)
3468 {
3469         uint64_t res;
3470         uint16_t *p = (uint16_t *)&res;
3471
3472         res = lun;
3473         *p = cpu_to_be16(*p);
3474
3475         TRACE_EXIT_HRES((unsigned long)res);
3476         return res;
3477 }
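
/*
 * E.g., on a little-endian host lun 5 becomes the 8-byte field
 * 00 05 00 00 00 00 00 00: the top two bits of byte 0 select the
 * peripheral device addressing method (00b), byte 1 holds the LUN.
 */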
3478
3479 /*
3480  * Routine to extract a lun number from an 8-byte LUN structure
3481  * in network byte order (BE).
3482  * (see SAM-2, Section 4.12.3 page 40)
3483  * Supports 3 types of lun unpacking: peripheral, flat space and logical unit.
3484  */
3485 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3486 {
3487         uint64_t res = NO_SUCH_LUN;
3488         int address_method;
3489
3490         TRACE_ENTRY();
3491
3492         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3493
3494         if (unlikely(len < 2)) {
3495                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3496                         "more", len);
3497                 goto out;
3498         }
3499
3500         if (len > 2) {
3501                 switch (len) {
3502                 case 8:
3503                         if ((*((uint64_t *)lun) &
3504                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3505                                 goto out_err;
3506                         break;
3507                 case 4:
3508                         if (*((uint16_t *)&lun[2]) != 0)
3509                                 goto out_err;
3510                         break;
3511                 case 6:
3512                         if (*((uint32_t *)&lun[2]) != 0)
3513                                 goto out_err;
3514                         break;
3515                 default:
3516                         goto out_err;
3517                 }
3518         }
3519
3520         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
3521         switch (address_method) {
3522         case 0: /* peripheral device addressing method */
3523 #if 0
3524                 if (*lun) {
3525                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
3526                              "peripheral device addressing method 0x%02x, "
3527                              "expected 0", *lun);
3528                         break;
3529                 }
3530                 res = *(lun + 1);
3531                 break;
3532 #else
3533                 /*
3534                  * It looks like it is also legal to treat the peripheral
3535                  * method as the flat space addressing method
3536                  */
3537
3538                 /* fall through */
3539 #endif
3540
3541         case 1: /* flat space addressing method */
3542                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3543                 break;
3544
3545         case 2: /* logical unit addressing method */
3546                 if (*lun & 0x3f) {
3547                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3548                                     "addressing method 0x%02x, expected 0",
3549                                     *lun & 0x3f);
3550                         break;
3551                 }
3552                 if (*(lun + 1) & 0xe0) {
3553                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
3554                                     "addressing method 0x%02x, expected 0",
3555                                     (*(lun + 1) & 0xe0) >> 5);
3556                         break;
3557                 }
3558                 res = *(lun + 1) & 0x1f;
3559                 break;
3560
3561         case 3: /* extended logical unit addressing method */
3562         default:
3563                 PRINT_ERROR("Unimplemented LUN addressing method %u",
3564                             address_method);
3565                 break;
3566         }
3567
3568 out:
3569         TRACE_EXIT_RES((int)res);
3570         return res;
3571
3572 out_err:
3573         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
3574         goto out;
3575 }
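
/*
 * E.g., both 00 05 00 00 00 00 00 00 (peripheral method, handled as flat
 * space above) and 40 05 00 00 00 00 00 00 (flat space method) unpack to
 * lun 5.
 */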
3576
3577 int scst_calc_block_shift(int sector_size)
3578 {
3579         int block_shift = 0;
3580         int t;
3581
3582         if (sector_size == 0)
3583                 sector_size = 512;
3584
3585         t = sector_size;
3586         while (1) {
3587                 if ((t & 1) != 0)
3588                         break;
3589                 t >>= 1;
3590                 block_shift++;
3591         }
3592         if (block_shift < 9) {
3593                 PRINT_ERROR("Wrong sector size %d", sector_size);
3594                 block_shift = -1;
3595         }
3596
3597         TRACE_EXIT_RES(block_shift);
3598         return block_shift;
3599 }
3600 EXPORT_SYMBOL(scst_calc_block_shift);
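
/*
 * E.g., scst_calc_block_shift(512) = 9 and scst_calc_block_shift(4096) = 12,
 * while a size such as 520, whose largest power-of-two factor is 2^3, stops
 * at shift 3 and is rejected with -1.
 */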
3601
3602 int scst_sbc_generic_parse(struct scst_cmd *cmd,
3603         int (*get_block_shift)(struct scst_cmd *cmd))
3604 {
3605         int res = 0;
3606
3607         TRACE_ENTRY();
3608
3609         /*
3610          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3611          * therefore change them only if necessary
3612          */
3613
3614         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3615               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3616
3617         switch (cmd->cdb[0]) {
3618         case VERIFY_6:
3619         case VERIFY:
3620         case VERIFY_12:
3621         case VERIFY_16:
3622                 if ((cmd->cdb[1] & BYTCHK) == 0) {
3623                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3624                         cmd->bufflen = 0;
3625                         goto set_timeout;
3626                 } else
3627                         cmd->data_len = 0;
3628                 break;
3629         default:
3630                 /* It's all good */
3631                 break;
3632         }
3633
3634         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
3635                 /*
3636                  * No need for locks here, since *_detach() cannot be
3637                  * called while there are outstanding commands.
3638                  */
3639                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3640         }
3641
3642 set_timeout:
3643         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3644                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
3645         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3646                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
3647         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3648                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
3649
3650         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
3651               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
3652
3653         TRACE_EXIT_RES(res);
3654         return res;
3655 }
3656 EXPORT_SYMBOL(scst_sbc_generic_parse);
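
/*
 * Illustrative sketch only: a dev handler's parse() callback typically
 * wraps this helper with a function returning the device's block shift.
 * The example_* names and the use of dh_priv for the handler's private
 * data are illustrative assumptions.
 */
#if 0
struct example_virt_dev {
	int block_shift;
};

static int example_get_block_shift(struct scst_cmd *cmd)
{
	struct example_virt_dev *virt_dev = cmd->dev->dh_priv;

	return virt_dev->block_shift;
}

static int example_parse(struct scst_cmd *cmd)
{
	return scst_sbc_generic_parse(cmd, example_get_block_shift);
}
#endif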
3657
3658 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
3659         int (*get_block_shift)(struct scst_cmd *cmd))
3660 {
3661         int res = 0;
3662
3663         TRACE_ENTRY();
3664
3665         /*
3666          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3667          * therefore change them only if necessary
3668          */
3669
3670         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3671               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3672
3673         cmd->cdb[1] &= 0x1f;
3674
3675         switch (cmd->cdb[0]) {
3676         case VERIFY_6:
3677         case VERIFY:
3678         case VERIFY_12:
3679         case VERIFY_16:
3680                 if ((cmd->cdb[1] & BYTCHK) == 0) {
3681                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3682                         cmd->bufflen = 0;
3683                         goto set_timeout;
3684                 }
3685                 break;
3686         default:
3687                 /* It's all good */
3688                 break;
3689         }
3690
3691         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
3692                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3693
3694 set_timeout:
3695         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3696                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
3697         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3698                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
3699         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3700                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
3701
3702         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
3703                 cmd->data_direction);
3704
3705         TRACE_EXIT_RES(res);
3706         return res;
3707 }
3708 EXPORT_SYMBOL(scst_cdrom_generic_parse);
3709
3710 int scst_modisk_generic_parse(struct scst_cmd *cmd,
3711         int (*get_block_shift)(struct scst_cmd *cmd))
3712 {
3713         int res = 0;
3714
3715         TRACE_ENTRY();
3716
3717         /*
3718          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3719          * therefore change them only if necessary
3720          */
3721
3722         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3723               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3724
3725         cmd->cdb[1] &= 0x1f;
3726
3727         switch (cmd->cdb[0]) {
3728         case VERIFY_6:
3729         case VERIFY:
3730         case VERIFY_12:
3731         case VERIFY_16:
3732                 if ((cmd->cdb[1] & BYTCHK) == 0) {
3733                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3734                         cmd->bufflen = 0;
3735                         goto set_timeout;
3736                 }
3737                 break;
3738         default:
3739                 /* It's all good */
3740                 break;
3741         }
3742
3743         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
3744                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3745
3746 set_timeout:
3747         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3748                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
3749         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3750                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
3751         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3752                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
3753
3754         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
3755                 cmd->data_direction);
3756
3757         TRACE_EXIT_RES(res);
3758         return res;
3759 }
3760 EXPORT_SYMBOL(scst_modisk_generic_parse);
3761
3762 int scst_tape_generic_parse(struct scst_cmd *cmd,
3763         int (*get_block_size)(struct scst_cmd *cmd))
3764 {
3765         int res = 0;
3766
3767         TRACE_ENTRY();
3768
3769         /*
3770          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3771          * therefore change them only if necessary
3772          */
3773
3774         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3775               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3776
3777         if (cmd->cdb[0] == READ_POSITION) {
3778                 int tclp = cmd->cdb[1] & 4;
3779                 int long_bit = cmd->cdb[1] & 2;
3780                 int bt = cmd->cdb[1] & 1;
3781
3782                 if ((!!tclp == !!long_bit) && (!bt || !long_bit)) {
3783                         cmd->bufflen =
3784                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
3785                         cmd->data_direction = SCST_DATA_READ;
3786                 } else {
3787                         cmd->bufflen = 0;
3788                         cmd->data_direction = SCST_DATA_NONE;
3789                 }
3790         }
3791
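        /*
         * Presumably SCST_TRANSFER_LEN_TYPE_FIXED shares its bit position
         * with the FIXED bit (bit 0 of CDB byte 1) of tape commands, so
         * this AND is non-zero only when the op supports fixed-block
         * transfers and the CDB actually requests them.
         */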
3792         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
3793                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
3794
3795         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3796                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
3797         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3798                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
3799         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3800                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
3801
3802         TRACE_EXIT_RES(res);
3803         return res;
3804 }
3805 EXPORT_SYMBOL(scst_tape_generic_parse);
3806
3807 static int scst_null_parse(struct scst_cmd *cmd)
3808 {
3809         int res = 0;
3810
3811         TRACE_ENTRY();
3812
3813         /*
3814          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3815          * therefore change them only if necessary
3816          */
3817
3818         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3819               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3820 #if 0
3821         switch (cmd->cdb[0]) {
3822         default:
3823                 /* It's all good */
3824                 break;
3825         }
3826 #endif