scst/src/scst_lib.c
/*
 *  scst_lib.c
 *
 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#include "scst_cdbprobe.h"

static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
        uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
        int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
        const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
        struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */

int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
        int res = 0;
        gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

        TRACE_ENTRY();

        if (cmd->sense != NULL)
                goto memzero;

        cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
        if (cmd->sense == NULL) {
                PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
                        "The sense data will be lost!!", cmd->cdb[0]);
                res = -ENOMEM;
                goto out;
        }

memzero:
        memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_sense);
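
/*
 * Usage sketch (illustrative, not part of the original file; all names used
 * here are defined above or in scst.h): callers derive the atomic flag from
 * their execution context, then fill the buffer with scst_set_sense():
 *
 *      if (scst_alloc_sense(cmd, scst_cmd_atomic(cmd)) == 0)
 *              scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
 *                      scst_get_cmd_dev_d_sense(cmd),
 *                      SCST_LOAD_SENSE(scst_sense_hardw_error));
 */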

int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
        const uint8_t *sense, unsigned int len)
{
        int res;

        TRACE_ENTRY();

        res = scst_alloc_sense(cmd, atomic);
        if (res != 0) {
                PRINT_BUFFER("Lost sense", sense, len);
                goto out;
        }

        memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);

void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
        TRACE_ENTRY();

        cmd->status = status;
        cmd->host_status = DID_OK;

        cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
        cmd->dbl_ua_orig_data_direction = cmd->data_direction;

        cmd->data_direction = SCST_DATA_NONE;
        cmd->resp_data_len = 0;
        cmd->is_send_status = 1;

        cmd->completed = 1;

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);

void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
        int rc;

        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

        rc = scst_alloc_sense(cmd, 1);
        if (rc != 0) {
                PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
                        key, asc, ascq);
                goto out;
        }

        scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
                scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
        TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_error);
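
/*
 * Usage sketch (hypothetical handler code; scst_sense_invalid_opcode is
 * assumed to be one of the scst_sense_* key/ASC/ASCQ tuples from scst.h,
 * like the ones used elsewhere in this file): a dev handler rejecting an
 * unsupported CDB would do
 *
 *      scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
 *
 * and then complete the command through its normal done path.
 */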

void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
        int key, int asc, int ascq)
{
        sBUG_ON(len < SCST_STANDARD_SENSE_LEN);

        memset(buffer, 0, len);

        if (d_sense) {
                /* Descriptor format */
                buffer[0] = 0x72;       /* Response Code                */
                buffer[1] = key;        /* Sense Key                    */
                buffer[2] = asc;        /* ASC                          */
                buffer[3] = ascq;       /* ASCQ                         */
        } else {
                /* Fixed format */
                buffer[0] = 0x70;       /* Response Code                */
                buffer[2] = key;        /* Sense Key                    */
                buffer[7] = 0x0a;       /* Additional Sense Length      */
                buffer[12] = asc;       /* ASC                          */
                buffer[13] = ascq;      /* ASCQ                         */
        }

        TRACE_BUFFER("Sense set", buffer, len);
        return;
}
EXPORT_SYMBOL(scst_set_sense);
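
/*
 * Worked example: scst_set_sense(buf, SCST_STANDARD_SENSE_LEN, false,
 * key 0x03 (MEDIUM ERROR), asc 0x11, ascq 0x00) yields fixed-format sense
 * with buf[0] = 0x70, buf[2] = 0x03, buf[7] = 0x0a, buf[12] = 0x11 and
 * buf[13] = 0x00. With d_sense true, the same key/ASC/ASCQ tuple lands at
 * the descriptor-format offsets instead: buf[0] = 0x72, buf[1] = 0x03,
 * buf[2] = 0x11, buf[3] = 0x00.
 */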

bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
        int key, int asc, int ascq)
{
        bool res = false;

        if (len < 14)
                goto out;

        /* Response Code */
        if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
                /* Fixed format */

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
                        goto out;
        } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
                /* Descriptor format */

                /* Sense Key */
                if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
                        goto out;

                /* ASC */
                if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
                        goto out;

                /* ASCQ */
                if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
                        goto out;
        } else
                goto out;

        res = true;

out:
        TRACE_EXIT_RES((int)res);
        return res;
}
EXPORT_SYMBOL(scst_analyze_sense);
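
/*
 * Example (the same pattern scst_set_initial_UA() below uses): because the
 * format-specific offsets are handled here, a single call covers both fixed
 * and descriptor sense:
 *
 *      if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
 *                      SCST_LOAD_SENSE(scst_sense_reset_UA)))
 *              ... sense describes a POWER ON/RESET UA ...
 */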

static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
        unsigned int len)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
        scst_alloc_set_sense(cmd, 1, sense, len);

        TRACE_EXIT();
        return;
}

void scst_set_busy(struct scst_cmd *cmd)
{
        int c = atomic_read(&cmd->sess->sess_cmd_count);

        TRACE_ENTRY();

        if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
                scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
                TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
                        "(cmds count %d, queue_type %x, sess->init_phase %d)",
                        cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        } else {
                scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
                TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
                        "initiator %s (cmds count %d, queue_type %x, "
                        "sess->init_phase %d)", cmd->sess->initiator_name, c,
                        cmd->queue_type, cmd->sess->init_phase);
        }

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_busy);

void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
                asc, ascq);

        /* Protect sess_tgt_dev_list_hash */
        mutex_lock(&scst_mutex);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                struct scst_tgt_dev *tgt_dev;

                list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        spin_lock_bh(&tgt_dev->tgt_dev_lock);
                        if (!list_empty(&tgt_dev->UA_list)) {
                                struct scst_tgt_dev_UA *ua;

                                ua = list_entry(tgt_dev->UA_list.next,
                                        typeof(*ua), UA_list_entry);
                                if (scst_analyze_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                SCST_SENSE_ALL_VALID,
                                                SCST_LOAD_SENSE(scst_sense_reset_UA))) {
                                        scst_set_sense(ua->UA_sense_buffer,
                                                sizeof(ua->UA_sense_buffer),
                                                tgt_dev->dev->d_sense,
                                                key, asc, ascq);
                                } else
                                        PRINT_ERROR("%s",
                                                "The first UA isn't RESET UA");
                        } else
                                PRINT_ERROR("%s", "There's no RESET UA to "
                                        "replace");
                        spin_unlock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_initial_UA);

static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
{
        struct scst_aen *aen;

        TRACE_ENTRY();

        aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
        if (aen == NULL) {
                PRINT_ERROR("AEN memory allocation failed. Corresponding "
                        "event notification will not be performed (initiator "
                        "%s)", tgt_dev->sess->initiator_name);
                goto out;
        }
        memset(aen, 0, sizeof(*aen));

        aen->sess = tgt_dev->sess;
        scst_sess_get(aen->sess);

        aen->lun = scst_pack_lun(tgt_dev->lun);

out:
        TRACE_EXIT_HRES((unsigned long)aen);
        return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
        TRACE_ENTRY();

        scst_sess_put(aen->sess);
        mempool_free(aen, scst_aen_mempool);

        TRACE_EXIT();
        return;
}

/* No locks */
void scst_capacity_data_changed(struct scst_device *dev)
{
        struct scst_tgt_dev *tgt_dev;
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

        TRACE_ENTRY();

        if (dev->type != TYPE_DISK) {
                TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
                        "CHANGED UA", dev->type);
                goto out;
        }

        TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

        mutex_lock(&scst_mutex);

        list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                            dev_tgt_dev_list_entry) {
                struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;

                if (tgtt->report_aen != NULL) {
                        struct scst_aen *aen;
                        int rc;

                        aen = scst_alloc_aen(tgt_dev);
                        if (aen == NULL)
                                goto queue_ua;

                        aen->event_fn = SCST_AEN_SCSI;
                        aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                        scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_capacity_data_changed));

                        TRACE_DBG("Calling target's %s report_aen(%p)",
                                tgtt->name, aen);
                        rc = tgtt->report_aen(aen);
                        TRACE_DBG("Target's %s report_aen(%p) returned %d",
                                tgtt->name, aen, rc);
                        if (rc == SCST_AEN_RES_SUCCESS)
                                continue;

                        scst_free_aen(aen);
                }
queue_ua:
                TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
                        tgt_dev);
                scst_set_sense(sense_buffer, sizeof(sense_buffer),
                        tgt_dev->dev->d_sense,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
                scst_check_set_UA(tgt_dev, sense_buffer,
                        sizeof(sense_buffer), 0);
        }

        mutex_unlock(&scst_mutex);

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);

static inline bool scst_is_report_luns_changed_type(int type)
{
        switch (type) {
        case TYPE_DISK:
        case TYPE_TAPE:
        case TYPE_PRINTER:
        case TYPE_PROCESSOR:
        case TYPE_WORM:
        case TYPE_ROM:
        case TYPE_SCANNER:
        case TYPE_MOD:
        case TYPE_MEDIUM_CHANGER:
        case TYPE_RAID:
        case TYPE_ENCLOSURE:
                return true;
        default:
                return false;
        }
}

/* scst_mutex supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
                                              int flags)
{
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
        struct list_head *shead;
        struct scst_tgt_dev *tgt_dev;
        int i;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
                "(sess %p)", sess);

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        spin_lock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (!scst_is_report_luns_changed_type(
                                        tgt_dev->dev->type))
                                continue;

                        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                        __scst_check_set_UA(tgt_dev, sense_buffer,
                                sizeof(sense_buffer),
                                flags | SCST_SET_UA_FLAG_GLOBAL);
                }
        }

        for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
                shead = &sess->sess_tgt_dev_list_hash[i];

                list_for_each_entry_reverse(tgt_dev,
                                shead, sess_tgt_dev_list_entry) {
                        spin_unlock_bh(&tgt_dev->tgt_dev_lock);
                }
        }

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
        struct scst_session *sess;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                int i;
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;
                struct scst_tgt_template *tgtt = sess->tgt->tgtt;

                for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                        shead = &sess->sess_tgt_dev_list_hash[i];

                        list_for_each_entry(tgt_dev, shead,
                                        sess_tgt_dev_list_entry) {
                                if (scst_is_report_luns_changed_type(
                                                tgt_dev->dev->type))
                                        goto found;
                        }
                }
                TRACE_MGMT_DBG("No device capable of REPORTED LUNS DATA "
                        "CHANGED UA found (sess %p)", sess);
                continue;
found:
                if (tgtt->report_aen != NULL) {
                        struct scst_aen *aen;
                        int rc;

                        aen = scst_alloc_aen(tgt_dev);
                        if (aen == NULL)
                                goto queue_ua;

                        aen->event_fn = SCST_AEN_SCSI;
                        aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
                        scst_set_sense(aen->aen_sense, aen->aen_sense_len,
                                tgt_dev->dev->d_sense,
                                SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

                        TRACE_DBG("Calling target's %s report_aen(%p)",
                                tgtt->name, aen);
                        rc = tgtt->report_aen(aen);
                        TRACE_DBG("Target's %s report_aen(%p) returned %d",
                                tgtt->name, aen, rc);
                        if (rc == SCST_AEN_RES_SUCCESS)
                                continue;

                        scst_free_aen(aen);
                }

queue_ua:
                scst_queue_report_luns_changed_UA(sess, 0);
        }

        TRACE_EXIT();
        return;
}

void scst_aen_done(struct scst_aen *aen)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
                aen->event_fn, aen->sess->initiator_name);

        if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
                goto out_free;

        if (aen->event_fn != SCST_AEN_SCSI)
                goto out_free;

        TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
                aen->sess->initiator_name);

        if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
                                scst_sense_reported_luns_data_changed))) {
                mutex_lock(&scst_mutex);
                scst_queue_report_luns_changed_UA(aen->sess,
                        SCST_SET_UA_FLAG_AT_HEAD);
                mutex_unlock(&scst_mutex);
        } else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
                        SCST_SENSE_ALL_VALID,
                        SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
                /* The tgt_dev might have been removed, so look it up again */
                struct list_head *shead;
                struct scst_tgt_dev *tgt_dev;
                uint64_t lun;

                lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

                mutex_lock(&scst_mutex);

                shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
                list_for_each_entry(tgt_dev, shead,
                                sess_tgt_dev_list_entry) {
                        if (tgt_dev->lun == lun) {
                                TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
                                        "UA (tgt_dev %p)", tgt_dev);
                                scst_check_set_UA(tgt_dev, aen->aen_sense,
                                        aen->aen_sense_len,
                                        SCST_SET_UA_FLAG_AT_HEAD);
                                break;
                        }
                }

                mutex_unlock(&scst_mutex);
        } else
                PRINT_ERROR("%s", "Unknown SCSI AEN");

out_free:
        scst_free_aen(aen);

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_aen_done);

int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        switch (cmd->state) {
        case SCST_CMD_STATE_INIT_WAIT:
        case SCST_CMD_STATE_INIT:
        case SCST_CMD_STATE_PRE_PARSE:
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_DEV_DONE:
                if (cmd->internal)
                        res = SCST_CMD_STATE_FINISHED_INTERNAL;
                else
                        res = SCST_CMD_STATE_PRE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PRE_DEV_DONE:
        case SCST_CMD_STATE_MODE_SELECT_CHECKS:
                res = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_CMD_STATE_PRE_XMIT_RESP:
                res = SCST_CMD_STATE_XMIT_RESP;
                break;

        case SCST_CMD_STATE_PREPROCESS_DONE:
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_DATA_WAIT:
        case SCST_CMD_STATE_TGT_PRE_EXEC:
        case SCST_CMD_STATE_SEND_FOR_EXEC:
        case SCST_CMD_STATE_LOCAL_EXEC:
        case SCST_CMD_STATE_REAL_EXEC:
        case SCST_CMD_STATE_REAL_EXECUTING:
                res = SCST_CMD_STATE_PRE_DEV_DONE;
                break;

        default:
                sBUG();
        }

        TRACE_EXIT_RES(res);
        return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);

void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        switch (cmd->state) {
        case SCST_CMD_STATE_PRE_XMIT_RESP:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
        case SCST_CMD_STATE_FINISHED_INTERNAL:
        case SCST_CMD_STATE_XMIT_WAIT:
                PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
                        cmd->state, cmd, cmd->cdb[0]);
                sBUG();
        }
#endif

        cmd->state = scst_get_cmd_abnormal_done_state(cmd);

        EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
                           (cmd->tgt_dev == NULL));

        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);

void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
        int i, l;

        TRACE_ENTRY();

        scst_check_restore_sg_buff(cmd);
        cmd->resp_data_len = resp_data_len;

        if (resp_data_len == cmd->bufflen)
                goto out;

        l = 0;
        for (i = 0; i < cmd->sg_cnt; i++) {
                l += cmd->sg[i].length;
                if (l >= resp_data_len) {
                        int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
                        TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
                                "resp_data_len %d, i %d, cmd->sg[i].length %d, "
                                "left %d",
                                cmd, (long long unsigned int)cmd->tag,
                                resp_data_len, i,
                                cmd->sg[i].length, left);
#endif
                        cmd->orig_sg_cnt = cmd->sg_cnt;
                        cmd->orig_sg_entry = i;
                        cmd->orig_entry_len = cmd->sg[i].length;
                        cmd->sg_cnt = (left > 0) ? i+1 : i;
                        cmd->sg[i].length = left;
                        cmd->sg_buff_modified = 1;
                        break;
                }
        }

out:
        TRACE_EXIT();
        return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
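
/*
 * Worked example (values chosen for illustration): with three 4096-byte SG
 * entries (bufflen 12288) and resp_data_len 6000, the loop stops at i == 1
 * (l == 8192 >= 6000), left == 6000 - 4096 == 1904, so sg_cnt becomes 2 and
 * sg[1].length is shrunk to 1904. The original values are saved so that
 * scst_check_restore_sg_buff() can undo the truncation later.
 */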

/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
        struct scst_device *dev;
        int res = 0;
        static int dev_num; /* protected by scst_mutex */

        TRACE_ENTRY();

        dev = kzalloc(sizeof(*dev), gfp_mask);
        if (dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                        "Allocation of scst_device failed");
                res = -ENOMEM;
                goto out;
        }

        dev->handler = &scst_null_devtype;
        dev->p_cmd_lists = &scst_main_cmd_lists;
        atomic_set(&dev->dev_cmd_count, 0);
        atomic_set(&dev->write_cmd_count, 0);
        scst_init_mem_lim(&dev->dev_mem_lim);
        spin_lock_init(&dev->dev_lock);
        atomic_set(&dev->on_dev_count, 0);
        INIT_LIST_HEAD(&dev->blocked_cmd_list);
        INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
        INIT_LIST_HEAD(&dev->dev_acg_dev_list);
        INIT_LIST_HEAD(&dev->threads_list);
        init_waitqueue_head(&dev->on_dev_waitQ);
        dev->dev_double_ua_possible = 1;
        dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
        dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
        if (dev->dev_io_ctx == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
                res = -ENOMEM;
                kfree(dev);
                goto out;
        }
#endif
#endif

        *out_dev = dev;

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
        TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
        if (!list_empty(&dev->dev_tgt_dev_list) ||
            !list_empty(&dev->dev_acg_dev_list)) {
                PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
                        "is not empty!", __func__);
                sBUG();
        }
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        __exit_io_context(dev->dev_io_ctx);
#endif
#endif

        kfree(dev);

        TRACE_EXIT();
        return;
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
        atomic_set(&mem_lim->alloced_pages, 0);
        mem_lim->max_allowed_pages =
                ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
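
/*
 * Worked example: the shifts convert scst_max_dev_cmd_mem (which, judging
 * from this conversion, is in megabytes) to pages: << 10 yields kilobytes,
 * and >> (PAGE_SHIFT - 10) divides by the page size in kilobytes. With
 * scst_max_dev_cmd_mem == 64 and 4 KB pages (PAGE_SHIFT == 12):
 * (64 << 10) >> 2 == 16384 pages.
 */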

static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
                                        struct scst_device *dev, uint64_t lun)
{
        struct scst_acg_dev *res;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
        res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM,
                      "%s", "Allocation of scst_acg_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(res, 0, sizeof(*res));
#endif

        res->dev = dev;
        res->acg = acg;
        res->lun = lun;

out:
        TRACE_EXIT_HRES(res);
        return res;
}

/* The activity supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
        TRACE_ENTRY();

        TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_del(&acg_dev->acg_dev_list_entry);
        list_del(&acg_dev->dev_acg_dev_list_entry);

        kmem_cache_free(scst_acgd_cachep, acg_dev);

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
        struct scst_acg *acg;

        TRACE_ENTRY();

        acg = kzalloc(sizeof(*acg), GFP_KERNEL);
        if (acg == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
                goto out;
        }

        INIT_LIST_HEAD(&acg->acg_dev_list);
        INIT_LIST_HEAD(&acg->acg_sess_list);
        INIT_LIST_HEAD(&acg->acn_list);
        acg->acg_name = acg_name;

        TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
        list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
        TRACE_EXIT_HRES(acg);
        return acg;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
        struct scst_acn *n, *nn;
        struct scst_acg_dev *acg_dev, *acg_dev_tmp;
        int res = 0;

        TRACE_ENTRY();

        if (!list_empty(&acg->acg_sess_list)) {
                PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
                res = -EBUSY;
                goto out;
        }

        TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
        list_del(&acg->scst_acg_list_entry);

        /* Freeing acg_devs */
        list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
                        acg_dev_list_entry) {
                struct scst_tgt_dev *tgt_dev, *tt;
                list_for_each_entry_safe(tgt_dev, tt,
                                 &acg_dev->dev->dev_tgt_dev_list,
                                 dev_tgt_dev_list_entry) {
                        if (tgt_dev->acg_dev == acg_dev)
                                scst_free_tgt_dev(tgt_dev);
                }
                scst_free_acg_dev(acg_dev);
        }

        /* Freeing names */
        list_for_each_entry_safe(n, nn, &acg->acn_list,
                        acn_list_entry) {
                list_del(&n->acn_list_entry);
                kfree(n->name);
                kfree(n);
        }
        INIT_LIST_HEAD(&acg->acn_list);

        kfree(acg);
out:
        TRACE_EXIT_RES(res);
        return res;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
        struct scst_acg_dev *acg_dev)
{
        int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
        struct scst_tgt_dev *tgt_dev;
        struct scst_device *dev = acg_dev->dev;
        struct list_head *sess_tgt_dev_list_head;
        struct scst_tgt_template *vtt = sess->tgt->tgtt;
        int rc, i;
        uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
        tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
        if (tgt_dev == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s",
                      "Allocation of scst_tgt_dev failed");
                goto out;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

        tgt_dev->dev = dev;
        tgt_dev->lun = acg_dev->lun;
        tgt_dev->acg_dev = acg_dev;
        tgt_dev->sess = sess;
        atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

        scst_sgv_pool_use_norm(tgt_dev);

        if (dev->scsi_dev != NULL) {
                ini_sg = dev->scsi_dev->host->sg_tablesize;
                ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
                ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
                                ENABLE_CLUSTERING);
        } else {
                ini_sg = (1 << 15) /* infinite */;
                ini_unchecked_isa_dma = 0;
                ini_use_clustering = 0;
        }
        tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

        if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
            !sess->tgt->tgtt->no_clustering)
                scst_sgv_pool_use_norm_clust(tgt_dev);

        if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
                scst_sgv_pool_use_dma(tgt_dev);

        if (dev->scsi_dev != NULL) {
                TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
                      "SCST lun=%lld", dev->scsi_dev->host->host_no,
                      dev->scsi_dev->channel, dev->scsi_dev->id,
                      dev->scsi_dev->lun,
                      (long long unsigned int)tgt_dev->lun);
        } else {
                TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
                               dev->virt_name,
                               (long long unsigned int)tgt_dev->lun);
        }

        spin_lock_init(&tgt_dev->tgt_dev_lock);
        INIT_LIST_HEAD(&tgt_dev->UA_list);
        spin_lock_init(&tgt_dev->thr_data_lock);
        INIT_LIST_HEAD(&tgt_dev->thr_data_list);
        spin_lock_init(&tgt_dev->sn_lock);
        INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
        INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
        tgt_dev->expected_sn = 1;
        tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
        tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
        for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
                atomic_set(&tgt_dev->sn_slots[i], 0);

        if (dev->handler->parse_atomic &&
            (sess->tgt->tgtt->preprocessing_done == NULL)) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                if (dev->handler->exec_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->exec_atomic) {
                if (sess->tgt->tgtt->rdy_to_xfer_atomic)
                        __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
                                &tgt_dev->tgt_dev_flags);
                __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }
        if (dev->handler->dev_done_atomic &&
            sess->tgt->tgtt->xmit_response_atomic) {
                __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
                        &tgt_dev->tgt_dev_flags);
        }

        scst_set_sense(sense_buffer, sizeof(sense_buffer),
                dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
        scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

        tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
        if (tgt_dev->tgt_dev_io_ctx == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO context "
                        "for dev %s (initiator %s)", dev->virt_name,
                        sess->initiator_name);
                goto out_free;
        }
#endif
#endif

        if (vtt->threads_num > 0) {
                rc = 0;
                if (dev->handler->threads_num > 0)
                        rc = scst_add_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        rc = scst_add_global_threads(vtt->threads_num);
                if (rc != 0)
                        goto out_free;
        }

        if (dev->handler && dev->handler->attach_tgt) {
                TRACE_DBG("Calling dev handler's attach_tgt(%p)",
                      tgt_dev);
                rc = dev->handler->attach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
                if (rc != 0) {
                        PRINT_ERROR("Device handler's %s attach_tgt() "
                            "failed: %d", dev->handler->name, rc);
                        goto out_thr_free;
                }
        }

        spin_lock_bh(&dev->dev_lock);
        list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
        if (dev->dev_reserved)
                __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
        spin_unlock_bh(&dev->dev_lock);

        sess_tgt_dev_list_head =
                &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
        list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
                      sess_tgt_dev_list_head);

out:
        TRACE_EXIT();
        return tgt_dev;

out_thr_free:
        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_global_threads(vtt->threads_num);
        }

out_free:
        __exit_io_context(tgt_dev->tgt_dev_io_ctx);

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);
        tgt_dev = NULL;
        goto out;
}

static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, scst_mutex - held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
{
        TRACE_ENTRY();

        scst_clear_reservation(tgt_dev);

        /* With activity suspended the lock isn't needed, but let's be safe */
        spin_lock_bh(&tgt_dev->tgt_dev_lock);
        scst_free_all_UA(tgt_dev);
        spin_unlock_bh(&tgt_dev->tgt_dev_lock);

        if (queue_UA) {
                uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
                scst_set_sense(sense_buffer, sizeof(sense_buffer),
                        tgt_dev->dev->d_sense,
                        SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
                scst_check_set_UA(tgt_dev, sense_buffer,
                        sizeof(sense_buffer), 0);
        }

        TRACE_EXIT();
        return;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
        struct scst_device *dev = tgt_dev->dev;
        struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

        TRACE_ENTRY();

        tm_dbg_deinit_tgt_dev(tgt_dev);

        spin_lock_bh(&dev->dev_lock);
        list_del(&tgt_dev->dev_tgt_dev_list_entry);
        spin_unlock_bh(&dev->dev_lock);

        list_del(&tgt_dev->sess_tgt_dev_list_entry);

        scst_clear_reservation(tgt_dev);
        scst_free_all_UA(tgt_dev);

        if (dev->handler && dev->handler->detach_tgt) {
                TRACE_DBG("Calling dev handler's detach_tgt(%p)",
                      tgt_dev);
                dev->handler->detach_tgt(tgt_dev);
                TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
        }

        if (vtt->threads_num > 0) {
                if (dev->handler->threads_num > 0)
                        scst_del_dev_threads(dev, vtt->threads_num);
                else if (dev->handler->threads_num == 0)
                        scst_del_global_threads(vtt->threads_num);
        }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
        __exit_io_context(tgt_dev->tgt_dev_io_ctx);
#endif
#endif

        kmem_cache_free(scst_tgtd_cachep, tgt_dev);

        TRACE_EXIT();
        return;
}

/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
                        acg_dev_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT();
        return res;

out_free:
        scst_sess_free_tgt_devs(sess);
        goto out;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_sess_free_tgt_devs(struct scst_session *sess)
{
        int i;
        struct scst_tgt_dev *tgt_dev, *t;

        TRACE_ENTRY();

        /* The session is going down, no users, so no locks */
        for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        scst_free_tgt_dev(tgt_dev);
                }
                INIT_LIST_HEAD(sess_tgt_dev_list_head);
        }

        TRACE_EXIT();
        return;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
                     uint64_t lun, int read_only)
{
        int res = 0;
        struct scst_acg_dev *acg_dev;
        struct scst_tgt_dev *tgt_dev;
        struct scst_session *sess;
        LIST_HEAD(tmp_tgt_dev_list);

        TRACE_ENTRY();

        INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
        list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
                if (acg_dev->dev == dev) {
                        PRINT_ERROR("Device is already in group %s",
                                acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }
#endif

        acg_dev = scst_alloc_acg_dev(acg, dev, lun);
        if (acg_dev == NULL) {
                res = -ENOMEM;
                goto out;
        }
        acg_dev->rd_only_flag = read_only;

        TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
                acg_dev);
        list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
        list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

        list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
                tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
                if (tgt_dev == NULL) {
                        res = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
                              &tmp_tgt_dev_list);
        }

        scst_report_luns_changed(acg);

        if (dev->virt_name != NULL) {
                PRINT_INFO("Added device %s to group %s (LUN %lld, "
                        "rd_only %d)", dev->virt_name, acg->acg_name,
                        (long long unsigned int)lun,
                        read_only);
        } else {
                PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
                        "%lld, rd_only %d)",
                        dev->scsi_dev->host->host_no,
                        dev->scsi_dev->channel, dev->scsi_dev->id,
                        dev->scsi_dev->lun, acg->acg_name,
                        (long long unsigned int)lun,
                        read_only);
        }

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
                         extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
        goto out;
}

/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
        int res = 0;
        struct scst_acg_dev *acg_dev = NULL, *a;
        struct scst_tgt_dev *tgt_dev, *tt;

        TRACE_ENTRY();

        list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
                if (a->dev == dev) {
                        acg_dev = a;
                        break;
                }
        }

        if (acg_dev == NULL) {
                PRINT_ERROR("Device is not found in group %s", acg->acg_name);
                res = -EINVAL;
                goto out;
        }

        list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
                         dev_tgt_dev_list_entry) {
                if (tgt_dev->acg_dev == acg_dev)
                        scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);

        scst_report_luns_changed(acg);

        if (dev->virt_name != NULL) {
                PRINT_INFO("Removed device %s from group %s",
                        dev->virt_name, acg->acg_name);
        } else {
                PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
                        dev->scsi_dev->host->host_no,
                        dev->scsi_dev->channel, dev->scsi_dev->id,
                        dev->scsi_dev->lun, acg->acg_name);
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}

/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
        int res = 0;
        struct scst_acn *n;
        int len;
        char *nm;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        PRINT_ERROR("Name %s already exists in group %s",
                                name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn");
                res = -ENOMEM;
                goto out;
        }

        len = strlen(name);
        nm = kmalloc(len + 1, GFP_KERNEL);
        if (nm == NULL) {
                PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
                res = -ENOMEM;
                goto out_free;
        }

        strcpy(nm, name);
        n->name = nm;

        list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
        if (res == 0)
                PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(n);
        goto out;
}

/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
        int res = -EINVAL;
        struct scst_acn *n;

        TRACE_ENTRY();

        list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
                if (strcmp(n->name, name) == 0) {
                        list_del(&n->acn_list_entry);
                        kfree(n->name);
                        kfree(n);
                        res = 0;
                        break;
                }
        }

        if (res == 0) {
                PRINT_INFO("Removed name %s from group %s", name,
                        acg->acg_name);
        } else {
                PRINT_ERROR("Unable to find name %s in group %s", name,
                        acg->acg_name);
        }

        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_cmd *scst_create_prepare_internal_cmd(
        struct scst_cmd *orig_cmd, int bufsize)
{
        struct scst_cmd *res;
        gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        res = scst_alloc_cmd(gfp_mask);
        if (res == NULL)
                goto out;

        res->cmd_lists = orig_cmd->cmd_lists;
        res->sess = orig_cmd->sess;
        res->atomic = scst_cmd_atomic(orig_cmd);
        res->internal = 1;
        res->tgtt = orig_cmd->tgtt;
        res->tgt = orig_cmd->tgt;
        res->dev = orig_cmd->dev;
        res->tgt_dev = orig_cmd->tgt_dev;
        res->lun = orig_cmd->lun;
        res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
        res->data_direction = SCST_DATA_UNKNOWN;
        res->orig_cmd = orig_cmd;
        res->bufflen = bufsize;

        scst_sess_get(res->sess);
        if (res->tgt_dev != NULL)
                __scst_get(0);

        res->state = SCST_CMD_STATE_PRE_PARSE;

out:
        TRACE_EXIT_HRES((unsigned long)res);
        return res;
}

int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
        int res = 0;
        static const uint8_t request_sense[6] =
            { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
        struct scst_cmd *rs_cmd;

        TRACE_ENTRY();

        if (orig_cmd->sense != NULL) {
                TRACE_MEM("Releasing sense %p (orig_cmd %p)",
                        orig_cmd->sense, orig_cmd);
                mempool_free(orig_cmd->sense, scst_sense_mempool);
                orig_cmd->sense = NULL;
        }

        rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
                        SCST_SENSE_BUFFERSIZE);
        if (rs_cmd == NULL)
                goto out_error;

        memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
        rs_cmd->cdb_len = sizeof(request_sense);
        rs_cmd->data_direction = SCST_DATA_READ;
        rs_cmd->expected_data_direction = rs_cmd->data_direction;
        rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
        rs_cmd->expected_values_set = 1;

        TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
                "cmd list", rs_cmd);
        spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
        list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
        wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
        spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
        TRACE_EXIT_RES(res);
        return res;

out_error:
        res = -1;
        goto out;
}

static void scst_complete_request_sense(struct scst_cmd *req_cmd)
{
        struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
        uint8_t *buf;
        int len;

        TRACE_ENTRY();

        sBUG_ON(orig_cmd == NULL);

        len = scst_get_buf_first(req_cmd, &buf);

        if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
            SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
                PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
                        buf, len);
                scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
                        len);
        } else {
                PRINT_ERROR("%s", "Unable to get the sense via "
                        "REQUEST SENSE, returning HARDWARE ERROR");
                scst_set_cmd_error(orig_cmd,
                        SCST_LOAD_SENSE(scst_sense_hardw_error));
        }

        if (len > 0)
                scst_put_buf(req_cmd, buf);

        TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
                "cmd list", orig_cmd);
        spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
        list_add(&orig_cmd->cmd_list_entry,
                &orig_cmd->cmd_lists->active_cmd_list);
        wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
        spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);

        TRACE_EXIT();
        return;
}

int scst_finish_internal_cmd(struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        sBUG_ON(!cmd->internal);

        if (cmd->cdb[0] == REQUEST_SENSE)
                scst_complete_request_sense(cmd);

        __scst_cmd_put(cmd);

        res = SCST_CMD_STATE_RES_CONT_NEXT;

        TRACE_EXIT_HRES(res);
        return res;
}
1585
1586 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1587 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1588 {
1589         struct scsi_request *req = NULL;
1590
1591         TRACE_ENTRY();
1592
1593         if (scsi_cmd)
1594                 req = scsi_cmd->sc_request;
1595         if (req) {
1596                 if (req->sr_bufflen)
1597                         kfree(req->sr_buffer);
1598                 scsi_release_request(req);
1599         }
1600
1601         TRACE_EXIT();
1602         return;
1603 }
1604
1605 static void scst_send_release(struct scst_device *dev)
1606 {
1607         struct scsi_request *req;
1608         struct scsi_device *scsi_dev;
1609         uint8_t cdb[6];
1610
1611         TRACE_ENTRY();
1612
1613         if (dev->scsi_dev == NULL)
1614                 goto out;
1615
1616         scsi_dev = dev->scsi_dev;
1617
1618         req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1619         if (req == NULL) {
1620                 PRINT_ERROR("Allocation of scsi_request failed: unable "
1621                             "to RELEASE device %d:%d:%d:%d",
1622                             scsi_dev->host->host_no, scsi_dev->channel,
1623                             scsi_dev->id, scsi_dev->lun);
1624                 goto out;
1625         }
1626
1627         memset(cdb, 0, sizeof(cdb));
1628         cdb[0] = RELEASE;
1629         cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1630             ((scsi_dev->lun << 5) & 0xe0) : 0;
1631         memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1632         req->sr_cmd_len = sizeof(cdb);
1633         req->sr_data_direction = SCST_DATA_NONE;
1634         req->sr_use_sg = 0;
1635         req->sr_bufflen = 0;
1636         req->sr_buffer = NULL;
1637         req->sr_request->rq_disk = dev->rq_disk;
1638         req->sr_sense_buffer[0] = 0;
1639
1640         TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1641                 "mid-level", req);
1642         scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1643                     scst_req_done, 15, 3);
1644
1645 out:
1646         TRACE_EXIT();
1647         return;
1648 }
1649 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1650 static void scst_send_release(struct scst_device *dev)
1651 {
1652         struct scsi_device *scsi_dev;
1653         unsigned char cdb[6];
1654         uint8_t sense[SCSI_SENSE_BUFFERSIZE];
1655         int rc, i;
1656
1657         TRACE_ENTRY();
1658
1659         if (dev->scsi_dev == NULL)
1660                 goto out;
1661
1662         scsi_dev = dev->scsi_dev;
1663
1664         for (i = 0; i < 5; i++) {
1665                 memset(cdb, 0, sizeof(cdb));
1666                 cdb[0] = RELEASE;
1667                 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1668                     ((scsi_dev->lun << 5) & 0xe0) : 0;
1669
1670                 memset(sense, 0, sizeof(sense));
1671
1672                 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1673                         "SCSI mid-level");
1674                 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1675                                 sense, 15, 0, 0);
1676                 TRACE_DBG("RELEASE done: %x", rc);
1677
1678                 if (scsi_status_is_good(rc)) {
1679                         break;
1680                 } else {
1681                         PRINT_ERROR("RELEASE failed: %d", rc);
1682                         PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
1683                         scst_check_internal_sense(dev, rc, sense,
1684                                 sizeof(sense));
1685                 }
1686         }
1687
1688 out:
1689         TRACE_EXIT();
1690         return;
1691 }
1692 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1693
1694 /* scst_mutex supposed to be held */
1695 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1696 {
1697         struct scst_device *dev = tgt_dev->dev;
1698         int release = 0;
1699
1700         TRACE_ENTRY();
1701
1702         spin_lock_bh(&dev->dev_lock);
1703         if (dev->dev_reserved &&
1704             !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1705                 /* This is the one that holds the reservation */
1706                 struct scst_tgt_dev *tgt_dev_tmp;
1707                 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1708                                     dev_tgt_dev_list_entry) {
1709                         clear_bit(SCST_TGT_DEV_RESERVED,
1710                                     &tgt_dev_tmp->tgt_dev_flags);
1711                 }
1712                 dev->dev_reserved = 0;
1713                 release = 1;
1714         }
1715         spin_unlock_bh(&dev->dev_lock);
1716
1717         if (release)
1718                 scst_send_release(dev);
1719
1720         TRACE_EXIT();
1721         return;
1722 }
1723
1724 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
1725         const char *initiator_name)
1726 {
1727         struct scst_session *sess;
1728         int i;
1729         int len;
1730         char *nm;
1731
1732         TRACE_ENTRY();
1733
1734 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1735         sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
1736 #else
1737         sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
1738 #endif
1739         if (sess == NULL) {
1740                 TRACE(TRACE_OUT_OF_MEM, "%s",
1741                       "Allocation of scst_session failed");
1742                 goto out;
1743         }
1744 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1745         memset(sess, 0, sizeof(*sess));
1746 #endif
1747
1748         sess->init_phase = SCST_SESS_IPH_INITING;
1749         sess->shut_phase = SCST_SESS_SPH_READY;
1750         atomic_set(&sess->refcnt, 0);
1751         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1752                 struct list_head *sess_tgt_dev_list_head =
1753                          &sess->sess_tgt_dev_list_hash[i];
1754                 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1755         }
1756         spin_lock_init(&sess->sess_list_lock);
1757         INIT_LIST_HEAD(&sess->search_cmd_list);
1758         sess->tgt = tgt;
1759         INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
1760         INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
1761
1762 #ifdef CONFIG_SCST_MEASURE_LATENCY
1763         spin_lock_init(&sess->meas_lock);
1764 #endif
1765
1766         len = strlen(initiator_name);
1767         nm = kmalloc(len + 1, gfp_mask);
1768         if (nm == NULL) {
1769                 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
1770                 goto out_free;
1771         }
1772
1773         strcpy(nm, initiator_name);
1774         sess->initiator_name = nm;
1775
1776 out:
1777         TRACE_EXIT();
1778         return sess;
1779
1780 out_free:
1781         kmem_cache_free(scst_sess_cachep, sess);
1782         sess = NULL;
1783         goto out;
1784 }
1785
1786 void scst_free_session(struct scst_session *sess)
1787 {
1788         TRACE_ENTRY();
1789
1790         mutex_lock(&scst_mutex);
1791
1792         TRACE_DBG("Removing sess %p from the list", sess);
1793         list_del(&sess->sess_list_entry);
1794         TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
1795         list_del(&sess->acg_sess_list_entry);
1796
1797         scst_sess_free_tgt_devs(sess);
1798
1799         wake_up_all(&sess->tgt->unreg_waitQ);
1800
1801         mutex_unlock(&scst_mutex);
1802
1803         kfree(sess->initiator_name);
1804         kmem_cache_free(scst_sess_cachep, sess);
1805
1806         TRACE_EXIT();
1807         return;
1808 }
1809
1810 void scst_free_session_callback(struct scst_session *sess)
1811 {
1812         struct completion *c;
1813
1814         TRACE_ENTRY();
1815
1816         TRACE_DBG("Freeing session %p", sess);
1817
1818         c = sess->shutdown_compl;
1819
1820         if (sess->unreg_done_fn) {
1821                 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
1822                 sess->unreg_done_fn(sess);
1823                 TRACE_DBG("%s", "unreg_done_fn() returned");
1824         }
1825         scst_free_session(sess);
1826
1827         if (c)
1828                 complete_all(c);
1829
1830         TRACE_EXIT();
1831         return;
1832 }
1833
1834 void scst_sched_session_free(struct scst_session *sess)
1835 {
1836         unsigned long flags;
1837
1838         TRACE_ENTRY();
1839
1840         if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
1841                 PRINT_CRIT_ERROR("session %p is going to shut down with "
1842                         "unknown shut phase %lx", sess, sess->shut_phase);
1843                 sBUG();
1844         }
1845
1846         spin_lock_irqsave(&scst_mgmt_lock, flags);
1847         TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
1848         list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
1849         spin_unlock_irqrestore(&scst_mgmt_lock, flags);
1850
1851         wake_up(&scst_mgmt_waitQ);
1852
1853         TRACE_EXIT();
1854         return;
1855 }
1856
1857 void scst_cmd_get(struct scst_cmd *cmd)
1858 {
1859         __scst_cmd_get(cmd);
1860 }
1861 EXPORT_SYMBOL(scst_cmd_get);
1862
1863 void scst_cmd_put(struct scst_cmd *cmd)
1864 {
1865         __scst_cmd_put(cmd);
1866 }
1867 EXPORT_SYMBOL(scst_cmd_put);
1868
1869 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
1870 {
1871         struct scst_cmd *cmd;
1872
1873         TRACE_ENTRY();
1874
1875 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1876         cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
1877 #else
1878         cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
1879 #endif
1880         if (cmd == NULL) {
1881                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
1882                 goto out;
1883         }
1884 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1885         memset(cmd, 0, sizeof(*cmd));
1886 #endif
1887
1888         cmd->state = SCST_CMD_STATE_INIT_WAIT;
1889         cmd->start_time = jiffies;
1890         atomic_set(&cmd->cmd_ref, 1);
1891         cmd->cmd_lists = &scst_main_cmd_lists;
1892         INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
1893         cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1894         cmd->timeout = SCST_DEFAULT_TIMEOUT;
1895         cmd->retries = 0;
1896         cmd->data_len = -1;
1897         cmd->is_send_status = 1;
1898         cmd->resp_data_len = -1;
1899
1900         cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
1901         cmd->dbl_ua_orig_resp_data_len = -1;
1902
1903 out:
1904         TRACE_EXIT();
1905         return cmd;
1906 }
1907
1908 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
1909 {
1910         scst_sess_put(cmd->sess);
1911
1912         /*
1913          * At this point tgt_dev can be dead, but the pointer remains non-NULL
1914          */
1915         if (likely(cmd->tgt_dev != NULL))
1916                 __scst_put();
1917
1918         scst_destroy_cmd(cmd);
1919         return;
1920 }
1921
1922 /* No locks supposed to be held */
1923 void scst_free_cmd(struct scst_cmd *cmd)
1924 {
1925         int destroy = 1;
1926
1927         TRACE_ENTRY();
1928
1929         TRACE_DBG("Freeing cmd %p (tag %llu)",
1930                   cmd, (unsigned long long)cmd->tag);
1931
1932         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1933                 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
1934                         cmd, atomic_read(&scst_cmd_count));
1935         }
1936
1937         sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
1938                 cmd->dec_on_dev_needed);
1939
1940 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1941 #if defined(CONFIG_SCST_EXTRACHECKS)
1942         if (cmd->scsi_req) {
1943                 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
1944                         "scsi_req!");
1945                 scst_release_request(cmd);
1946         }
1947 #endif
1948 #endif
1949
1950         /*
1951          * The target driver may have already freed the sg buffer before
1952          * calling scst_tgt_cmd_done(); e.g., scst_local has to do that.
1953          */
1954         if (!cmd->tgt_data_buf_alloced)
1955                 scst_check_restore_sg_buff(cmd);
1956
1957         if (cmd->tgtt->on_free_cmd != NULL) {
1958                 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
1959                 cmd->tgtt->on_free_cmd(cmd);
1960                 TRACE_DBG("%s", "Target's on_free_cmd() returned");
1961         }
1962
1963         if (likely(cmd->dev != NULL)) {
1964                 struct scst_dev_type *handler = cmd->dev->handler;
1965                 if (handler->on_free_cmd != NULL) {
1966                         TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1967                               handler->name, cmd);
1968                         handler->on_free_cmd(cmd);
1969                         TRACE_DBG("Dev handler %s on_free_cmd() returned",
1970                                 handler->name);
1971                 }
1972         }
1973
1974         scst_release_space(cmd);
1975
1976         if (unlikely(cmd->sense != NULL)) {
1977                 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
1978                 mempool_free(cmd->sense, scst_sense_mempool);
1979                 cmd->sense = NULL;
1980         }
1981
1982         if (likely(cmd->tgt_dev != NULL)) {
1983 #ifdef CONFIG_SCST_EXTRACHECKS
1984                 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
1985                         PRINT_ERROR("Finishing not executed cmd %p (opcode "
1986                             "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
1987                             cmd, cmd->cdb[0], cmd->tgtt->name,
1988                             (unsigned long long)cmd->lun,
1989                             cmd->sn, cmd->tgt_dev->expected_sn);
1990                         scst_unblock_deferred(cmd->tgt_dev, cmd);
1991                 }
1992 #endif
1993
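                /*
                 * An out-of-SN command apparently must outlive the SN
                 * accounting: whichever path (freeing or SN processing)
                 * sets SCST_CMD_CAN_BE_DESTROYED second does the actual
                 * destruction.
                 */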
1994                 if (unlikely(cmd->out_of_sn)) {
1995                         TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
1996                                 "destroy=%d", cmd,
1997                                 (unsigned long long)cmd->tag,
1998                                 cmd->sn, destroy);
1999                         destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2000                                         &cmd->cmd_flags);
2001                 }
2002         }
2003
2004         if (likely(destroy))
2005                 scst_destroy_put_cmd(cmd);
2006
2007         TRACE_EXIT();
2008         return;
2009 }
2010
2011 /* No locks supposed to be held. */
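/*
 * Called when a command finishes; moves up to two commands from the
 * target's retry list back to the active list ("slow start", which
 * presumably avoids re-flooding an overloaded target all at once).
 */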
2012 void scst_check_retries(struct scst_tgt *tgt)
2013 {
2014         int need_wake_up = 0;
2015
2016         TRACE_ENTRY();
2017
2018         /*
2019          * We don't worry about overflow of finished_cmds, because we check
2020          * only for its change.
2021          */
2022         atomic_inc(&tgt->finished_cmds);
2023         /* See comment in scst_queue_retry_cmd() */
2024         smp_mb__after_atomic_inc();
2025         if (unlikely(tgt->retry_cmds > 0)) {
2026                 struct scst_cmd *c, *tc;
2027                 unsigned long flags;
2028
2029                 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2030                       tgt->retry_cmds);
2031
2032                 spin_lock_irqsave(&tgt->tgt_lock, flags);
2033                 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2034                                 cmd_list_entry) {
2035                         tgt->retry_cmds--;
2036
2037                         TRACE_RETRY("Moving retry cmd %p to head of active "
2038                                 "cmd list (retry_cmds left %d)",
2039                                 c, tgt->retry_cmds);
2040                         spin_lock(&c->cmd_lists->cmd_list_lock);
2041                         list_move(&c->cmd_list_entry,
2042                                   &c->cmd_lists->active_cmd_list);
2043                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2044                         spin_unlock(&c->cmd_lists->cmd_list_lock);
2045
2046                         need_wake_up++;
2047                         if (need_wake_up >= 2) /* "slow start" */
2048                                 break;
2049                 }
2050                 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2051         }
2052
2053         TRACE_EXIT();
2054         return;
2055 }
2056
2057 void scst_tgt_retry_timer_fn(unsigned long arg)
2058 {
2059         struct scst_tgt *tgt = (struct scst_tgt *)arg;
2060         unsigned long flags;
2061
2062         TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2063
2064         spin_lock_irqsave(&tgt->tgt_lock, flags);
2065         tgt->retry_timer_active = 0;
2066         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2067
2068         scst_check_retries(tgt);
2069
2070         TRACE_EXIT();
2071         return;
2072 }
2073
2074 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2075 {
2076         struct scst_mgmt_cmd *mcmd;
2077
2078         TRACE_ENTRY();
2079
2080         mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2081         if (mcmd == NULL) {
2082                 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2083                         "failed, some commands and their data could leak");
2084                 goto out;
2085         }
2086         memset(mcmd, 0, sizeof(*mcmd));
2087
2088 out:
2089         TRACE_EXIT();
2090         return mcmd;
2091 }
2092
2093 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2094 {
2095         unsigned long flags;
2096
2097         TRACE_ENTRY();
2098
2099         spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2100         atomic_dec(&mcmd->sess->sess_cmd_count);
2101         spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2102
2103         scst_sess_put(mcmd->sess);
2104
2105         if (mcmd->mcmd_tgt_dev != NULL)
2106                 __scst_put();
2107
2108         mempool_free(mcmd, scst_mgmt_mempool);
2109
2110         TRACE_EXIT();
2111         return;
2112 }
2113
2114 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2115 int scst_alloc_request(struct scst_cmd *cmd)
2116 {
2117         int res = 0;
2118         struct scsi_request *req;
2119         int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2120
2121         TRACE_ENTRY();
2122
2123         /* cmd->dev->scsi_dev must be non-NULL here */
2124         req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2125         if (req == NULL) {
2126                 TRACE(TRACE_OUT_OF_MEM, "%s",
2127                       "Allocation of scsi_request failed");
2128                 res = -ENOMEM;
2129                 goto out;
2130         }
2131
2132         cmd->scsi_req = req;
2133
2134         memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2135         req->sr_cmd_len = cmd->cdb_len;
2136         req->sr_data_direction = cmd->data_direction;
2137         req->sr_use_sg = cmd->sg_cnt;
2138         req->sr_bufflen = cmd->bufflen;
2139         req->sr_buffer = cmd->sg;
2140         req->sr_request->rq_disk = cmd->dev->rq_disk;
2141         req->sr_sense_buffer[0] = 0;
2142
2143         cmd->scsi_req->upper_private_data = cmd;
2144
2145 out:
2146         TRACE_EXIT();
2147         return res;
2148 }
2149
2150 void scst_release_request(struct scst_cmd *cmd)
2151 {
2152         scsi_release_request(cmd->scsi_req);
2153         cmd->scsi_req = NULL;
2154 }
2155 #endif
2156
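/*
 * Allocates cmd's data buffer from the tgt_dev's SGV pool. In atomic
 * context the allocation is only satisfied from the pool's cache
 * (SCST_POOL_NO_ALLOC_ON_CACHE_MISS).
 */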
2157 int scst_alloc_space(struct scst_cmd *cmd)
2158 {
2159         gfp_t gfp_mask;
2160         int res = -ENOMEM;
2161         int atomic = scst_cmd_atomic(cmd);
2162         int flags;
2163         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2164
2165         TRACE_ENTRY();
2166
2167         gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2168
2169         flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2170         if (cmd->no_sgv)
2171                 flags |= SCST_POOL_ALLOC_NO_CACHED;
2172
2173         cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2174                         &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2175         if (cmd->sg == NULL)
2176                 goto out;
2177
2178         if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2179                 static int ll;
2180                 if (ll < 10) {
2181                         PRINT_INFO("Unable to complete command due to "
2182                                 "SG IO count limitation (requested %d, "
2183                                 "available %d, tgt lim %d)", cmd->sg_cnt,
2184                                 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2185                         ll++;
2186                 }
2187                 goto out_sg_free;
2188         }
2189
2190         res = 0;
2191
2192 out:
2193         TRACE_EXIT();
2194         return res;
2195
2196 out_sg_free:
2197         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2198         cmd->sgv = NULL;
2199         cmd->sg = NULL;
2200         cmd->sg_cnt = 0;
2201         goto out;
2202 }
2203
2204 static void scst_release_space(struct scst_cmd *cmd)
2205 {
2206         TRACE_ENTRY();
2207
2208         if (cmd->sgv == NULL)
2209                 goto out;
2210
2211         if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2212                 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2213                 goto out;
2214         }
2215
2216         sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2217
2218         cmd->sgv = NULL;
2219         cmd->sg_cnt = 0;
2220         cmd->sg = NULL;
2221         cmd->bufflen = 0;
2222         cmd->data_len = 0;
2223
2224 out:
2225         TRACE_EXIT();
2226         return;
2227 }
2228
2229 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
2230 {
2231         struct scatterlist *src_sg, *dst_sg;
2232         unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
2233         struct page *src, *dst;
2234         unsigned int s, d, to_copy;
2235
2236         TRACE_ENTRY();
2237
2238         if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
2239                 src_sg = cmd->tgt_sg;
2240                 src_sg_cnt = cmd->tgt_sg_cnt;
2241                 dst_sg = cmd->sg;
2242                 to_copy = cmd->bufflen;
2243         } else {
2244                 src_sg = cmd->sg;
2245                 src_sg_cnt = cmd->sg_cnt;
2246                 dst_sg = cmd->tgt_sg;
2247                 to_copy = cmd->resp_data_len;
2248         }
2249
2250         TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
2251                 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
2252                 to_copy);
2253
2254         dst = sg_page(dst_sg);
2255         dst_len = dst_sg->length;
2256         dst_offs = dst_sg->offset;
2257
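        /*
         * Walk both SG lists page by page: fully aligned whole pages go
         * through copy_page(), partial pages through memcpy().
         */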
2258         s = 0;
2259         d = 0;
2260         src_offs = 0;
2261         while (s < src_sg_cnt) {
2262                 src = sg_page(&src_sg[s]);
2263                 src_len = src_sg[s].length;
2264                 src_offs += src_sg[s].offset;
2265
2266                 do {
2267                         unsigned int n;
2268
2269                         /*
2270                          * Himem pages are not allowed here, see the
2271                          * corresponding #warning in scst_main.c. Correct
2272                          * your target driver or dev handler to not alloc
2273                          * such pages!
2274                          */
2275                         EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
2276                                            PageHighMem(src));
2277
2278                         TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
2279                                 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
2280                                 cmd, to_copy, src, src_len, src_offs, dst,
2281                                 dst_len, dst_offs);
2282
2283                         if ((src_offs == 0) && (dst_offs == 0) &&
2284                             (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
2285                                 copy_page(page_address(dst), page_address(src));
2286                                 n = PAGE_SIZE;
2287                         } else {
2288                                 n = min(PAGE_SIZE - dst_offs,
2289                                         PAGE_SIZE - src_offs);
2290                                 n = min(n, src_len);
2291                                 n = min(n, dst_len);
2292                                 memcpy(page_address(dst) + dst_offs,
2293                                        page_address(src) + src_offs, n);
2294                                 dst_offs -= min(n, dst_offs);
2295                                 src_offs -= min(n, src_offs);
2296                         }
2297
2298                         TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
2299
2300                         to_copy -= n;
2301                         if (to_copy <= 0)
2302                                 goto out;
2303
2304                         src_len -= n;
2305                         dst_len -= n;
2306                         if (dst_len == 0) {
2307                                 d++;
2308                                 dst = sg_page(&dst_sg[d]);
2309                                 dst_len = dst_sg[d].length;
2310                                 dst_offs += dst_sg[d].offset;
2311                         }
2312                 } while (src_len > 0);
2313
2314                 s++;
2315         }
2316
2317 out:
2318         TRACE_EXIT();
2319         return;
2320 }
2321
2322 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
2323
2324 #define SCST_CDB_GROUP(opcode)   (((opcode) >> 5) & 0x7)
2325 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
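/* Bits 7-5 of the opcode select the CDB group; -1 marks reserved/vendor groups */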
2326
2327 int scst_get_cdb_len(const uint8_t *cdb)
2328 {
2329         return SCST_GET_CDB_LEN(cdb[0]);
2330 }
2331
2332 /* get_trans_len_x extracts x bytes from the CDB as the length, starting at off */
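/* e.g. for READ(10) off is 7, so get_trans_len_2() picks up CDB bytes 7-8 */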
2333
2334 /* for special commands */
2335 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
2336 {
2337         cmd->bufflen = 6;
2338         return 0;
2339 }
2340
2341 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
2342 {
2343         cmd->bufflen = READ_CAP_LEN;
2344         return 0;
2345 }
2346
2347 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
2348 {
2349         cmd->bufflen = 1;
2350         return 0;
2351 }
2352
2353 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
2354 {
2355         uint8_t *p = (uint8_t *)cmd->cdb + off;
2356         int res = 0;
2357
2358         cmd->bufflen = 0;
2359         cmd->bufflen |= ((u32)p[0]) << 8;
2360         cmd->bufflen |= ((u32)p[1]);
2361
2362         switch (cmd->cdb[1] & 0x1f) {
2363         case 0:
2364         case 1:
2365         case 6:
2366                 if (cmd->bufflen != 0) {
2367                         PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
2368                                 "allocation length for service action %x",
2369                                 cmd->bufflen, cmd->cdb[1] & 0x1f);
2370                         goto out_inval;
2371                 }
2372                 break;
2373         }
2374
2375         switch (cmd->cdb[1] & 0x1f) {
2376         case 0:
2377         case 1:
2378                 cmd->bufflen = 20;
2379                 break;
2380         case 6:
2381                 cmd->bufflen = 32;
2382                 break;
2383         case 8:
2384                 cmd->bufflen = max(28, cmd->bufflen);
2385                 break;
2386         default:
2387                 PRINT_ERROR("READ POSITION: Invalid service action %x",
2388                         cmd->cdb[1] & 0x1f);
2389                 goto out_inval;
2390         }
2391
2392 out:
2393         return res;
2394
2395 out_inval:
2396         scst_set_cmd_error(cmd,
2397                 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
2398         res = 1;
2399         goto out;
2400 }
2401
2402 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
2403 {
2404         cmd->bufflen = (u32)cmd->cdb[off];
2405         return 0;
2406 }
2407
2408 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
2409 {
2410         cmd->bufflen = (u32)cmd->cdb[off];
2411         if (cmd->bufflen == 0)
2412                 cmd->bufflen = 256;
2413         return 0;
2414 }
2415
2416 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
2417 {
2418         const uint8_t *p = cmd->cdb + off;
2419
2420         cmd->bufflen = 0;
2421         cmd->bufflen |= ((u32)p[0]) << 8;
2422         cmd->bufflen |= ((u32)p[1]);
2423
2424         return 0;
2425 }
2426
2427 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
2428 {
2429         const uint8_t *p = cmd->cdb + off;
2430
2431         cmd->bufflen = 0;
2432         cmd->bufflen |= ((u32)p[0]) << 16;
2433         cmd->bufflen |= ((u32)p[1]) << 8;
2434         cmd->bufflen |= ((u32)p[2]);
2435
2436         return 0;
2437 }
2438
2439 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
2440 {
2441         const uint8_t *p = cmd->cdb + off;
2442
2443         cmd->bufflen = 0;
2444         cmd->bufflen |= ((u32)p[0]) << 24;
2445         cmd->bufflen |= ((u32)p[1]) << 16;
2446         cmd->bufflen |= ((u32)p[2]) << 8;
2447         cmd->bufflen |= ((u32)p[3]);
2448
2449         return 0;
2450 }
2451
2452 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
2453 {
2454         cmd->bufflen = 0;
2455         return 0;
2456 }
2457
2458 int scst_get_cdb_info(struct scst_cmd *cmd)
2459 {
2460         int dev_type = cmd->dev->handler->type;
2461         int i, res = 0;
2462         uint8_t op;
2463         const struct scst_sdbops *ptr = NULL;
2464
2465         TRACE_ENTRY();
2466
2467         op = cmd->cdb[0];       /* get the opcode */
2468
2469         TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
2470                 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
2471                 dev_type);
2472
2473         i = scst_scsi_op_list[op];
2474         while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
2475                 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
2476                         ptr = &scst_scsi_op_table[i];
2477                         TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
2478                               ptr->ops, ptr->devkey[0], /* disk     */
2479                               ptr->devkey[1],   /* tape     */
2480                               ptr->devkey[2],   /* printer */
2481                               ptr->devkey[3],   /* cpu      */
2482                               ptr->devkey[4],   /* cdr      */
2483                               ptr->devkey[5],   /* cdrom    */
2484                               ptr->devkey[6],   /* scanner */
2485                               ptr->devkey[7],   /* worm     */
2486                               ptr->devkey[8],   /* changer */
2487                               ptr->devkey[9],   /* commdev */
2488                               ptr->op_name);
2489                         TRACE_DBG("direction=%d flags=%d off=%d",
2490                               ptr->direction,
2491                               ptr->flags,
2492                               ptr->off);
2493                         break;
2494                 }
2495                 i++;
2496         }
2497
2498         if (ptr == NULL) {
2499                 /* opcode not found, or not currently supported */
2500                 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
2501                       dev_type);
2502                 res = -1;
2503                 cmd->op_flags = SCST_INFO_INVALID;
2504                 goto out;
2505         }
2506
2507         cmd->cdb_len = SCST_GET_CDB_LEN(op);
2508         cmd->op_name = ptr->op_name;
2509         cmd->data_direction = ptr->direction;
2510         cmd->op_flags = ptr->flags;
2511         res = (*ptr->get_trans_len)(cmd, ptr->off);
2512
2513 out:
2514         TRACE_EXIT();
2515         return res;
2516 }
2517 EXPORT_SYMBOL(scst_get_cdb_info);
2518
2519 /* Packs SCST LUN back to SCSI form using peripheral device addressing method */
2520 uint64_t scst_pack_lun(const uint64_t lun)
2521 {
2522         uint64_t res;
2523         uint16_t *p = (uint16_t *)&res;
2524
2525         res = lun;
2526         *p = cpu_to_be16(*p);
2527
2528         TRACE_EXIT_HRES((unsigned long)res);
2529         return res;
2530 }
2531
2532 /*
2533  * Routine to extract a lun number from an 8-byte LUN structure
2534  * in network byte order (BE).
2535  * (see SAM-2, Section 4.12.3 page 40)
2536  * Supports the peripheral, flat space and logical unit addressing methods.
2537  */
2538 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2539 {
2540         uint64_t res = NO_SUCH_LUN;
2541         int address_method;
2542
2543         TRACE_ENTRY();
2544
2545         TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2546
2547         if (unlikely(len < 2)) {
2548                 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2549                         "more", len);
2550                 goto out;
2551         }
2552
2553         if (len > 2) {
2554                 switch (len) {
2555                 case 8:
2556                         if ((*((uint64_t *)lun) &
2557                           __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2558                                 goto out_err;
2559                         break;
2560                 case 4:
2561                         if (*((uint16_t *)&lun[2]) != 0)
2562                                 goto out_err;
2563                         break;
2564                 case 6:
2565                         if (*((uint32_t *)&lun[2]) != 0)
2566                                 goto out_err;
2567                         break;
2568                 default:
2569                         goto out_err;
2570                 }
2571         }
2572
2573         address_method = (*lun) >> 6;   /* high 2 bits of byte 0 */
2574         switch (address_method) {
2575         case 0: /* peripheral device addressing method */
2576 #if 0
2577                 if (*lun) {
2578                         PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
2579                              "peripheral device addressing method 0x%02x, "
2580                              "expected 0", *lun);
2581                         break;
2582                 }
2583                 res = *(lun + 1);
2584                 break;
2585 #else
2586                 /*
2587                  * Looks like it's legal to use it as flat space addressing
2588                  * method as well
2589                  */
2590
2591                 /* fall through */
2592 #endif
2593
2594         case 1: /* flat space addressing method */
2595                 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2596                 break;
2597
2598         case 2: /* logical unit addressing method */
2599                 if (*lun & 0x3f) {
2600                         PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2601                                     "addressing method 0x%02x, expected 0",
2602                                     *lun & 0x3f);
2603                         break;
2604                 }
2605                 if (*(lun + 1) & 0xe0) {
2606                         PRINT_ERROR("Illegal TARGET in LUN logical unit "
2607                                     "addressing method 0x%02x, expected 0",
2608                                     (*(lun + 1) & 0xe0) >> 5);
2609                         break;
2610                 }
2611                 res = *(lun + 1) & 0x1f;
2612                 break;
2613
2614         case 3: /* extended logical unit addressing method */
2615         default:
2616                 PRINT_ERROR("Unimplemented LUN addressing method %u",
2617                             address_method);
2618                 break;
2619         }
2620
2621 out:
2622         TRACE_EXIT_RES((int)res);
2623         return res;
2624
2625 out_err:
2626         PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2627         goto out;
2628 }
2629
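/*
 * Converts a sector size to a block shift by counting trailing zero
 * bits (log2() for power-of-two sizes). sector_size 0 defaults to 512;
 * anything yielding a shift below 9 (512 bytes) is rejected with -1.
 */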
2630 int scst_calc_block_shift(int sector_size)
2631 {
2632         int block_shift = 0;
2633         int t;
2634
2635         if (sector_size == 0)
2636                 sector_size = 512;
2637
2638         t = sector_size;
2639         while (1) {
2640                 if ((t & 1) != 0)
2641                         break;
2642                 t >>= 1;
2643                 block_shift++;
2644         }
2645         if (block_shift < 9) {
2646                 PRINT_ERROR("Wrong sector size %d", sector_size);
2647                 block_shift = -1;
2648         }
2649
2650         TRACE_EXIT_RES(block_shift);
2651         return block_shift;
2652 }
2653 EXPORT_SYMBOL(scst_calc_block_shift);
2654
2655 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2656         int (*get_block_shift)(struct scst_cmd *cmd))
2657 {
2658         int res = 0;
2659
2660         TRACE_ENTRY();
2661
2662         /*
2663          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2664          * therefore change them only if necessary
2665          */
2666
2667         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2668               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2669
2670         switch (cmd->cdb[0]) {
2671         case SERVICE_ACTION_IN:
2672                 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2673                         cmd->bufflen = READ_CAP16_LEN;
2674                         cmd->data_direction = SCST_DATA_READ;
2675                 }
2676                 break;
2677         case VERIFY_6:
2678         case VERIFY:
2679         case VERIFY_12:
2680         case VERIFY_16:
2681                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2682                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2683                         cmd->bufflen = 0;
2684                         goto set_timeout;
2685                 } else
2686                         cmd->data_len = 0;
2687                 break;
2688         default:
2689                 /* It's all good */
2690                 break;
2691         }
2692
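        /*
         * For commands with fixed-block transfer lengths the CDB length
         * is in blocks, so convert cmd->bufflen to bytes here.
         */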
2693         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2694                 /*
2695                  * No need for locks here, since *_detach() can not be
2696                  * called, when there are existing commands.
2697                  */
2698                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2699         }
2700
2701 set_timeout:
2702         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2703                 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2704         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2705                 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2706         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2707                 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2708
2709         TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2710               res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2711
2712         TRACE_EXIT_RES(res);
2713         return res;
2714 }
2715 EXPORT_SYMBOL(scst_sbc_generic_parse);
2716
2717 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2718         int (*get_block_shift)(struct scst_cmd *cmd))
2719 {
2720         int res = 0;
2721
2722         TRACE_ENTRY();
2723
2724         /*
2725          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2726          * therefore change them only if necessary
2727          */
2728
2729         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2730               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2731
2732         cmd->cdb[1] &= 0x1f;
2733
2734         switch (cmd->cdb[0]) {
2735         case VERIFY_6:
2736         case VERIFY:
2737         case VERIFY_12:
2738         case VERIFY_16:
2739                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2740                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2741                         cmd->bufflen = 0;
2742                         goto set_timeout;
2743                 }
2744                 break;
2745         default:
2746                 /* It's all good */
2747                 break;
2748         }
2749
2750         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2751                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2752
2753 set_timeout:
2754         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2755                 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2756         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2757                 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2758         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2759                 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2760
2761         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2762                 cmd->data_direction);
2763
2764         TRACE_EXIT();
2765         return res;
2766 }
2767 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2768
2769 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2770         int (*get_block_shift)(struct scst_cmd *cmd))
2771 {
2772         int res = 0;
2773
2774         TRACE_ENTRY();
2775
2776         /*
2777          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2778          * therefore change them only if necessary
2779          */
2780
2781         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2782               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2783
2784         cmd->cdb[1] &= 0x1f;
2785
2786         switch (cmd->cdb[0]) {
2787         case VERIFY_6:
2788         case VERIFY:
2789         case VERIFY_12:
2790         case VERIFY_16:
2791                 if ((cmd->cdb[1] & BYTCHK) == 0) {
2792                         cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2793                         cmd->bufflen = 0;
2794                         goto set_timeout;
2795                 }
2796                 break;
2797         default:
2798                 /* It's all good */
2799                 break;
2800         }
2801
2802         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2803                 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2804
2805 set_timeout:
2806         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2807                 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2808         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2809                 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2810         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2811                 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2812
2813         TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2814                 cmd->data_direction);
2815
2816         TRACE_EXIT_RES(res);
2817         return res;
2818 }
2819 EXPORT_SYMBOL(scst_modisk_generic_parse);
2820
2821 int scst_tape_generic_parse(struct scst_cmd *cmd,
2822         int (*get_block_size)(struct scst_cmd *cmd))
2823 {
2824         int res = 0;
2825
2826         TRACE_ENTRY();
2827
2828         /*
2829          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2830          * therefore change them only if necessary
2831          */
2832
2833         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2834               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2835
2836         if (cmd->cdb[0] == READ_POSITION) {
2837                 int tclp = cmd->cdb[1] & 4;
2838                 int long_bit = cmd->cdb[1] & 2;
2839                 int bt = cmd->cdb[1] & 1;
2840
2841                 if ((tclp == long_bit) && (!bt || !long_bit)) {
2842                         cmd->bufflen =
2843                             tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2844                         cmd->data_direction = SCST_DATA_READ;
2845                 } else {
2846                         cmd->bufflen = 0;
2847                         cmd->data_direction = SCST_DATA_NONE;
2848                 }
2849         }
2850
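        /*
         * Note: this apparently relies on SCST_TRANSFER_LEN_TYPE_FIXED
         * coinciding with the FIXED bit (bit 0) of CDB byte 1 for tape
         * commands, so one '&' chain tests both the op flag and the bit.
         */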
2851         if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2852                 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2853
2854         if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2855                 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2856         else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2857                 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2858         else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2859                 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2860
2861         TRACE_EXIT_RES(res);
2862         return res;
2863 }
2864 EXPORT_SYMBOL(scst_tape_generic_parse);
2865
2866 static int scst_null_parse(struct scst_cmd *cmd)
2867 {
2868         int res = 0;
2869
2870         TRACE_ENTRY();
2871
2872         /*
2873          * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2874          * therefore change them only if necessary
2875          */
2876
2877         TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2878               cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2879 #if 0
2880         switch (cmd->cdb[0]) {
2881         default:
2882                 /* It's all good */
2883                 break;
2884         }
2885 #endif
2886         TRACE_DBG("res %d bufflen %d direct %d",
2887               res, cmd->bufflen, cmd->data_direction);
2888
2889         TRACE_EXIT();
2890         return res;
2891 }
2892
2893 int scst_changer_generic_parse(struct scst_cmd *cmd,
2894         int (*nothing)(struct scst_cmd *cmd))
2895 {
2896         int res = scst_null_parse(cmd);
2897
2898         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2899                 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2900         else
2901                 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2902
2903         return res;
2904 }
2905 EXPORT_SYMBOL(scst_changer_generic_parse);
2906
2907 int scst_processor_generic_parse(struct scst_cmd *cmd,
2908         int (*nothing)(struct scst_cmd *cmd))
2909 {
2910         int res = scst_null_parse(cmd);
2911
2912         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2913                 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2914         else
2915                 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2916
2917         return res;
2918 }
2919 EXPORT_SYMBOL(scst_processor_generic_parse);
2920
2921 int scst_raid_generic_parse(struct scst_cmd *cmd,
2922         int (*nothing)(struct scst_cmd *cmd))
2923 {
2924         int res = scst_null_parse(cmd);
2925
2926         if (cmd->op_flags & SCST_LONG_TIMEOUT)
2927                 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2928         else
2929                 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2930
2931         return res;
2932 }
2933 EXPORT_SYMBOL(scst_raid_generic_parse);
2934
2935 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2936         void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2937 {
2938         int opcode = cmd->cdb[0];
2939         int status = cmd->status;
2940         int res = SCST_CMD_STATE_DEFAULT;
2941
2942         TRACE_ENTRY();
2943
2944         /*
2945          * SCST sets good defaults for cmd->is_send_status and
2946          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2947          * therefore change them only if necessary
2948          */
2949
2950         if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2951                 switch (opcode) {
2952                 case READ_CAPACITY:
2953                 {
2954                         /* Always keep track of disk capacity */
2955                         int buffer_size, sector_size, sh;
2956                         uint8_t *buffer;
2957
2958                         buffer_size = scst_get_buf_first(cmd, &buffer);
2959                         if (unlikely(buffer_size <= 0)) {
2960                                 if (buffer_size < 0) {
2961                                         PRINT_ERROR("%s: Unable to get the"
2962                                         " buffer (%d)", __func__, buffer_size);
2963                                 }
2964                                 goto out;
2965                         }
2966
2967                         sector_size =
2968                             ((buffer[4] << 24) | (buffer[5] << 16) |
2969                              (buffer[6] << 8) | (buffer[7] << 0));
2970                         scst_put_buf(cmd, buffer);
2971                         if (sector_size != 0)
2972                                 sh = scst_calc_block_shift(sector_size);
2973                         else
2974                                 sh = 0;
2975                         set_block_shift(cmd, sh);
2976                         TRACE_DBG("block_shift %d", sh);
2977                         break;
2978                 }
2979                 default:
2980                         /* It's all good */
2981                         break;
2982                 }
2983         }
2984
2985         TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2986               "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2987
2988 out:
2989         TRACE_EXIT_RES(res);
2990         return res;
2991 }
2992 EXPORT_SYMBOL(scst_block_generic_dev_done);
2993
2994 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2995         void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2996 {
2997         int opcode = cmd->cdb[0];
2998         int res = SCST_CMD_STATE_DEFAULT;
2999         int buffer_size, bs;
3000         uint8_t *buffer = NULL;
3001
3002         TRACE_ENTRY();
3003
3004         /*
3005          * SCST sets good defaults for cmd->is_send_status and
3006          * cmd->resp_data_len based on cmd->status and cmd->data_direction,
3007          * therefore change them only if necessary
3008          */
3009
3010         switch (opcode) {
3011         case MODE_SENSE:
3012         case MODE_SELECT:
3013                 buffer_size = scst_get_buf_first(cmd, &buffer);
3014                 if (unlikely(buffer_size <= 0)) {
3015                         if (buffer_size < 0) {
3016                                 PRINT_ERROR("%s: Unable to get the buffer (%d)",
3017                                         __func__, buffer_size);
3018                         }
3019                         goto out;
3020                 }
3021                 break;
3022         }
3023
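        /*
         * In the returned mode parameter header buffer[3] is the block
         * descriptor length; 8 means a single descriptor whose last
         * three bytes (buffer[9..11]) hold the block length.
         */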
3024         switch (opcode) {
3025         case MODE_SENSE:
3026                 TRACE_DBG("%s", "MODE_SENSE");
3027                 if ((cmd->cdb[2] & 0xC0) == 0) {
3028                         if (buffer[3] == 8) {
3029                                 bs = (buffer[9] << 16) |
3030                                     (buffer[10] << 8) | buffer[11];
3031                                 set_block_size(cmd, bs);
3032                         }
3033                 }
3034                 break;
3035         case MODE_SELECT:
3036                 TRACE_DBG("%s", "MODE_SELECT");
3037                 if (buffer[3] == 8) {
3038                         bs = (buffer[9] << 16) | (buffer[10] << 8) |
3039                             (buffer[11]);
3040                         set_block_size(cmd, bs);
3041                 }
3042                 break;
3043         default:
3044                 /* It's all good */
3045                 break;
3046         }
3047
3048         switch (opcode) {
3049         case MODE_SENSE:
3050         case MODE_SELECT:
3051                 scst_put_buf(cmd, buffer);
3052                 break;
3053         }
3054
3055 out:
3056         TRACE_EXIT_RES(res);
3057         return res;
3058 }
3059 EXPORT_SYMBOL(scst_tape_generic_dev_done);
3060
3061 static void scst_check_internal_sense(struct scst_device *dev, int result,
3062         uint8_t *sense, int sense_len)
3063 {
3064         TRACE_ENTRY();
3065
3066         if (host_byte(result) == DID_RESET) {
3067                 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
3068                         "reset UA");
3069                 scst_set_sense(sense, sense_len, dev->d_sense,
3070                         SCST_LOAD_SENSE(scst_sense_reset_UA));
3071                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
3072         } else if ((status_byte(result) == CHECK_CONDITION) &&
3073                    SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
3074                 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
3075
3076         TRACE_EXIT();
3077         return;
3078 }
3079
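/*
 * Queries the device's Control mode page (0x0A) to learn TST, QUEUE
 * ALGORITHM MODIFIER, SWP, TAS and D_SENSE, retrying up to 5 times.
 * Devices rejecting the page as ILLEGAL REQUEST keep SCST's defaults.
 */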
3080 int scst_obtain_device_parameters(struct scst_device *dev)
3081 {
3082         int res = 0, i;
3083         uint8_t cmd[16];
3084         uint8_t buffer[4+0x0A];
3085         uint8_t sense_buffer[SCSI_SENSE_BUFFERSIZE];
3086
3087         TRACE_ENTRY();
3088
3089         EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
3090
3091         for (i = 0; i < 5; i++) {
3092                 /* Get control mode page */
3093                 memset(cmd, 0, sizeof(cmd));
3094                 cmd[0] = MODE_SENSE;
3095                 cmd[1] = 8; /* DBD */
3096                 cmd[2] = 0x0A;
3097                 cmd[4] = sizeof(buffer);
3098
3099                 memset(buffer, 0, sizeof(buffer));
3100                 memset(sense_buffer, 0, sizeof(sense_buffer));
3101
3102                 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
3103                 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
3104                                 sizeof(buffer), sense_buffer, 15, 0, 0);
3105
3106                 TRACE_DBG("MODE_SENSE done: %x", res);
3107
3108                 if (scsi_status_is_good(res)) {
3109                         int q;
3110
3111                         PRINT_BUFF_FLAG(TRACE_SCSI,
3112                                 "Returned control mode page data",
3113                                 buffer, sizeof(buffer));
3114
3115                         dev->tst = buffer[4+2] >> 5;
3116                         q = buffer[4+3] >> 4;
3117                         if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
3118                                 PRINT_ERROR("Too large QUEUE ALG %x, dev "
3119                                         "%d:%d:%d:%d", q,
3120                                         dev->scsi_dev->host->host_no,
3121                                         dev->scsi_dev->channel,
3122                                         dev->scsi_dev->id, dev->scsi_dev->lun);
3123                         }
3124                         dev->queue_alg = q;
3125                         dev->swp = (buffer[4+4] & 0x8) >> 3;
3126                         dev->tas = (buffer[4+5] & 0x40) >> 6;
3127                         dev->d_sense = (buffer[4+2] & 0x4) >> 2;
3128
3129                         /*
3130                          * Unfortunately, SCSI ML doesn't provide a way to
3131                          * specify a command's task attribute, so we can rely
3132                          * only on the device's restricted reordering.
3133                          */
3134                         dev->has_own_order_mgmt = !dev->queue_alg;
3135
3136                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
3137                                 "Device %d:%d:%d:%d: TST %x, "
3138                                 "QUEUE ALG %x, SWP %x, TAS %x, D_SENSE %d, "
3139                                 "has_own_order_mgmt %d",
3140                                 dev->scsi_dev->host->host_no,
3141                                 dev->scsi_dev->channel, dev->scsi_dev->id,
3142                                 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
3143                                 dev->swp, dev->tas, dev->d_sense,
3144                                 dev->has_own_order_mgmt);
3145
3146                         goto out;
3147                 } else {
3148 #if 0
3149                         if ((status_byte(res) == CHECK_CONDITION) &&
3150 #else
3151                         /*
3152                          * 3ware controller is buggy and returns CONDITION_GOOD
3153                          * instead of CHECK_CONDITION
3154                          */
3155                         if (
3156 #endif
3157                             SCST_SENSE_VALID(sense_buffer)) {
3158                                 if (scst_analyze_sense(sense_buffer,
3159                                                 sizeof(sense_buffer),
3160                                                 SCST_SENSE_KEY_VALID,
3161                                                 ILLEGAL_REQUEST, 0, 0)) {
3162                                         TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
3163                                                 "Device %d:%d:%d:%d doesn't "
3164                                                 "support control mode page, "
3165                                                 "using defaults: TST %x, "
3166                                                 "QUEUE ALG %x, SWP %x, "
3167                                                 "TAS %x, D_SENSE %d, "
3168                                                 "has_own_order_mgmt %d",
3169                                                 dev->scsi_dev->host->host_no,
3170                                                 dev->scsi_dev->channel,
3171                                                 dev->scsi_dev->id,
3172                                                 dev->scsi_dev->lun,
3173                                                 dev->tst, dev->queue_alg,
3174                                                 dev->swp, dev->tas,
3175                                                 dev->d_sense,
3176                                                 dev->has_own_order_mgmt);
3177                                         res = 0;
3178                                         goto out;
3179                                 } else if (scst_analyze_sense(sense_buffer,
3180                                                 sizeof(sense_buffer),
3181                                                 SCST_SENSE_KEY_VALID,
3182                                                 NOT_READY, 0, 0)) {
3183                                         TRACE(TRACE_SCSI,
3184                                                 "Device %d:%d:%d:%d not ready",
3185                                                 dev->scsi_dev->host->host_no,
3186                                                 dev->scsi_dev->channel,
3187                                                 dev->scsi_dev->id,
3188                                                 dev->scsi_dev->lun);
3189                                         res = 0;
3190                                         goto out;
3191                                 }
3192                         } else {
3193                                 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
3194                                         "Internal MODE SENSE to "
3195                                         "device %d:%d:%d:%d failed: %x",
3196                                         dev->scsi_dev->host->host_no,
3197                                         dev->scsi_dev->channel,
3198                                         dev->scsi_dev->id,
3199                                         dev->scsi_dev->lun, res);
3200                                 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
3201                                         "MODE SENSE sense",
3202                                         sense_buffer, sizeof(sense_buffer));
3203                         }
3204                         scst_check_internal_sense(dev, res, sense_buffer,
3205                                         sizeof(sense_buffer));
3206                 }
3207         }
3208         res = -ENODEV;
3209
3210 out:
3211         TRACE_EXIT_RES(res);
3212         return res;
3213 }
3214 EXPORT_SYMBOL(scst_obtain_device_parameters);
3215
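/*
 * Performs the device-side part of a reset: clears any RESERVE'ation,
 * aborts all affected commands except exclude_cmd, moves aborted
 * blocked commands back to the active cmd list and, if setUA is true,
 * queues a reset UA on all tgt_devs of the device.
 */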
3216 /* Called under dev_lock and BH off */
3217 void scst_process_reset(struct scst_device *dev,
3218         struct scst_session *originator, struct scst_cmd *exclude_cmd,
3219         struct scst_mgmt_cmd *mcmd, bool setUA)
3220 {
3221         struct scst_tgt_dev *tgt_dev;
3222         struct scst_cmd *cmd, *tcmd;
3223
3224         TRACE_ENTRY();
3225
3226         /* Clear RESERVE'ation, if necessary */
3227         if (dev->dev_reserved) {
3228                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3229                                     dev_tgt_dev_list_entry) {
3230                         TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
3231                                 "tgt_dev lun %lld",
3232                                 (long long unsigned int)tgt_dev->lun);
3233                         clear_bit(SCST_TGT_DEV_RESERVED,
3234                                   &tgt_dev->tgt_dev_flags);
3235                 }
3236                 dev->dev_reserved = 0;
3237                 /*
3238                  * There is no need to send RELEASE, since the device is going
3239                  * to be reset. Moreover, since we can be called from a RESET
3240                  * TM function, sending it might even be dangerous.
3241                  */
3242         }
3243
3244         dev->dev_double_ua_possible = 1;
3245
3246         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3247                 dev_tgt_dev_list_entry) {
3248                 struct scst_session *sess = tgt_dev->sess;
3249
3250                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
3251                 scst_free_all_UA(tgt_dev);
3252                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
3253
3254                 spin_lock_irq(&sess->sess_list_lock);
3255
3256                 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
3257                 list_for_each_entry(cmd, &sess->search_cmd_list,
3258                                 search_cmd_list_entry) {
3259                         if (cmd == exclude_cmd)
3260                                 continue;
3261                         if ((cmd->tgt_dev == tgt_dev) ||
3262                             ((cmd->tgt_dev == NULL) &&
3263                              (cmd->lun == tgt_dev->lun))) {
3264                                 scst_abort_cmd(cmd, mcmd,
3265                                         (tgt_dev->sess != originator), 0);
3266                         }
3267                 }
3268                 spin_unlock_irq(&sess->sess_list_lock);
3269         }
3270
3271         list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3272                                 blocked_cmd_list_entry) {
3273                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3274                         list_del(&cmd->blocked_cmd_list_entry);
3275                         TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
3276                                 "to active cmd list", cmd);
3277                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
3278                         list_add_tail(&cmd->cmd_list_entry,
3279                                 &cmd->cmd_lists->active_cmd_list);
3280                         wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3281                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
3282                 }
3283         }
3284
3285         if (setUA) {
3286                 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
3287                 scst_set_sense(sense_buffer, sizeof(sense_buffer),
3288                         dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
3289                 scst_dev_check_set_local_UA(dev, exclude_cmd, sense_buffer,
3290                         sizeof(sense_buffer));
3291         }
3292
3293         TRACE_EXIT();
3294         return;
3295 }
3296
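/*
 * Delivers the head UA of cmd's tgt_dev as cmd's sense. For a global
 * UA, all tgt_dev locks of the session are acquired in hash order
 * under scst_mutex, so that the duplicates of that UA queued on the
 * other tgt_devs can be removed atomically; the locks are then
 * dropped in the reverse order. Returns 0 on success, -1 if the UA
 * list was found empty.
 */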
3297 int scst_set_pending_UA(struct scst_cmd *cmd)
3298 {
3299         int res = 0, i;
3300         struct scst_tgt_dev_UA *UA_entry;
3301         bool first = true, global_unlock = false;
3302         struct scst_session *sess = cmd->sess;
3303
3304         TRACE_ENTRY();
3305
3306         TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);
3307
3308         spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
3309
3310 again:
3311         /* UA list could be cleared behind us, so retest */
3312         if (list_empty(&cmd->tgt_dev->UA_list)) {
3313                 TRACE_DBG("%s",
3314                       "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
3315                 res = -1;
3316                 goto out_unlock;
3317         }
3318
3319         UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
3320                               UA_list_entry);
3321
3322         TRACE_DBG("next %p UA_entry %p",
3323               cmd->tgt_dev->UA_list.next, UA_entry);
3324
3325         if (UA_entry->global_UA && first) {
3326                 TRACE_MGMT_DBG("Global UA %p detected", UA_entry);
3327
3328                 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
3329
3330                 mutex_lock(&scst_mutex);
3331
3332                 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
3333                         struct list_head *sess_tgt_dev_list_head =
3334                                 &sess->sess_tgt_dev_list_hash[i];
3335                         struct scst_tgt_dev *tgt_dev;
3336                         list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3337                                         sess_tgt_dev_list_entry) {
3338                                 spin_lock_bh(&tgt_dev->tgt_dev_lock);
3339                         }
3340                 }
3341
3342                 first = false;
3343                 global_unlock = true;
3344                 goto again;
3345         }
3346
3347         scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
3348                 sizeof(UA_entry->UA_sense_buffer));
3349
3350         cmd->ua_ignore = 1;
3351
3352         list_del(&UA_entry->UA_list_entry);
3353
3354         if (UA_entry->global_UA) {
3355                 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
3356                         struct list_head *sess_tgt_dev_list_head =
3357                                 &sess->sess_tgt_dev_list_hash[i];
3358                         struct scst_tgt_dev *tgt_dev;
3359
3360                         list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3361                                         sess_tgt_dev_list_entry) {
3362                                 struct scst_tgt_dev_UA *ua;
3363                                 list_for_each_entry(ua, &tgt_dev->UA_list,
3364                                                         UA_list_entry) {
3365                                         if (ua->global_UA &&
3366                                             memcmp(ua->UA_sense_buffer,
3367                                                 UA_entry->UA_sense_buffer,
3368                                              sizeof(ua->UA_sense_buffer)) == 0) {
3369                                                 TRACE_MGMT_DBG("Freeing no "
3370                                                         "longer needed global UA %p",
3371                                                         ua);
3372                                                 list_del(&ua->UA_list_entry);
3373                                                 mempool_free(ua, scst_ua_mempool);
3374                                                 break;
3375                                         }
3376                                 }
3377                         }
3378                 }
3379         }
3380
3381         mempool_free(UA_entry, scst_ua_mempool);
3382
3383         if (list_empty(&cmd->tgt_dev->UA_list)) {
3384                 clear_bit(SCST_TGT_DEV_UA_PENDING,
3385                           &cmd->tgt_dev->tgt_dev_flags);
3386         }
3387
3388 out_unlock:
3389         if (global_unlock) {
3390                 for (i = TGT_DEV_HASH_SIZE - 1; i >= 0; i--) {
3391                         struct list_head *sess_tgt_dev_list_head =
3392                                 &sess->sess_tgt_dev_list_hash[i];
3393                         struct scst_tgt_dev *tgt_dev;
3394                         list_for_each_entry_reverse(tgt_dev, sess_tgt_dev_list_head,
3395                                         sess_tgt_dev_list_entry) {
3396                                 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
3397                         }
3398                 }
3399
3400                 mutex_unlock(&scst_mutex);
3401
3402                 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
3403         }
3404
3405         spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
3406
3407         TRACE_EXIT_RES(res);
3408         return res;
3409 }
3410
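/*
 * Queues a new UA on tgt_dev. GFP_ATOMIC is used because the caller
 * holds tgt_dev_lock with BH off; if the allocation fails, the UA is
 * logged and lost.
 */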
3411 /* Called under tgt_dev_lock and BH off */
3412 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
3413         const uint8_t *sense, int sense_len, int flags)
3414 {
3415         struct scst_tgt_dev_UA *UA_entry = NULL;
3416
3417         TRACE_ENTRY();
3418
3419         UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
3420         if (UA_entry == NULL) {
3421                 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
3422                      "allocation failed. The UNIT ATTENTION "
3423                      "on some sessions will be missed");
3424                 PRINT_BUFFER("Lost UA", sense, sense_len);
3425                 goto out;
3426         }
3427         memset(UA_entry, 0, sizeof(*UA_entry));
3428
3429         UA_entry->global_UA = (flags & SCST_SET_UA_FLAG_GLOBAL) != 0;
3430         if (UA_entry->global_UA)
3431                 TRACE_MGMT_DBG("Queuing global UA %p", UA_entry);
3432
3433         if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
3434                 sense_len = sizeof(UA_entry->UA_sense_buffer);
3435         memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
3436
3437         set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
3438
3439         TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
3440
3441         if (flags & SCST_SET_UA_FLAG_AT_HEAD)
3442                 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
3443         else
3444                 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
3445
3446 out:
3447         TRACE_EXIT();
3448         return;
3449 }
3450
3451 /* Called under tgt_dev_lock and BH off */
3452 static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
3453         const uint8_t *sense, int sense_len, int flags)
3454 {
3455         int skip_UA = 0;
3456         struct scst_tgt_dev_UA *UA_entry_tmp;
3457         int len = min((int)sizeof(UA_entry_tmp->UA_sense_buffer), sense_len);
3458
3459         TRACE_ENTRY();
3460
3461         list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
3462                             UA_list_entry) {
3463                 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, len) == 0) {
3464                         TRACE_MGMT_DBG("%s", "UA already exists");
3465                         skip_UA = 1;
3466                         break;
3467                 }
3468         }
3469
3470         if (skip_UA == 0)
3471                 scst_alloc_set_UA(tgt_dev, sense, len, flags);
3472
3473         TRACE_EXIT();
3474         return;
3475 }
3476
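/* Locking wrapper around __scst_check_set_UA(): takes tgt_dev_lock itself */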
3477 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
3478         const uint8_t *sense, int sense_len, int flags)
3479 {
3480         TRACE_ENTRY();
3481
3482         spin_lock_bh(&tgt_dev->tgt_dev_lock);
3483         __scst_check_set_UA(tgt_dev, sense, sense_len, flags);
3484         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
3485
3486         TRACE_EXIT();
3487         return;
3488 }
3489
3490 /* Called under dev_lock and BH off */
3491 void scst_dev_check_set_local_UA(struct scst_device *dev,
3492         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
3493 {
3494         struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
3495
3496         TRACE_ENTRY();
3497
3498         if (exclude != NULL)
3499                 exclude_tgt_dev = exclude->tgt_dev;
3500
3501         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3502                         dev_tgt_dev_list_entry) {
3503                 if (tgt_dev != exclude_tgt_dev)
3504                         scst_check_set_UA(tgt_dev, sense, sense_len, 0);
3505         }
3506
3507         TRACE_EXIT();
3508         return;
3509 }
3510
3511 /* Called under dev_lock and BH off */
3512 void __scst_dev_check_set_UA(struct scst_device *dev,
3513         struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
3514 {
3515         TRACE_ENTRY();
3516
3517         TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);
3518
3519         /* Check for reset UA */
3520         if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ASC_VALID,
3521                                 0, SCST_SENSE_ASC_UA_RESET, 0))
3522                 scst_process_reset(dev,
3523                                    (exclude != NULL) ? exclude->sess : NULL,
3524                                    exclude, NULL, false);
3525
3526         scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
3527
3528         TRACE_EXIT();
3529         return;
3530 }
3531
3532 /* Called under tgt_dev_lock or when tgt_dev is unused */
3533 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
3534 {
3535         struct scst_tgt_dev_UA *UA_entry, *t;
3536
3537         TRACE_ENTRY();
3538
3539         list_for_each_entry_safe(UA_entry, t,
3540                                  &tgt_dev->UA_list, UA_list_entry) {
3541                 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
3542                                (long long unsigned int)tgt_dev->lun);
3543                 list_del(&UA_entry->UA_list_entry);
3544                 mempool_free(UA_entry, scst_ua_mempool);
3545         }
3546         INIT_LIST_HEAD(&tgt_dev->UA_list);
3547         clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
3548
3549         TRACE_EXIT();
3550         return;
3551 }
3552
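/*
 * Returns the deferred command matching tgt_dev's expected_sn, if any.
 * Remaining matching deferred commands are moved to the active cmd
 * list, and matching entries on the skipped SN list are released with
 * their SN slots advanced before the scan restarts. Returns NULL when
 * HEAD OF QUEUE commands are outstanding or nothing matches.
 */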
3553 /* No locks */
3554 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
3555 {
3556         struct scst_cmd *res = NULL, *cmd, *t;
3557         typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
3558
3559         spin_lock_irq(&tgt_dev->sn_lock);
3560
3561         if (unlikely(tgt_dev->hq_cmd_count != 0))
3562                 goto out_unlock;
3563
3564 restart:
3565         list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
3566                                 sn_cmd_list_entry) {
3567                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3568                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3569                 if (cmd->sn == expected_sn) {
3570                         TRACE_SN("Deferred command %p (sn %ld, set %d) found",
3571                                 cmd, cmd->sn, cmd->sn_set);
3572                         tgt_dev->def_cmd_count--;
3573                         list_del(&cmd->sn_cmd_list_entry);
3574                         if (res == NULL)
3575                                 res = cmd;
3576                         else {
3577                                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3578                                 TRACE_SN("Adding cmd %p to active cmd list",
3579                                         cmd);
3580                                 list_add_tail(&cmd->cmd_list_entry,
3581                                         &cmd->cmd_lists->active_cmd_list);
3582                                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3583                                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3584                         }
3585                 }
3586         }
3587         if (res != NULL)
3588                 goto out_unlock;
3589
3590         list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3591                                 sn_cmd_list_entry) {
3592                 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3593                         SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3594                 if (cmd->sn == expected_sn) {
3595                         atomic_t *slot = cmd->sn_slot;
3596                         /*
3597                          * !! At this point any pointer in cmd, except !!
3598                          * !! sn_slot and sn_cmd_list_entry, could be   !!
3599                          * !! already destroyed                         !!
3600                          */
3601                         TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3602                                  cmd,
3603                                  (long long unsigned int)cmd->tag,
3604                                  cmd->sn);
3605                         tgt_dev->def_cmd_count--;
3606                         list_del(&cmd->sn_cmd_list_entry);
3607                         spin_unlock_irq(&tgt_dev->sn_lock);
3608                         if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3609                                              &cmd->cmd_flags))
3610                                 scst_destroy_put_cmd(cmd);
3611                         scst_inc_expected_sn(tgt_dev, slot);
3612                         expected_sn = tgt_dev->expected_sn;
3613                         spin_lock_irq(&tgt_dev->sn_lock);
3614                         goto restart;
3615                 }
3616         }
3617
3618 out_unlock:
3619         spin_unlock_irq(&tgt_dev->sn_lock);
3620         return res;
3621 }
3622
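/*
 * Registers per-thread data for tgt_dev, owned by the current thread
 * and freed through free_fn when the last reference is put. A minimal
 * usage sketch (the structure and callback below are hypothetical,
 * for illustration only):
 *
 *	struct my_thr_data {
 *		struct scst_thr_data_hdr hdr;
 *		int some_state;
 *	};
 *
 *	static void my_free_thr_data(struct scst_thr_data_hdr *h)
 *	{
 *		kfree(container_of(h, struct my_thr_data, hdr));
 *	}
 *
 *	struct my_thr_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
 *	if (d != NULL)
 *		scst_add_thr_data(tgt_dev, &d->hdr, my_free_thr_data);
 *
 * The data can later be looked up with __scst_find_thr_data(), which
 * takes a reference that must be dropped with scst_thr_data_put().
 */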
3623 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3624         struct scst_thr_data_hdr *data,
3625         void (*free_fn) (struct scst_thr_data_hdr *data))
3626 {
3627         data->owner_thr = current;
3628         atomic_set(&data->ref, 1);
3629         EXTRACHECKS_BUG_ON(free_fn == NULL);
3630         data->free_fn = free_fn;
3631         spin_lock(&tgt_dev->thr_data_lock);
3632         list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3633         spin_unlock(&tgt_dev->thr_data_lock);
3634 }
3635 EXPORT_SYMBOL(scst_add_thr_data);
3636
3637 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3638 {
3639         spin_lock(&tgt_dev->thr_data_lock);
3640         while (!list_empty(&tgt_dev->thr_data_list)) {
3641                 struct scst_thr_data_hdr *d = list_entry(
3642                                 tgt_dev->thr_data_list.next, typeof(*d),
3643                                 thr_data_list_entry);
3644                 list_del(&d->thr_data_list_entry);
3645                 spin_unlock(&tgt_dev->thr_data_lock);
3646                 scst_thr_data_put(d);
3647                 spin_lock(&tgt_dev->thr_data_lock);
3648         }
3649         spin_unlock(&tgt_dev->thr_data_lock);
3650         return;
3651 }
3652 EXPORT_SYMBOL(scst_del_all_thr_data);
3653
3654 void scst_dev_del_all_thr_data(struct scst_device *dev)
3655 {
3656         struct scst_tgt_dev *tgt_dev;
3657
3658         TRACE_ENTRY();
3659
3660         mutex_lock(&scst_mutex);
3661
3662         list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3663                                 dev_tgt_dev_list_entry) {
3664                 scst_del_all_thr_data(tgt_dev);
3665         }
3666
3667         mutex_unlock(&scst_mutex);
3668
3669         TRACE_EXIT();
3670         return;
3671 }
3672 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
3673
3674 struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
3675         struct task_struct *tsk)
3676 {
3677         struct scst_thr_data_hdr *res = NULL, *d;
3678
3679         spin_lock(&tgt_dev->thr_data_lock);
3680         list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
3681                 if (d->owner_thr == tsk) {
3682                         res = d;
3683                         scst_thr_data_get(res);
3684                         break;
3685                 }
3686         }
3687         spin_unlock(&tgt_dev->thr_data_lock);
3688         return res;
3689 }
3690 EXPORT_SYMBOL(__scst_find_thr_data);
3691
3692 /* Called under dev_lock and BH off */
3693 void __scst_block_dev(struct scst_device *dev)
3694 {
3695         dev->block_count++;
3696         TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3697 }
3698
3699 /* No locks */
3700 static void scst_block_dev(struct scst_device *dev, int outstanding)
3701 {
3702         spin_lock_bh(&dev->dev_lock);
3703         __scst_block_dev(dev);
3704         spin_unlock_bh(&dev->dev_lock);
3705
3706         /*
3707          * Memory barrier is necessary here, because we need to read
3708          * on_dev_count in wait_event() below after we increased block_count.
3709          * Otherwise, we can miss wake up in scst_dec_on_dev_cmd().
3710          * We use the explicit barrier, because spin_unlock_bh() doesn't
3711          * provide the necessary memory barrier functionality.
3712          */
3713         smp_mb();
3714
3715         TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
3716                 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3717         wait_event(dev->on_dev_waitQ,
3718                 atomic_read(&dev->on_dev_count) <= outstanding);
3719         TRACE_MGMT_DBG("%s", "wait_event() returned");
3720 }
3721
3722 /* No locks */
3723 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3724 {
3725         sBUG_ON(cmd->needs_unblocking);
3726
3727         cmd->needs_unblocking = 1;
3728         TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3729                        cmd, (long long unsigned int)cmd->tag);
3730
3731         scst_block_dev(cmd->dev, outstanding);
3732 }
3733
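/*
 * Balances __scst_block_dev()/scst_block_dev(): when block_count drops
 * back to zero, commands held on the device's blocked_cmd_list are
 * rescheduled via scst_unblock_cmds().
 */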
3734 /* No locks */
3735 void scst_unblock_dev(struct scst_device *dev)
3736 {
3737         spin_lock_bh(&dev->dev_lock);
3738         TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3739                 dev->block_count - 1, dev);
3740         if (--dev->block_count == 0)
3741                 scst_unblock_cmds(dev);
3742         spin_unlock_bh(&dev->dev_lock);
3743         sBUG_ON(dev->block_count < 0);
3744 }
3745
3746 /* No locks */
3747 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3748 {
3749         scst_unblock_dev(cmd->dev);
3750         cmd->needs_unblocking = 0;
3751 }
3752
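/*
 * Accounts cmd as executing on its device: increments on_dev_count and
 * marks cmd so that the matching decrement and wake-up happen later in
 * scst_dec_on_dev_cmd().
 */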
3753 /* No locks */
3754 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3755 {
3756         int res = 0;
3757         struct scst_device *dev = cmd->dev;
3758
3759         TRACE_ENTRY();
3760
3761         sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3762
3763         atomic_inc(&dev->on_dev_count);
3764         cmd->dec_on_dev_needed = 1;
3765         TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3766