1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43         unsigned long *pflags, int left_locked)
44 {
45         int res;
46
47         TRACE_ENTRY();
48
49         TRACE_DBG("Moving cmd %p to cmd list", cmd);
50         list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
51
52         /* This is an inline func., so unneeded code will be optimized out */
53         if (pflags)
54                 spin_unlock_irqrestore(&scst_list_lock, *pflags);
55         else
56                 spin_unlock_irq(&scst_list_lock);
57
58         res = __scst_process_active_cmd(cmd, context, left_locked);
59
60         TRACE_EXIT_RES(res);
61         return res;
62 }
63
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We suppose that other CPU(s) are rather idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  *  Must not be called in parallel with scst_unregister_session() for the
87  *  same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both a wrong LUN and a wrong CDB, defer the error reporting
115          * to scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
132
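/*
 * For reference, a minimal sketch (not part of this file) of how a target
 * driver's receive path is expected to use scst_rx_cmd() above together with
 * scst_cmd_init_done() below.  The callback name my_tgt_rx_pdu() and its
 * argument list are hypothetical.
 */
#if 0
static void my_tgt_rx_pdu(struct scst_session *sess, const uint8_t *lun_buf,
        int lun_len, const uint8_t *cdb, int cdb_len)
{
        struct scst_cmd *cmd;

        /* Typically called from IRQ/softirq context, hence atomic == 1 */
        cmd = scst_rx_cmd(sess, lun_buf, lun_len, cdb, cdb_len, 1);
        if (cmd == NULL) {
                /* Out of memory: the driver must reject or requeue the PDU */
                return;
        }

        /*
         * Hand the command over to SCST.  A direct context is not allowed
         * from IRQ (see the check in scst_cmd_init_done()), so use the
         * tasklet context here.
         */
        scst_cmd_init_done(cmd, SCST_CONTEXT_TASKLET);
}
#endif
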
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
134 {
135         int res = 0;
136         unsigned long flags = 0;
137         struct scst_session *sess = cmd->sess;
138
139         TRACE_ENTRY();
140
141         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142         TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
143                 (uint64_t)cmd->lun, cmd->cdb_len);
144         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
145                 cmd->cdb, cmd->cdb_len);
146
147         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
149         {
150                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
152                         cmd->tgtt->name);
153                 pref_context = SCST_CONTEXT_TASKLET;
154         }
155
156         spin_lock_irqsave(&scst_list_lock, flags);
157
158         /* Do it here: this saves us a separate lock or an atomic elsewhere */
159         sess->sess_cmd_count++;
160
161         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
162
163         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164                 switch(sess->init_phase) {
165                 case SCST_SESS_IPH_SUCCESS:
166                         break;
167                 case SCST_SESS_IPH_INITING:
168                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169                         list_add_tail(&cmd->cmd_list_entry, 
170                                 &sess->init_deferred_cmd_list);
171                         goto out_unlock_flags;
172                 case SCST_SESS_IPH_FAILED:
173                         scst_set_busy(cmd);
174                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
175                         TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176                         list_add_tail(&cmd->cmd_list_entry, 
177                                 &scst_active_cmd_list);
178                         goto active;
179                 default:
180                         BUG();
181                 }
182         }
183
184         if (unlikely(cmd->lun == (lun_t)-1)) {
185                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186                 scst_set_cmd_error(cmd,
187                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
191                 goto active;
192         }
193
194         if (unlikely(cmd->cdb_len == 0)) {
195                 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196                 scst_set_cmd_error(cmd,
197                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
201                 goto active;
202         }
203
204         cmd->state = SCST_CMD_STATE_INIT;
205
206         TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207         list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
208
209         switch (pref_context) {
210         case SCST_CONTEXT_DIRECT:
211         case SCST_CONTEXT_DIRECT_ATOMIC:
212                 res = scst_do_job_init(&scst_init_cmd_list);
213                 if (res > 0)
214                         goto out_unlock_flags;
215                 break;
216
217         case SCST_CONTEXT_THREAD:
218                 goto out_thread_unlock_flags;
219
220         case SCST_CONTEXT_TASKLET:
221                 scst_schedule_tasklet();
222                 goto out_unlock_flags;
223
224         default:
225                 PRINT_ERROR_PR("Context %x is undefined, using the thread context",
226                             pref_context);
227                 goto out_thread_unlock_flags;
228         }
229
230 active:
231         switch (pref_context) {
232         case SCST_CONTEXT_DIRECT:
233         case SCST_CONTEXT_DIRECT_ATOMIC:
234                 scst_process_active_cmd(cmd, pref_context, &flags, 0);
235                 break;
236
237         case SCST_CONTEXT_THREAD:
238                 goto out_thread_unlock_flags;
239
240         case SCST_CONTEXT_TASKLET:
241                 scst_schedule_tasklet();
242                 goto out_unlock_flags;
243
244         default:
245                 PRINT_ERROR_PR("Context %x is undefined, using the thread context",
246                             pref_context);
247                 goto out_thread_unlock_flags;
248         }
249
250 out:
251         TRACE_EXIT();
252         return;
253
254 out_unlock_flags:
255         spin_unlock_irqrestore(&scst_list_lock, flags);
256         goto out;
257
258 out_thread_unlock_flags:
259         cmd->non_atomic_only = 1;
260         spin_unlock_irqrestore(&scst_list_lock, flags);
261         wake_up(&scst_list_waitQ);
262         goto out;
263 }
264
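/*
 * Summary of the preferred contexts handled above: SCST_CONTEXT_DIRECT and
 * SCST_CONTEXT_DIRECT_ATOMIC process the command inline on the caller's
 * stack, SCST_CONTEXT_THREAD defers it to the SCST threads via
 * scst_list_waitQ, and SCST_CONTEXT_TASKLET schedules the per-CPU tasklet.
 * The direct contexts must not be used from IRQ context.
 */
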
265 static int scst_parse_cmd(struct scst_cmd *cmd)
266 {
267         int res = SCST_CMD_STATE_RES_CONT_SAME;
268         int state;
269         struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270         struct scst_device *dev = cmd->dev;
271         struct scst_info_cdb cdb_info;
272         int atomic = scst_cmd_atomic(cmd);
273         int set_dir = 1;
274
275         TRACE_ENTRY();
276
277         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278                 TRACE_DBG("ABORTED set, returning ABORTED "
279                         "for cmd %p", cmd);
280                 goto out_xmit;
281         }
282
283         if (atomic && !dev->handler->parse_atomic) {
284                 TRACE_DBG("Dev handler %s parse() can not be "
285                       "called in atomic context, rescheduling to the thread",
286                       dev->handler->name);
287                 res = SCST_CMD_STATE_RES_NEED_THREAD;
288                 goto out;
289         }
290
291         /*
292          * The expected transfer length and direction supplied by the SCSI
293          * transport via the target driver are untrusted, so we prefer to
294          * derive them from the CDB. Additionally, not all transports
295          * support supplying the expected transfer data at all.
296          */
297
298         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
299                         &cdb_info) != 0)) 
300         {
301                 static int t;
302                 if (t < 10) {
303                         t++;
304                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305                                 "Should you update scst_scsi_op_table?",
306                                 cmd->cdb[0], dev->handler->name);
307                 }
308                 if (scst_cmd_is_expected_set(cmd)) {
309                         TRACE(TRACE_MINOR, "Using initiator supplied values: "
310                                 "direction %d, transfer_len %d",
311                                 cmd->expected_data_direction,
312                                 cmd->expected_transfer_len);
313                         cmd->data_direction = cmd->expected_data_direction;
314                         cmd->bufflen = cmd->expected_transfer_len;
315                         /* Restore (most probably) lost CDB length */
316                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317                         if (cmd->cdb_len == -1) {
318                                 PRINT_ERROR_PR("Unable to get CDB length for "
319                                         "opcode 0x%02x. Returning INVALID "
320                                         "OPCODE", cmd->cdb[0]);
321                                 scst_set_cmd_error(cmd,
322                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
323                                 goto out_xmit;
324                         }
325                 }
326                 else {
327                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s, and "
328                              "target %s did not supply expected values. "
329                              "Returning INVALID OPCODE.", cmd->cdb[0], 
330                              dev->handler->name, cmd->tgtt->name);
331                         scst_set_cmd_error(cmd,
332                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
333                         goto out_xmit;
334                 }
335         } else {
336                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337                         "set %s), transfer_len=%d (expected len %d), flags=%d",
338                         cdb_info.op_name, cdb_info.direction,
339                         cmd->expected_data_direction,
340                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341                         cdb_info.transfer_len, cmd->expected_transfer_len,
342                         cdb_info.flags);
343
344                 /* Restore (most probably) lost CDB length */
345                 cmd->cdb_len = cdb_info.cdb_len;
346
347                 cmd->data_direction = cdb_info.direction;
348                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349                         cmd->bufflen = cdb_info.transfer_len;
350                 /* else cmd->bufflen remains 0, as it was initialized */
351         }
352
353         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354                 PRINT_ERROR_PR("NACA bit in the CDB control byte is not supported "
355                             "(opcode 0x%02x)", cmd->cdb[0]);
356                 scst_set_cmd_error(cmd,
357                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
358                 goto out_xmit;
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362                 PRINT_ERROR_PR("Linked commands are not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (likely(!scst_is_cmd_local(cmd))) {
370                 TRACE_DBG("Calling dev handler %s parse(%p)",
371                       dev->handler->name, cmd);
372                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373                 state = dev->handler->parse(cmd, &cdb_info);
374                 TRACE_DBG("Dev handler %s parse() returned %d",
375                         dev->handler->name, state);
376
377                 if (state == SCST_CMD_STATE_DEFAULT)
378                         state = SCST_CMD_STATE_PREPARE_SPACE;
379         }
380         else
381                 state = SCST_CMD_STATE_PREPARE_SPACE;
382
383         if (scst_cmd_is_expected_set(cmd)) {
384                 if (cmd->expected_transfer_len < cmd->bufflen) {
385                         TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
386                                 "cmd->bufflen(%d), using expected_transfer_len "
387                                 "instead", cmd->expected_transfer_len,
388                                 cmd->bufflen);
389                         cmd->bufflen = cmd->expected_transfer_len;
390                 }
391         }
392
393         if (cmd->data_len == -1)
394                 cmd->data_len = cmd->bufflen;
395
396 #ifdef EXTRACHECKS
397         if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
398                 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
399                         (state != SCST_CMD_STATE_DEV_PARSE)) ||
400                     ((cmd->bufflen != 0) && 
401                         (cmd->data_direction == SCST_DATA_NONE)) ||
402                     ((cmd->bufflen == 0) && 
403                         (cmd->data_direction != SCST_DATA_NONE)) ||
404                     ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
405                         (state > SCST_CMD_STATE_PREPARE_SPACE))) 
406                 {
407                         PRINT_ERROR_PR("Dev handler %s parse() returned "
408                                        "invalid cmd data_direction %d, "
409                                        "bufflen %zd or state %d (opcode 0x%x)",
410                                        dev->handler->name, 
411                                        cmd->data_direction, cmd->bufflen,
412                                        state, cmd->cdb[0]);
413                         goto out_error;
414                 }
415         }
416 #endif
417
418         switch (state) {
419         case SCST_CMD_STATE_PREPARE_SPACE:
420         case SCST_CMD_STATE_DEV_PARSE:
421         case SCST_CMD_STATE_RDY_TO_XFER:
422         case SCST_CMD_STATE_SEND_TO_MIDLEV:
423         case SCST_CMD_STATE_DEV_DONE:
424         case SCST_CMD_STATE_XMIT_RESP:
425         case SCST_CMD_STATE_FINISHED:
426                 cmd->state = state;
427                 res = SCST_CMD_STATE_RES_CONT_SAME;
428                 break;
429
430         case SCST_CMD_STATE_REINIT:
431                 cmd->tgt_dev_saved = tgt_dev_saved;
432                 cmd->state = state;
433                 res = SCST_CMD_STATE_RES_RESTART;
434                 set_dir = 0;
435                 break;
436
437         case SCST_CMD_STATE_NEED_THREAD_CTX:
438                 TRACE_DBG("Dev handler %s parse() requested thread "
439                       "context, rescheduling", dev->handler->name);
440                 res = SCST_CMD_STATE_RES_NEED_THREAD;
441                 set_dir = 0;
442                 break;
443
444         default:
445                 if (state >= 0) {
446                         PRINT_ERROR_PR("Dev handler %s parse() returned "
447                              "invalid cmd state %d (opcode %d)", 
448                              dev->handler->name, state, cmd->cdb[0]);
449                 } else {
450                         PRINT_ERROR_PR("Dev handler %s parse() returned "
451                                 "error %d (opcode %d)", dev->handler->name, 
452                                 state, cmd->cdb[0]);
453                 }
454                 goto out_error;
455         }
456
457         if ((cmd->resp_data_len == -1) && set_dir) {
458                 if (cmd->data_direction == SCST_DATA_READ)
459                         cmd->resp_data_len = cmd->bufflen;
460                 else
461                          cmd->resp_data_len = 0;
462         }
463         
464 out:
465         TRACE_EXIT_HRES(res);
466         return res;
467
468 out_error:
469         /* dev_done() will be called as part of the regular cmd's finish */
470         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
471         cmd->state = SCST_CMD_STATE_DEV_DONE;
472         res = SCST_CMD_STATE_RES_CONT_SAME;
473         goto out;
474
475 out_xmit:
476         cmd->state = SCST_CMD_STATE_XMIT_RESP;
477         res = SCST_CMD_STATE_RES_CONT_SAME;
478         goto out;
479 }
480
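/*
 * A minimal sketch of a dev handler parse() callback as consumed by
 * scst_parse_cmd() above; the handler name is hypothetical.  The data
 * direction and buffer length have already been set from the CDB table
 * before parse() is called, so a trivial handler can simply accept them.
 */
#if 0
static int my_devtype_parse(struct scst_cmd *cmd,
        struct scst_info_cdb *cdb_info)
{
        /* Override cmd->bufflen/cmd->data_direction here if needed */
        return SCST_CMD_STATE_DEFAULT;
}
#endif
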
481 void scst_cmd_mem_work_fn(void *p)
482 {
483         TRACE_ENTRY();
484
485         spin_lock_bh(&scst_cmd_mem_lock);
486
487         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
488         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
489                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
490                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
491         } else {
492                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
493                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
494         }
495         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
496
497         spin_unlock_bh(&scst_cmd_mem_lock);
498
499         TRACE_EXIT();
500         return;
501 }
502
503 int scst_check_mem(struct scst_cmd *cmd)
504 {
505         int res = 0;
506
507         TRACE_ENTRY();
508
509         if (cmd->mem_checked)
510                 goto out;
511
512         spin_lock_bh(&scst_cmd_mem_lock);
513
514         scst_cur_cmd_mem += cmd->bufflen;
515         cmd->mem_checked = 1;
516         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
517                 goto out_unlock;
518
519         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
520                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
521                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
522                 (cmd->sess->initiator_name[0] == '\0') ?
523                   "Anonymous" : cmd->sess->initiator_name,
524                 scst_cur_max_cmd_mem >> 10);
525
526         scst_cur_cmd_mem -= cmd->bufflen;
527         cmd->mem_checked = 0;
528         scst_set_busy(cmd);
529         cmd->state = SCST_CMD_STATE_XMIT_RESP;
530         res = 1;
531
532 out_unlock:
533         spin_unlock_bh(&scst_cmd_mem_lock);
534
535 out:
536         TRACE_EXIT_RES(res);
537         return res;
538 }
539
540 static void scst_low_cur_max_cmd_mem(void)
541 {
542         TRACE_ENTRY();
543
544         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
545                 cancel_delayed_work(&scst_cmd_mem_work);
546                 flush_scheduled_work();
547                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
548         }
549
550         spin_lock_bh(&scst_cmd_mem_lock);
551
552         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
553                                 (scst_cur_cmd_mem >> 2);
554         if (scst_cur_max_cmd_mem < 16*1024*1024)
555                 scst_cur_max_cmd_mem = 16*1024*1024;
556
557         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
558                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
559                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
560                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
561         }
562
563         spin_unlock_bh(&scst_cmd_mem_lock);
564
565         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
566
567         TRACE_EXIT();
568         return;
569 }
570
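/*
 * Note on the arithmetic above: scst_cmd_mem_work_fn() raises the limit by
 * one eighth (x >> 3, i.e. +12.5%) per SCST_CMD_MEM_TIMEOUT tick until it
 * reaches scst_max_cmd_mem again, while scst_low_cur_max_cmd_mem() drops it
 * to roughly 75% of the currently allocated amount ((x >> 1) + (x >> 2)),
 * but never below 16 MB.
 */
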
571 static int scst_prepare_space(struct scst_cmd *cmd)
572 {
573         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
574
575         TRACE_ENTRY();
576
577         if (cmd->data_direction == SCST_DATA_NONE) {
578                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
579                 goto out;
580         }
581
582         r = scst_check_mem(cmd);
583         if (unlikely(r != 0))
584                 goto out;
585
586         if (cmd->data_buf_tgt_alloc) {
587                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
588                 r = cmd->tgtt->alloc_data_buf(cmd);
589                 cmd->data_buf_alloced = (r == 0);
590         } else
591                 r = scst_alloc_space(cmd);
592
593         if (r != 0) {
594                 if (scst_cmd_atomic(cmd)) {
595                         TRACE_MEM("%s", "Atomic memory allocation failed, "
596                               "rescheduling to the thread");
597                         res = SCST_CMD_STATE_RES_NEED_THREAD;
598                         goto out;
599                 } else
600                         goto out_no_space;
601         }
602
603         switch (cmd->data_direction) {
604         case SCST_DATA_WRITE:
605                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
606                 break;
607
608         default:
609                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
610                 break;
611         }
612
613 out:
614         TRACE_EXIT_HRES(res);
615         return res;
616
617 out_no_space:
618         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
619                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
620         scst_low_cur_max_cmd_mem();
621         scst_set_busy(cmd);
622         cmd->state = SCST_CMD_STATE_DEV_DONE;
623         res = SCST_CMD_STATE_RES_CONT_SAME;
624         goto out;
625 }
626
627 /* No locks */
628 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
629 {
630         struct scst_tgt *tgt = cmd->sess->tgt;
631         int res = 0;
632         unsigned long flags;
633
634         TRACE_ENTRY();
635
636         spin_lock_irqsave(&tgt->tgt_lock, flags);
637         tgt->retry_cmds++;
638         smp_mb();
639         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
640               tgt->retry_cmds);
641         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
642                 /* At least one cmd finished, so try again */
643                 tgt->retry_cmds--;
644                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
645                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
646                       "retry_cmds=%d)", finished_cmds,
647                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
648                 res = -1;
649                 goto out_unlock_tgt;
650         }
651
652         TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
653         /* IRQ already off */
654         spin_lock(&scst_list_lock);
655         list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
656         spin_unlock(&scst_list_lock);
657
658         if (!tgt->retry_timer_active) {
659                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
660                 add_timer(&tgt->retry_timer);
661                 tgt->retry_timer_active = 1;
662         }
663
664 out_unlock_tgt:
665         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
666
667         TRACE_EXIT_RES(res);
668         return res;
669 }
670
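/*
 * In short: when rdy_to_xfer() reports SCST_TGT_RES_QUEUE_FULL,
 * scst_queue_retry_cmd() either retries immediately (if another command has
 * finished in the meantime, see the finished_cmds check above) or parks the
 * command on tgt->retry_cmd_list, to be re-driven later by
 * scst_check_retries() or by the per-target retry timer after
 * SCST_TGT_RETRY_TIMEOUT.
 */
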
671 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
672 {
673         int res, rc;
674         int atomic = scst_cmd_atomic(cmd);
675
676         TRACE_ENTRY();
677
678         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
679         {
680                 TRACE_DBG("ABORTED set, returning ABORTED for "
681                         "cmd %p", cmd);
682                 goto out_dev_done;
683         }
684
685         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
686                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
687                       "called in atomic context, rescheduling to the thread");
688                 res = SCST_CMD_STATE_RES_NEED_THREAD;
689                 goto out;
690         }
691
692         while (1) {
693                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
694
695                 res = SCST_CMD_STATE_RES_CONT_NEXT;
696                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
697
698                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
699 #ifdef DEBUG_RETRY
700                 if (((scst_random() % 100) == 75))
701                         rc = SCST_TGT_RES_QUEUE_FULL;
702                 else
703 #endif
704                         rc = cmd->tgtt->rdy_to_xfer(cmd);
705                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
706
707                 if (likely(rc == SCST_TGT_RES_SUCCESS))
708                         goto out;
709
710                 /* Restore the previous state */
711                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
712
713                 switch (rc) {
714                 case SCST_TGT_RES_QUEUE_FULL:
715                 {
716                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
717                                 break;
718                         else
719                                 continue;
720                 }
721
722                 case SCST_TGT_RES_NEED_THREAD_CTX:
723                 {
724                         TRACE_DBG("Target driver %s "
725                               "rdy_to_xfer() requested thread "
726                               "context, rescheduling", cmd->tgtt->name);
727                         res = SCST_CMD_STATE_RES_NEED_THREAD;
728                         break;
729                 }
730
731                 default:
732                         goto out_error_rc;
733                 }
734                 break;
735         }
736
737 out:
738         TRACE_EXIT_HRES(res);
739         return res;
740
741 out_error_rc:
742         if (rc == SCST_TGT_RES_FATAL_ERROR) {
743                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
744                      "fatal error", cmd->tgtt->name);
745         } else {
746                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
747                             "value %d", cmd->tgtt->name, rc);
748         }
749         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
750
751 out_dev_done:
752         cmd->state = SCST_CMD_STATE_DEV_DONE;
753         res = SCST_CMD_STATE_RES_CONT_SAME;
754         goto out;
755 }
756
757 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
758         int check_retries)
759 {
760         unsigned long flags;
761         int rc;
762
763         TRACE_ENTRY();
764
765         TRACE_DBG("Context: %d", context);
766
767         switch(context) {
768         case SCST_CONTEXT_DIRECT:
769         case SCST_CONTEXT_DIRECT_ATOMIC:
770                 if (check_retries)
771                         scst_check_retries(cmd->tgt, 0);
772                 cmd->non_atomic_only = 0;
773                 rc = __scst_process_active_cmd(cmd, context, 0);
774                 if (rc == SCST_CMD_STATE_RES_NEED_THREAD)
775                         goto out_thread;
776                 break;
777
778         default:
779                 PRINT_ERROR_PR("Context %x is unknown, using the thread context",
780                             context);
781                 /* fall through */
782         case SCST_CONTEXT_THREAD:
783                 if (check_retries)
784                         scst_check_retries(cmd->tgt, 1);
785                 goto out_thread;
786
787         case SCST_CONTEXT_TASKLET:
788                 if (check_retries)
789                         scst_check_retries(cmd->tgt, 1);
790                 cmd->non_atomic_only = 0;
791                 spin_lock_irqsave(&scst_list_lock, flags);
792                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
793                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
794                 spin_unlock_irqrestore(&scst_list_lock, flags);
795                 scst_schedule_tasklet();
796                 break;
797         }
798 out:
799         TRACE_EXIT();
800         return;
801
802 out_thread:
803         cmd->non_atomic_only = 1;
804         spin_lock_irqsave(&scst_list_lock, flags);
805         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
806         list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
807         spin_unlock_irqrestore(&scst_list_lock, flags);
808         wake_up(&scst_list_waitQ);
809         goto out;
810 }
811
812 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
813 {
814         TRACE_ENTRY();
815
816         TRACE_DBG("Preferred context: %d", pref_context);
817         TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
818         cmd->non_atomic_only = 0;
819
820         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
821                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
822         {
823                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
824                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
825                         cmd->tgtt->name);
826                 pref_context = SCST_CONTEXT_TASKLET;
827         }
828
829         switch (status) {
830         case SCST_RX_STATUS_SUCCESS:
831                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
832                 break;
833
834         case SCST_RX_STATUS_ERROR_SENSE_SET:
835                 cmd->state = SCST_CMD_STATE_DEV_DONE;
836                 break;
837
838         case SCST_RX_STATUS_ERROR_FATAL:
839                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
840                 /* fall through */
841         case SCST_RX_STATUS_ERROR:
842                 scst_set_cmd_error(cmd,
843                            SCST_LOAD_SENSE(scst_sense_hardw_error));
844                 cmd->state = SCST_CMD_STATE_DEV_DONE;
845                 break;
846
847         default:
848                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
849                         status);
850                 cmd->state = SCST_CMD_STATE_DEV_DONE;
851                 break;
852         }
853
854         scst_proccess_redirect_cmd(cmd, pref_context, 1);
855
856         TRACE_EXIT();
857         return;
858 }
859
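/*
 * A minimal sketch (hypothetical callback name) of how a target driver is
 * expected to report completion of a data transfer started by its
 * rdy_to_xfer() callback, using scst_rx_data() above:
 */
#if 0
static void my_tgt_data_xfer_done(struct scst_cmd *cmd, int error)
{
        /* The status values are the SCST_RX_STATUS_* constants handled above */
        scst_rx_data(cmd, error ? SCST_RX_STATUS_ERROR : SCST_RX_STATUS_SUCCESS,
                SCST_CONTEXT_TASKLET);
}
#endif
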
860 /* No locks supposed to be held */
861 static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
862         int rq_sense_len, int *next_state)
863 {
864         int sense_valid;
865         struct scst_device *dev = cmd->dev;
866         int dbl_ua_possible, ua_sent = 0;
867
868         TRACE_ENTRY();
869
870         /* If we had an internal bus reset behind us, set a reset UA on the cmd */
871         if ((dev->scsi_dev != NULL) &&
872             unlikely(cmd->host_status == DID_RESET) &&
873             scst_is_ua_command(cmd))
874         {
875                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
876                       dev->scsi_dev->was_reset, cmd->host_status);
877                 scst_set_cmd_error(cmd,
878                    SCST_LOAD_SENSE(scst_sense_reset_UA));
879                 /* just in case */
880                 cmd->ua_ignore = 0;
881                 /* It looks like it is safe to clear was_reset here */
882                 dev->scsi_dev->was_reset = 0;
883                 smp_mb();
884         }
885
886         if (rq_sense != NULL) {
887                 sense_valid = SCST_SENSE_VALID(rq_sense);
888                 if (sense_valid) {
889                         memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
890                         /* 
891                          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
892                          * in init_scst()
893                          */
894                         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
895                 }
896         } else
897                 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
898
899         dbl_ua_possible = dev->dev_double_ua_possible;
900         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
901         if (unlikely(dbl_ua_possible)) {
902                 spin_lock_bh(&dev->dev_lock);
903                 barrier(); /* to reread dev_double_ua_possible */
904                 dbl_ua_possible = dev->dev_double_ua_possible;
905                 if (dbl_ua_possible)
906                         ua_sent = dev->dev_reset_ua_sent;
907                 else
908                         spin_unlock_bh(&dev->dev_lock);
909         }
910
911         if (sense_valid) {
912                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
913                              sizeof(cmd->sense_buffer));
914                 /* Check Unit Attention Sense Key */
915                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
916                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
917                                 if (dbl_ua_possible) 
918                                 {
919                                         if (ua_sent) {
920                                                 TRACE(TRACE_MGMT, "%s", 
921                                                         "Double UA detected");
922                                                 /* Do retry */
923                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
924                                                         "(tag %d)", cmd, cmd->tag);
925                                                 cmd->status = 0;
926                                                 cmd->masked_status = 0;
927                                                 cmd->msg_status = 0;
928                                                 cmd->host_status = DID_OK;
929                                                 cmd->driver_status = 0;
930                                                 memset(cmd->sense_buffer, 0,
931                                                         sizeof(cmd->sense_buffer));
932                                                 cmd->retry = 1;
933                                                 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
934                                                 /* 
935                                                  * Dev is still blocked by this cmd, so
936                                                  * it's OK to clear SCST_DEV_SERIALIZED
937                                                  * here.
938                                                  */
939                                                 dev->dev_double_ua_possible = 0;
940                                                 dev->dev_serialized = 0;
941                                                 dev->dev_reset_ua_sent = 0;
942                                                 goto out_unlock;
943                                         } else
944                                                 dev->dev_reset_ua_sent = 1;
945                                 }
946                         }
947                         if (cmd->ua_ignore == 0) {
948                                 if (unlikely(dbl_ua_possible)) {
949                                         __scst_process_UA(dev, cmd,
950                                                 cmd->sense_buffer,
951                                                 sizeof(cmd->sense_buffer), 0);
952                                 } else {
953                                         scst_process_UA(dev, cmd,
954                                                 cmd->sense_buffer,
955                                                 sizeof(cmd->sense_buffer), 0);
956                                 }
957                         }
958                 }
959         }
960
961         if (unlikely(dbl_ua_possible)) {
962                 if (ua_sent && scst_is_ua_command(cmd)) {
963                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
964                         dev->dev_double_ua_possible = 0;
965                         dev->dev_serialized = 0;
966                         dev->dev_reset_ua_sent = 0;
967                 }
968                 spin_unlock_bh(&dev->dev_lock);
969         }
970
971 out:
972         TRACE_EXIT();
973         return;
974
975 out_unlock:
976         spin_unlock_bh(&dev->dev_lock);
977         goto out;
978 }
979
980 static int scst_check_auto_sense(struct scst_cmd *cmd)
981 {
982         int res = 0;
983
984         TRACE_ENTRY();
985
986         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
987             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
988              SCST_NO_SENSE(cmd->sense_buffer)))
989         {
990                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
991                       "cmd->status=%x, cmd->masked_status=%x, "
992                       "cmd->msg_status=%x, cmd->host_status=%x, "
993                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
994                       cmd->msg_status, cmd->host_status, cmd->driver_status);
995                 res = 1;
996         } else if (unlikely(cmd->host_status)) {
997                 if ((cmd->host_status == DID_REQUEUE) ||
998                     (cmd->host_status == DID_IMM_RETRY) ||
999                     (cmd->host_status == DID_SOFT_ERROR)) {
1000                         scst_set_busy(cmd);
1001                 } else {
1002                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
1003                                 "received, returning HARDWARE ERROR instead",
1004                                 cmd->host_status);
1005                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1006                 }
1007         }
1008
1009         TRACE_EXIT_RES(res);
1010         return res;
1011 }
1012
1013 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1014         const uint8_t *rq_sense, int rq_sense_len, int *next_state)
1015 {
1016         unsigned char type;
1017
1018         TRACE_ENTRY();
1019
1020         cmd->status = result & 0xff;
1021         cmd->masked_status = status_byte(result);
1022         cmd->msg_status = msg_byte(result);
1023         cmd->host_status = host_byte(result);
1024         cmd->driver_status = driver_byte(result);
1025         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, "
1026               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
1027               "cmd->driver_status=%x", result, cmd->status,
1028               cmd->masked_status, cmd->msg_status, cmd->host_status,
1029               cmd->driver_status);
1030
1031         cmd->completed = 1;
1032
1033         scst_dec_on_dev_cmd(cmd);
1034
1035         type = cmd->dev->handler->type;
1036         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1037             cmd->tgt_dev->acg_dev->rd_only_flag &&
1038             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1039              type == TYPE_TAPE)) {
1040                 int32_t length;
1041                 uint8_t *address;
1042
1043                 length = scst_get_buf_first(cmd, &address);
1044                 TRACE_DBG("length %d", length);
1045                 if (unlikely(length <= 0)) {
1046                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1047                                 __func__);
1048                         goto next;
1049                 }
1050                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1051                         address[2] |= 0x80;   /* Write Protect*/
1052                 }
1053                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1054                         address[3] |= 0x80;   /* Write Protect*/
1055                 }
1056                 scst_put_buf(cmd, address);
1057         }
1058
1059 next:
1060         scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
1061
1062         TRACE_EXIT();
1063         return;
1064 }
1065
1066 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1067 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1068                                             struct scsi_request **req)
1069 {
1070         struct scst_cmd *cmd = NULL;
1071
1072         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1073                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1074
1075         if (cmd == NULL) {
1076                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1077                 if (*req)
1078                         scsi_release_request(*req);
1079         }
1080
1081         return cmd;
1082 }
1083
1084 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1085 {
1086         struct scsi_request *req = NULL;
1087         struct scst_cmd *cmd;
1088         int next_state;
1089
1090         TRACE_ENTRY();
1091
1092         WARN_ON(in_irq());
1093
1094         /*
1095          * We don't use scsi_cmd->resid, because:
1096          * 1. Many low level initiator drivers don't use (set) this field
1097          * 2. We determine the command's buffer size directly from CDB, 
1098          *    so scsi_cmd->resid is not relevant for us, and target drivers 
1099          *    should know the residual, if necessary, by comparing expected 
1100          *    and actual transfer sizes.
1101          */
1102
1103         cmd = scst_get_cmd(scsi_cmd, &req);
1104         if (cmd == NULL)
1105                 goto out;
1106
1107         next_state = SCST_CMD_STATE_DEV_DONE;
1108         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1109                 sizeof(req->sr_sense_buffer), &next_state);
1110
1111         /* Clear out request structure */
1112         req->sr_use_sg = 0;
1113         req->sr_sglist_len = 0;
1114         req->sr_bufflen = 0;
1115         req->sr_buffer = NULL;
1116         req->sr_underflow = 0;
1117         req->sr_request->rq_disk = NULL; /* disown request blk */
1118
1119         cmd->bufflen = req->sr_bufflen; //??
1120
1121         scst_release_request(cmd);
1122
1123         cmd->state = next_state;
1124         cmd->non_atomic_only = 0;
1125
1126         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1127
1128 out:
1129         TRACE_EXIT();
1130         return;
1131 }
1132 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1133 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1134 {
1135         struct scst_cmd *cmd;
1136         int next_state;
1137
1138         TRACE_ENTRY();
1139
1140         WARN_ON(in_irq());
1141
1142         /*
1143          * We don't use resid, because:
1144          * 1. Many low level initiator drivers don't use (set) this field
1145          * 2. We determine the command's buffer size directly from CDB,
1146          *    so resid is not relevant for us, and target drivers
1147          *    should know the residual, if necessary, by comparing expected
1148          *    and actual transfer sizes.
1149          */
1150
1151         cmd = (struct scst_cmd *)data;
1152         if (cmd == NULL)
1153                 goto out;
1154
1155         next_state = SCST_CMD_STATE_DEV_DONE;
1156         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE,
1157                 &next_state);
1158
1159         cmd->state = next_state;
1160         cmd->non_atomic_only = 0;
1161
1162         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1163
1164 out:
1165         TRACE_EXIT();
1166         return;
1167 }
1168 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1169
1170 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1171 {
1172         TRACE_ENTRY();
1173
1174         BUG_ON(in_irq());
1175
1176         scst_dec_on_dev_cmd(cmd);
1177
1178         if (next_state == SCST_CMD_STATE_DEFAULT)
1179                 next_state = SCST_CMD_STATE_DEV_DONE;
1180
1181         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1182 #if defined(DEBUG) || defined(TRACING)
1183                 if (cmd->sg) {
1184                         int i;
1185                         struct scatterlist *sg = cmd->sg;
1186                         TRACE(TRACE_RECV_TOP, 
1187                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1188                               cmd->sg_cnt, sg, (void*)sg[0].page);
1189                         for(i = 0; i < cmd->sg_cnt; ++i) {
1190                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1191                                         "Exec'd sg", page_address(sg[i].page),
1192                                         sg[i].length);
1193                         }
1194                 }
1195 #endif
1196         }
1197
1198
1199 #ifdef EXTRACHECKS
1200         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1201             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1202             (next_state != SCST_CMD_STATE_FINISHED)) 
1203         {
1204                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1205                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1206                 scst_set_cmd_error(cmd,
1207                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1208                 next_state = SCST_CMD_STATE_DEV_DONE;
1209         }
1210
1211         if (scst_check_auto_sense(cmd)) {
1212                 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1213                         "opcode %d", cmd->cdb[0]);
1214         }
1215 #endif
1216
1217         scst_check_sense(cmd, NULL, 0, &next_state);
1218
1219         cmd->state = next_state;
1220         cmd->non_atomic_only = 0;
1221
1222         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1223
1224         TRACE_EXIT();
1225         return;
1226 }
1227
1228 static int scst_report_luns_local(struct scst_cmd *cmd)
1229 {
1230         int res = SCST_EXEC_COMPLETED;
1231         int dev_cnt = 0;
1232         int buffer_size;
1233         struct scst_tgt_dev *tgt_dev = NULL;
1234         uint8_t *buffer;
1235
1236         TRACE_ENTRY();
1237
1238         cmd->status = 0;
1239         cmd->masked_status = 0;
1240         cmd->msg_status = 0;
1241         cmd->host_status = DID_OK;
1242         cmd->driver_status = 0;
1243
1244         /* ToDo: use full SG buffer, not only the first entry */
1245         buffer_size = scst_get_buf_first(cmd, &buffer);
1246         if (unlikely(buffer_size <= 0))
1247                 goto out_err;
1248
1249         if (buffer_size < 16) {
1250                 goto out_put_err;
1251         }
1252
1253         memset(buffer, 0, buffer_size);
1254
1255         /* sess->sess_tgt_dev_list is protected by suspended activity */
1256         list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1257                             sess_tgt_dev_list_entry) 
1258         {
1259                 if (8 + 8 * dev_cnt + 2 <= buffer_size) {
1260                         buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1261                         buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
1262                 }
1263                 dev_cnt++;
1264                 /* Temporary, until the ToDo above is done */
1265                 if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
1266                         break;
1267         }
1268
1269         /* Set the response header */
1270         dev_cnt *= 8;
1271         buffer[0] = (dev_cnt >> 24) & 0xff;
1272         buffer[1] = (dev_cnt >> 16) & 0xff;
1273         buffer[2] = (dev_cnt >> 8) & 0xff;
1274         buffer[3] = dev_cnt & 0xff;
1275
1276         dev_cnt += 8;
1277
1278         scst_put_buf(cmd, buffer);
1279
1280         if (buffer_size > dev_cnt)
1281                 scst_set_resp_data_len(cmd, dev_cnt);
1282         
1283 out_done:
1284         cmd->completed = 1;
1285
1286         /* Report the result */
1287         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1288
1289         TRACE_EXIT_RES(res);
1290         return res;
1291         
1292 out_put_err:
1293         scst_put_buf(cmd, buffer);
1294
1295 out_err:
1296         scst_set_cmd_error(cmd,
1297                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1298         goto out_done;
1299 }
1300
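/*
 * For reference, the REPORT LUNS parameter data built above consists of an
 * 8-byte header (bytes 0-3: LUN list length in bytes, i.e. 8 * <number of
 * LUNs>, big-endian; bytes 4-7: reserved) followed by one 8-byte LUN entry
 * per device, of which only the address bytes are filled in here.  Hence the
 * total length passed to scst_set_resp_data_len() is 8 + 8 * <number of LUNs>.
 */
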
1301 static int scst_pre_select(struct scst_cmd *cmd)
1302 {
1303         int res = SCST_EXEC_NOT_COMPLETED;
1304
1305         TRACE_ENTRY();
1306
1307         if (scst_cmd_atomic(cmd)) {
1308                 res = SCST_EXEC_NEED_THREAD;
1309                 goto out;
1310         }
1311
1312         scst_block_dev(cmd->dev, 1);
1313         /* Device will be unblocked in scst_done_cmd_check() */
1314
1315         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1316                 int rc = scst_set_pending_UA(cmd);
1317                 if (rc == 0) {
1318                         res = SCST_EXEC_COMPLETED;
1319                         cmd->completed = 1;
1320                         /* Report the result */
1321                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1322                         goto out;
1323                 }
1324         }
1325
1326 out:
1327         TRACE_EXIT_RES(res);
1328         return res;
1329 }
1330
1331 static inline void scst_report_reserved(struct scst_cmd *cmd)
1332 {
1333         TRACE_ENTRY();
1334
1335         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1336         cmd->completed = 1;
1337         /* Report the result */
1338         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1339
1340         TRACE_EXIT();
1341         return;
1342 }
1343
1344 static int scst_reserve_local(struct scst_cmd *cmd)
1345 {
1346         int res = SCST_EXEC_NOT_COMPLETED;
1347         struct scst_device *dev;
1348         struct scst_tgt_dev *tgt_dev_tmp;
1349
1350         TRACE_ENTRY();
1351
1352         if (scst_cmd_atomic(cmd)) {
1353                 res = SCST_EXEC_NEED_THREAD;
1354                 goto out;
1355         }
1356
1357         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1358                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1359                      "(lun=%Ld)", (uint64_t)cmd->lun);
1360                 scst_set_cmd_error(cmd,
1361                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1362                 cmd->completed = 1;
1363                 res = SCST_EXEC_COMPLETED;
1364                 goto out;
1365         }
1366
1367         dev = cmd->dev;
1368         scst_block_dev(dev, 1);
1369         /* Device will be unblocked in scst_done_cmd_check() */
1370
1371         spin_lock_bh(&dev->dev_lock);
1372
1373         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1374                 scst_report_reserved(cmd);
1375                 /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1376                 res = SCST_EXEC_COMPLETED;
1377                 goto out_unlock;
1378         }
1379
1380         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1381                             dev_tgt_dev_list_entry) 
1382         {
1383                 if (cmd->tgt_dev != tgt_dev_tmp)
1384                         set_bit(SCST_TGT_DEV_RESERVED, 
1385                                 &tgt_dev_tmp->tgt_dev_flags);
1386         }
1387         dev->dev_reserved = 1;
1388
1389 out_unlock:
1390         spin_unlock_bh(&dev->dev_lock);
1391         
1392 out:
1393         TRACE_EXIT_RES(res);
1394         return res;
1395 }
1396
1397 static int scst_release_local(struct scst_cmd *cmd)
1398 {
1399         int res = SCST_EXEC_NOT_COMPLETED;
1400         struct scst_tgt_dev *tgt_dev_tmp;
1401         struct scst_device *dev;
1402
1403         TRACE_ENTRY();
1404
1405         dev = cmd->dev;
1406
1407         scst_block_dev(dev, 1);
1408         cmd->blocking = 1;
1409         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1410
1411         spin_lock_bh(&dev->dev_lock);
1412
1413         /* 
1414          * The device could be RELEASED behind us if the RESERVING session
1415          * is closed (see scst_free_tgt_dev()), but this actually doesn't
1416          * matter, so take the lock and don't retest the DEV_RESERVED bits
1417          */
1418         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1419                 res = SCST_EXEC_COMPLETED;
1420                 cmd->status = 0;
1421                 cmd->masked_status = 0;
1422                 cmd->msg_status = 0;
1423                 cmd->host_status = DID_OK;
1424                 cmd->driver_status = 0;
1425         } else {
1426                 list_for_each_entry(tgt_dev_tmp,
1427                                     &dev->dev_tgt_dev_list,
1428                                     dev_tgt_dev_list_entry) 
1429                 {
1430                         clear_bit(SCST_TGT_DEV_RESERVED, 
1431                                 &tgt_dev_tmp->tgt_dev_flags);
1432                 }
1433                 dev->dev_reserved = 0;
1434         }
1435
1436         spin_unlock_bh(&dev->dev_lock);
1437
1438         if (res == SCST_EXEC_COMPLETED) {
1439                 cmd->completed = 1;
1440                 /* Report the result */
1441                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1442         }
1443
1444         TRACE_EXIT_RES(res);
1445         return res;
1446 }
1447
1448 /* 
1449  * The result of cmd execution, if any, should be reported 
1450  * via scst_cmd_done_local() 
1451  */
1452 static int scst_pre_exec(struct scst_cmd *cmd)
1453 {
1454         int res = SCST_EXEC_NOT_COMPLETED, rc;
1455         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1456
1457         TRACE_ENTRY();
1458
1459         /* Reservation check before Unit Attention */
1460         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
1461             (cmd->cdb[0] != INQUIRY) &&
1462             (cmd->cdb[0] != REPORT_LUNS) &&
1463             (cmd->cdb[0] != RELEASE) &&
1464             (cmd->cdb[0] != RELEASE_10) &&
1465             (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1466             (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1467             (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
1468         {
1469                 scst_report_reserved(cmd);
1470                 res = SCST_EXEC_COMPLETED;
1471                 goto out;
1472         }
1473
1474         /* If we had an internal bus reset, set the command error to unit attention */
1475         if ((cmd->dev->scsi_dev != NULL) &&
1476             unlikely(cmd->dev->scsi_dev->was_reset) &&
1477             scst_is_ua_command(cmd)) 
1478         {
1479                 struct scst_device *dev = cmd->dev;
1480                 int done = 0;
1481                 /* Prevent more than one cmd from being triggered by was_reset */
1482                 spin_lock_bh(&dev->dev_lock);
1483                 barrier(); /* to reread was_reset */
1484                 if (dev->scsi_dev->was_reset) {
1485                         TRACE(TRACE_MGMT, "was_reset is %d", 1);
1486                         scst_set_cmd_error(cmd,
1487                                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1488                         /* It looks like it is safe to clear was_reset here */
1489                         dev->scsi_dev->was_reset = 0;
1490                         smp_mb();
1491                         done = 1;
1492                 }
1493                 spin_unlock_bh(&dev->dev_lock);
1494
1495                 if (done)
1496                         goto out_done;
1497         }
1498
1499         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
1500             scst_is_ua_command(cmd)) 
1501         {
1502                 rc = scst_set_pending_UA(cmd);
1503                 if (rc == 0)
1504                         goto out_done;
1505         }
1506
1507         /* Check READ_ONLY device status */
1508         if (tgt_dev->acg_dev->rd_only_flag &&
1509             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1510              cmd->cdb[0] == WRITE_10 ||
1511              cmd->cdb[0] == WRITE_12 ||
1512              cmd->cdb[0] == WRITE_16 ||
1513              cmd->cdb[0] == WRITE_VERIFY ||
1514              cmd->cdb[0] == WRITE_VERIFY_12 ||
1515              cmd->cdb[0] == WRITE_VERIFY_16 ||
1516              (cmd->dev->handler->type == TYPE_TAPE &&
1517               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1518         {
1519                 scst_set_cmd_error(cmd,
1520                            SCST_LOAD_SENSE(scst_sense_data_protect));
1521                 goto out_done;
1522         }
1523 out:
1524         TRACE_EXIT_RES(res);
1525         return res;
1526
1527 out_done:
1528         res = SCST_EXEC_COMPLETED;
1529         cmd->completed = 1;
1530         /* Report the result */
1531         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1532         goto out;
1533 }
1534
1535 /* 
1536  * The result of cmd execution, if any, should be reported 
1537  * via scst_cmd_done_local() 
1538  */
1539 static inline int scst_local_exec(struct scst_cmd *cmd)
1540 {
1541         int res = SCST_EXEC_NOT_COMPLETED;
1542
1543         TRACE_ENTRY();
1544
1545         /*
1546          * When adding new commands here, don't forget to update
1547          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1548          */
1549
1550         switch (cmd->cdb[0]) {
1551         case MODE_SELECT:
1552         case MODE_SELECT_10:
1553         case LOG_SELECT:
1554                 res = scst_pre_select(cmd);
1555                 break;
1556         case RESERVE:
1557         case RESERVE_10:
1558                 res = scst_reserve_local(cmd);
1559                 break;
1560         case RELEASE:
1561         case RELEASE_10:
1562                 res = scst_release_local(cmd);
1563                 break;
1564         case REPORT_LUNS:
1565                 res = scst_report_luns_local(cmd);
1566                 break;
1567         }
1568
1569         TRACE_EXIT_RES(res);
1570         return res;
1571 }
1572
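/*
 * Dispatches a single cmd for execution: pre-exec checks (reservations,
 * pending UAs, read-only protection), then locally handled opcodes, then
 * the dev handler's exec() callback and, finally, the SCSI mid-level.
 * Returns an SCST_EXEC_* code; on errors the cmd is completed locally.
 */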
1573 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1574 {
1575         int rc = SCST_EXEC_NOT_COMPLETED;
1576
1577         TRACE_ENTRY();
1578
1579         cmd->sent_to_midlev = 1;
1580         cmd->state = SCST_CMD_STATE_EXECUTING;
1581         cmd->scst_cmd_done = scst_cmd_done_local;
1582
1583         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1584         smp_mb__after_set_bit();
1585
1586         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1587                 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1588                 goto out_aborted;
1589         }
1590
1591         rc = scst_pre_exec(cmd);
1592         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1593         if (rc != SCST_EXEC_NOT_COMPLETED) {
1594                 if (rc == SCST_EXEC_COMPLETED)
1595                         goto out;
1596                 else if (rc == SCST_EXEC_NEED_THREAD)
1597                         goto out_clear;
1598                 else
1599                         goto out_rc_error;
1600         }
1601
1602         rc = scst_local_exec(cmd);
1603         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1604         if (rc != SCST_EXEC_NOT_COMPLETED) {
1605                 if (rc == SCST_EXEC_COMPLETED)
1606                         goto out;
1607                 else if (rc == SCST_EXEC_NEED_THREAD)
1608                         goto out_clear;
1609                 else
1610                         goto out_rc_error;
1611         }
1612
1613         if (cmd->dev->handler->exec) {
1614                 struct scst_device *dev = cmd->dev;
1615                 TRACE_DBG("Calling dev handler %s exec(%p)",
1616                       dev->handler->name, cmd);
1617                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1618                 cmd->scst_cmd_done = scst_cmd_done_local;
1619                 rc = dev->handler->exec(cmd);
1620                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1621                 TRACE_DBG("Dev handler %s exec() returned %d",
1622                       dev->handler->name, rc);
1623                 if (rc != SCST_EXEC_NOT_COMPLETED) {
1624                         if (rc == SCST_EXEC_COMPLETED)
1625                                 goto out;
1626                         else if (rc == SCST_EXEC_NEED_THREAD)
1627                                 goto out_clear;
1628                         else
1629                                 goto out_rc_error;
1630                 }
1631         }
1632
1633         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1634         
1635         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1636                 PRINT_ERROR_PR("Command for virtual device must be "
1637                         "processed by device handler (lun %Ld)!",
1638                         (uint64_t)cmd->lun);
1639                 goto out_error;
1640         }
1641
1642 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1643         if (scst_alloc_request(cmd) != 0) {
1644                 PRINT_INFO_PR("%s", "Unable to allocate request, "
1645                         "sending BUSY status");
1646                 goto out_busy;
1647         }
1648         
1649         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1650                     (void *)cmd->scsi_req->sr_buffer,
1651                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1652                     cmd->retries);
1653 #else
1654         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1655                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1656                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1657                         GFP_KERNEL);
1658         if (rc) {
1659                 PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1660                 goto out_error;
1661         }
1662 #endif
1663
1664         rc = SCST_EXEC_COMPLETED;
1665
1666 out:
1667         TRACE_EXIT();
1668         return rc;
1669
1670 out_clear:
1671         /* Restore the state */
1672         cmd->sent_to_midlev = 0;
1673         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1674         goto out;
1675
1676 out_rc_error:
1677         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1678                     "invalid code %d", cmd->dev->handler->name, rc);
1679         /* fall through */
1680
1681 out_error:
1682         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1683         cmd->completed = 1;
1684         cmd->state = SCST_CMD_STATE_DEV_DONE;
1685         rc = SCST_EXEC_COMPLETED;
1686         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1687         goto out;
1688
1689 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1690 out_busy:
1691         scst_set_busy(cmd);
1692         cmd->completed = 1;
1693         cmd->state = SCST_CMD_STATE_DEV_DONE;
1694         rc = SCST_EXEC_COMPLETED;
1695         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1696         goto out;
1697 #endif
1698
1699 out_aborted:
1700         rc = SCST_EXEC_COMPLETED;
1701         /* Report the result. The cmd is not completed */
1702         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1703         goto out;
1704 }
1705
1706 static int scst_send_to_midlev(struct scst_cmd *cmd)
1707 {
1708         int res, rc;
1709         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1710         struct scst_device *dev = cmd->dev;
1711         int expected_sn;
1712         int count;
1713         int atomic = scst_cmd_atomic(cmd);
1714
1715         TRACE_ENTRY();
1716
1717         res = SCST_CMD_STATE_RES_CONT_NEXT;
1718
1719         if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1720                 TRACE_DBG("Dev handler %s exec() can not be "
1721                       "called in atomic context, rescheduling to the thread",
1722                       dev->handler->name);
1723                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1724                 goto out;
1725         }
1726
1727         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1728                 goto out;
1729
1730         scst_inc_cmd_count(); /* protect dev & tgt_dev */
1731
1732         if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1733                 rc = scst_do_send_to_midlev(cmd);
1734                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1735                 if (rc == SCST_EXEC_NEED_THREAD) {
1736                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1737                               "thread context, rescheduling");
1738                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1739                         scst_dec_on_dev_cmd(cmd);
1740                         goto out_dec_cmd_count;
1741                 } else {
1742                         BUG_ON(rc != SCST_EXEC_COMPLETED);
1743                         goto out_unplug;
1744                 }
1745         }
1746
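        /*
         * SCSI serialization: commands must reach the mid-level in SN order.
         * If this cmd's SN is not the expected one, park it on the tgt_dev's
         * deferred list; it will be picked up once expected_sn advances.
         */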
1747         expected_sn = tgt_dev->expected_sn;
1748         if (cmd->sn != expected_sn) {
1749                 spin_lock_bh(&tgt_dev->sn_lock);
1750                 tgt_dev->def_cmd_count++;
1751                 smp_mb();
1752                 barrier(); /* to reread expected_sn */
1753                 expected_sn = tgt_dev->expected_sn;
1754                 if (cmd->sn != expected_sn) {
1755                         scst_dec_on_dev_cmd(cmd);
1756                         TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1757                               "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1758                         list_add_tail(&cmd->sn_cmd_list_entry,
1759                                       &tgt_dev->deferred_cmd_list);
1760                         spin_unlock_bh(&tgt_dev->sn_lock);
1761                         /* !! At this point cmd can be already freed !! */
1762                         goto out_dec_cmd_count;
1763                 } else {
1764                         TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1765                               "expected_sn %d, continuing", expected_sn);
1766                         tgt_dev->def_cmd_count--;
1767                         spin_unlock_bh(&tgt_dev->sn_lock);
1768                 }
1769         }
1770
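        /*
         * Execute this cmd, then drain any deferred commands that become
         * eligible as expected_sn advances.
         */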
1771         count = 0;
1772         while(1) {
1773                 rc = scst_do_send_to_midlev(cmd);
1774                 if (rc == SCST_EXEC_NEED_THREAD) {
1775                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1776                               "thread context, rescheduling");
1777                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1778                         scst_dec_on_dev_cmd(cmd);
1779                         if (count != 0)
1780                                 goto out_unplug;
1781                         else
1782                                 goto out_dec_cmd_count;
1783                 }
1784                 BUG_ON(rc != SCST_EXEC_COMPLETED);
1785                 /* !! At this point cmd can be already freed !! */
1786                 count++;
1787                 expected_sn = __scst_inc_expected_sn(tgt_dev);
1788                 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1789                 if (cmd == NULL)
1790                         break;
1791                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1792                         break;
1793         }
1794
1795 out_unplug:
1796         if (dev->scsi_dev != NULL)
1797                 generic_unplug_device(dev->scsi_dev->request_queue);
1798
1799 out_dec_cmd_count:
1800         scst_dec_cmd_count();
1801         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1802
1803 out:
1804         TRACE_EXIT_HRES(res);
1805         return res;
1806 }
1807
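/*
 * Allocates an internal cmd (e.g. for REQUEST SENSE) that inherits the
 * session, target and device context of orig_cmd and is queued at the
 * head of the queue. Returns NULL on allocation failure.
 */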
1808 static struct scst_cmd *scst_create_prepare_internal_cmd(
1809         struct scst_cmd *orig_cmd, int bufsize)
1810 {
1811         struct scst_cmd *res;
1812         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1813
1814         TRACE_ENTRY();
1815
1816         res = scst_alloc_cmd(gfp_mask);
1817         if (unlikely(res == NULL)) {
1818                 goto out;
1819         }
1820
1821         res->sess = orig_cmd->sess;
1822         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1823         res->atomic = scst_cmd_atomic(orig_cmd);
1824         res->internal = 1;
1825         res->tgtt = orig_cmd->tgtt;
1826         res->tgt = orig_cmd->tgt;
1827         res->dev = orig_cmd->dev;
1828         res->tgt_dev = orig_cmd->tgt_dev;
1829         res->lun = orig_cmd->lun;
1830         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1831         res->data_direction = SCST_DATA_UNKNOWN;
1832         res->orig_cmd = orig_cmd;
1833
1834         res->bufflen = bufsize;
1835         if (bufsize > 0) {
1836                 if (scst_alloc_space(res) != 0) {
1837                         PRINT_ERROR("Unable to create buffer (size %d) for "
1838                                 "internal cmd", bufsize);
1839                         goto out_free_res;
                        }
1840         }
1841
1842 out:
1843         TRACE_EXIT_HRES((unsigned long)res);
1844         return res;
1845
1846 out_free_res:
1847         scst_destroy_cmd(res);
1848         res = NULL;
1849         goto out;
1850 }
1851
1852 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1853 {
1854         TRACE_ENTRY();
1855
1856         if (cmd->bufflen > 0)
1857                 scst_release_space(cmd);
1858         scst_destroy_cmd(cmd);
1859
1860         TRACE_EXIT();
1861         return;
1862 }
1863
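/*
 * Builds an internal REQUEST SENSE cmd for orig_cmd and puts it at the
 * head of the active cmd list. Returns SCST_CMD_STATE_RES_RESTART on
 * success, -1 on failure.
 */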
1864 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1865 {
1866         int res = SCST_CMD_STATE_RES_RESTART;
1867 #define sbuf_size 252
1868         static const unsigned char request_sense[6] =
1869             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1870         struct scst_cmd *rs_cmd;
1871
1872         TRACE_ENTRY();
1873
1874         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1875         if (rs_cmd == NULL)
1876                 goto out_error;
1877
1878         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1879         rs_cmd->cdb_len = sizeof(request_sense);
1880         rs_cmd->data_direction = SCST_DATA_READ;
1881
1882         spin_lock_irq(&scst_list_lock);
1883         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1884         spin_unlock_irq(&scst_list_lock);
1885
1886 out:
1887         TRACE_EXIT_RES(res);
1888         return res;
1889
1890 out_error:
1891         res = -1;
1892         goto out;
1893 #undef sbuf_size
1894 }
1895
1896 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1897 {
1898         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1899         uint8_t *buf;
1900         int len;
1901
1902         TRACE_ENTRY();
1903
1904         BUG_ON(orig_cmd == NULL);
1905
1906         len = scst_get_buf_first(cmd, &buf);
1907
1908         if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1909             (!SCST_NO_SENSE(buf))) 
1910         {
1911                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1912                         buf, len);
1913                 memcpy(orig_cmd->sense_buffer, buf,
1914                         (sizeof(orig_cmd->sense_buffer) > len) ?
1915                                 len : sizeof(orig_cmd->sense_buffer));
1916         } else {
1917                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1918                         "REQUEST SENSE, returning HARDWARE ERROR");
1919                 scst_set_cmd_error(orig_cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1920         }
1921
1922         scst_put_buf(cmd, buf);
1923
1924         scst_free_internal_cmd(cmd);
1925
1926         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1927         return orig_cmd;
1928 }
1929
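/*
 * Post-execution fixups: auto-sense via an internal REQUEST SENSE, write
 * protect bit for read-only LUNs in MODE SENSE data, clearing of NormACA
 * in standard INQUIRY data, reservation cleanup and SELECT UA generation.
 * Returns non-zero (with *pres set) if the caller should stop processing.
 */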
1930 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1931 {
1932         int res = 0, rc;
1933         unsigned char type;
1934
1935         TRACE_ENTRY();
1936
1937         if (cmd->cdb[0] == REQUEST_SENSE) {
1938                 if (cmd->internal)
1939                         cmd = scst_complete_request_sense(cmd);
1940         } else if (scst_check_auto_sense(cmd)) {
1941                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1942                             "without sense data (opcode 0x%x), issuing "
1943                             "REQUEST SENSE", cmd->cdb[0]);
1944                 rc = scst_prepare_request_sense(cmd);
1945                 if (rc > 0) {
1946                         *pres = rc;
1947                         res = 1;
1948                         goto out;
1949                 } else {
1950                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1951                                     "returning HARDWARE ERROR");
1952                         scst_set_cmd_error(cmd,
1953                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1954                 }
1955         }
1956
1957         type = cmd->dev->handler->type;
1958         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1959             cmd->tgt_dev->acg_dev->rd_only_flag &&
1960             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1961              type == TYPE_TAPE))
1962         {
1963                 int32_t length;
1964                 uint8_t *address;
1965
1966                 length = scst_get_buf_first(cmd, &address);
1967                 if (length <= 0)
1968                         goto out;
1969                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1970                         address[2] |= 0x80;   /* Write Protect*/
1971                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1972                         address[3] |= 0x80;   /* Write Protect*/
1973                 scst_put_buf(cmd, address);
1974         }
1975
1976         /* 
1977          * Check and clear NormACA option for the device, if necessary,
1978          * since we don't support ACA
1979          */
1980         if ((cmd->cdb[0] == INQUIRY) &&
1981             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1982             (cmd->resp_data_len > SCST_INQ_BYTE3))
1983         {
1984                 uint8_t *buffer;
1985                 int buflen;
1986
1987                 /* ToDo: all pages ?? */
1988                 buflen = scst_get_buf_first(cmd, &buffer);
1989                 if (buflen > 0) {
1990                         if (buflen > SCST_INQ_BYTE3) {
1991 #ifdef EXTRACHECKS
1992                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1993                                         PRINT_INFO_PR("NormACA set for device: "
1994                                             "lun=%Ld, type 0x%02x", 
1995                                             (uint64_t)cmd->lun, buffer[0]);
1996                                 }
1997 #endif
1998                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1999                         } else
2000                                 scst_set_cmd_error(cmd,
2001                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
2002
2003                         scst_put_buf(cmd, buffer);
2004                 }
2005         }
2006
2007         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
2008                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
2009                                                 &cmd->tgt_dev->tgt_dev_flags)) {
2010                         struct scst_tgt_dev *tgt_dev_tmp;
2011                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
2012                               (uint64_t)cmd->lun, cmd->masked_status);
2013                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
2014                                      sizeof(cmd->sense_buffer));
2015                         /* Clearing the reservation */
2016                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
2017                                             dev_tgt_dev_list_entry) {
2018                                 clear_bit(SCST_TGT_DEV_RESERVED, 
2019                                         &tgt_dev_tmp->tgt_dev_flags);
2020                         }
2021                         cmd->dev->dev_reserved = 0;
2022                 }
2023                 scst_unblock_dev(cmd->dev);
2024         }
2025         
2026         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
2027                      (cmd->cdb[0] == MODE_SELECT_10) ||
2028                      (cmd->cdb[0] == LOG_SELECT)))
2029         {
2030                 if (cmd->status == 0) {
2031                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2032                                 "setting the SELECT UA (lun=%Ld)", 
2033                                 (uint64_t)cmd->lun);
2034                         spin_lock_bh(&scst_temp_UA_lock);
2035                         if (cmd->cdb[0] == LOG_SELECT) {
2036                                 scst_set_sense(scst_temp_UA,
2037                                         sizeof(scst_temp_UA),
2038                                         UNIT_ATTENTION, 0x2a, 0x02);
2039                         } else {
2040                                 scst_set_sense(scst_temp_UA,
2041                                         sizeof(scst_temp_UA),
2042                                         UNIT_ATTENTION, 0x2a, 0x01);
2043                         }
2044                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2045                                 sizeof(scst_temp_UA), 1);
2046                         spin_unlock_bh(&scst_temp_UA_lock);
2047                 }
2048                 scst_unblock_dev(cmd->dev);
2049         }
2050
2051 out:
2052         TRACE_EXIT_RES(res);
2053         return res;
2054 }
2055
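/*
 * DEV_DONE state handler: runs scst_done_cmd_check() and then the dev
 * handler's dev_done() callback, mapping its result onto the next cmd
 * state (XMIT_RESP by default).
 */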
2056 static int scst_dev_done(struct scst_cmd *cmd)
2057 {
2058         int res = SCST_CMD_STATE_RES_CONT_SAME;
2059         int state;
2060         int atomic = scst_cmd_atomic(cmd);
2061
2062         TRACE_ENTRY();
2063
2064         if (atomic && !cmd->dev->handler->dev_done_atomic &&
2065             cmd->dev->handler->dev_done) 
2066         {
2067                 TRACE_DBG("Dev handler %s dev_done() can not be "
2068                       "called in atomic context, rescheduling to the thread",
2069                       cmd->dev->handler->name);
2070                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2071                 goto out;
2072         }
2073
2074         if (scst_done_cmd_check(cmd, &res))
2075                 goto out;
2076
2077         state = SCST_CMD_STATE_XMIT_RESP;
2078         if (likely(!scst_is_cmd_local(cmd)) && 
2079             likely(cmd->dev->handler->dev_done != NULL))
2080         {
2081                 int rc;
2082                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2083                       cmd->dev->handler->name, cmd);
2084                 rc = cmd->dev->handler->dev_done(cmd);
2085                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2086                       cmd->dev->handler->name, rc);
2087                 if (rc != SCST_CMD_STATE_DEFAULT)
2088                         state = rc;
2089         }
2090
2091         switch (state) {
2092         case SCST_CMD_STATE_REINIT:
2093                 cmd->state = state;
2094                 res = SCST_CMD_STATE_RES_RESTART;
2095                 break;
2096
2097         case SCST_CMD_STATE_DEV_PARSE:
2098         case SCST_CMD_STATE_PREPARE_SPACE:
2099         case SCST_CMD_STATE_RDY_TO_XFER:
2100         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2101         case SCST_CMD_STATE_DEV_DONE:
2102         case SCST_CMD_STATE_XMIT_RESP:
2103         case SCST_CMD_STATE_FINISHED:
2104                 cmd->state = state;
2105                 res = SCST_CMD_STATE_RES_CONT_SAME;
2106                 break;
2107
2108         case SCST_CMD_STATE_NEED_THREAD_CTX:
2109                 TRACE_DBG("Dev handler %s dev_done() requested "
2110                       "thread context, rescheduling",
2111                       cmd->dev->handler->name);
2112                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2113                 break;
2114
2115         default:
2116                 if (state >= 0) {
2117                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2118                                 "invalid cmd state %d", 
2119                                 cmd->dev->handler->name, state);
2120                 } else {
2121                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2122                                 "error %d", cmd->dev->handler->name, 
2123                                 state);
2124                 }
2125                 scst_set_cmd_error(cmd,
2126                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2127                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2128                 res = SCST_CMD_STATE_RES_CONT_SAME;
2129                 break;
2130         }
2131
2132 out:
2133         TRACE_EXIT_HRES(res);
2134         return res;
2135 }
2136
2137 static int scst_xmit_response(struct scst_cmd *cmd)
2138 {
2139         int res, rc;
2140         int atomic = scst_cmd_atomic(cmd);
2141
2142         TRACE_ENTRY();
2143
2144         /* 
2145          * Check here also in order to avoid unnecessary delays of other
2146          * commands.
2147          */
2148         if (unlikely(cmd->sent_to_midlev == 0) &&
2149             (cmd->tgt_dev != NULL))
2150         {
2151                 TRACE(TRACE_SCSI_SERIALIZING,
2152                       "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
2153                 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
2154                 cmd->sent_to_midlev = 1;
2155         }
2156
2157         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2158                 TRACE_DBG("%s", "xmit_response() can not be "
2159                       "called in atomic context, rescheduling to the thread");
2160                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2161                 goto out;
2162         }
2163
2164         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2165         smp_mb__after_set_bit();
2166
2167         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2168                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2169                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2170                                 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
2171                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2172                 }
2173         }
2174
2175         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2176                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
2177                         cmd, cmd->tag);
2178                 cmd->state = SCST_CMD_STATE_FINISHED;
2179                 res = SCST_CMD_STATE_RES_CONT_SAME;
2180                 goto out;
2181         }
2182
2183 #ifdef DEBUG_TM
2184         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2185                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2186                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2187                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2188                         goto out;
2189                 }
2190                 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2191                         cmd, cmd->tag);
2192                 schedule_timeout_uninterruptible(HZ);
2193         }
2194 #endif
2195
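        /*
         * Call the target driver's xmit_response(); on QUEUE_FULL either
         * queue the cmd for a later retry or loop and submit it again,
         * as decided by scst_queue_retry_cmd().
         */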
2196         while (1) {
2197                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2198
2199                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2200                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2201
2202                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2203
2204 #if defined(DEBUG) || defined(TRACING)
2205                 if (cmd->sg) {
2206                         int i;
2207                         struct scatterlist *sg = cmd->sg;
2208                         TRACE(TRACE_SEND_BOT, 
2209                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2210                               cmd->sg_cnt, sg, (void*)sg[0].page);
2211                         for(i = 0; i < cmd->sg_cnt; ++i) {
2212                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2213                                     "Xmitting sg", page_address(sg[i].page),
2214                                     sg[i].length);
2215                         }
2216                 }
2217 #endif
2218
2219 #ifdef DEBUG_RETRY
2220                 if (((scst_random() % 100) == 77))
2221                         rc = SCST_TGT_RES_QUEUE_FULL;
2222                 else
2223 #endif
2224                         rc = cmd->tgtt->xmit_response(cmd);
2225                 TRACE_DBG("xmit_response() returned %d", rc);
2226
2227                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2228                         goto out;
2229
2230                 /* Restore the previous state */
2231                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2232
2233                 switch (rc) {
2234                 case SCST_TGT_RES_QUEUE_FULL:
2235                 {
2236                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2237                                 break;
2238                         else
2239                                 continue;
2240                 }
2241
2242                 case SCST_TGT_RES_NEED_THREAD_CTX:
2243                 {
2244                         TRACE_DBG("Target driver %s xmit_response() "
2245                               "requested thread context, rescheduling",
2246                               cmd->tgtt->name);
2247                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2248                         break;
2249                 }
2250
2251                 default:
2252                         goto out_error;
2253                 }
2254                 break;
2255         }
2256
2257 out:
2258         /* Caution: cmd can be already dead here */
2259         TRACE_EXIT_HRES(res);
2260         return res;
2261
2262 out_error:
2263         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2264                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2265                         "fatal error", cmd->tgtt->name);
2266         } else {
2267                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2268                         "invalid value %d", cmd->tgtt->name, rc);
2269         }
2270         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2271         cmd->state = SCST_CMD_STATE_FINISHED;
2272         res = SCST_CMD_STATE_RES_CONT_SAME;
2273         goto out;
2274 }
2275
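/*
 * Final accounting for a cmd: releases its accounted buffer memory,
 * removes it from the cmd lists, updates the per-tgt_dev and per-session
 * counters, completes a pending mgmt cmd, if any, and frees the cmd.
 */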
2276 static int scst_finish_cmd(struct scst_cmd *cmd)
2277 {
2278         int res;
2279
2280         TRACE_ENTRY();
2281
2282         if (cmd->mem_checked) {
2283                 spin_lock_bh(&scst_cmd_mem_lock);
2284                 scst_cur_cmd_mem -= cmd->bufflen;
2285                 spin_unlock_bh(&scst_cmd_mem_lock);
2286         }
2287
2288         spin_lock_irq(&scst_list_lock);
2289
2290         TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2291         list_del(&cmd->cmd_list_entry);
2292
2293         if (cmd->mgmt_cmnd)
2294                 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2295
2296         if (likely(cmd->tgt_dev != NULL))
2297                 cmd->tgt_dev->cmd_count--;
2298
2299         cmd->sess->sess_cmd_count--;
2300
2301         list_del(&cmd->search_cmd_list_entry);
2302
2303         spin_unlock_irq(&scst_list_lock);
2304
2305         scst_free_cmd(cmd);
2306
2307         res = SCST_CMD_STATE_RES_CONT_NEXT;
2308
2309         TRACE_EXIT_HRES(res);
2310         return res;
2311 }
2312
2313 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2314 {
2315         TRACE_ENTRY();
2316
2317         BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2318
2319         cmd->state = SCST_CMD_STATE_FINISHED;
2320         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2321
2322         TRACE_EXIT();
2323         return;
2324 }
2325
2326 /*
2327  * Returns 0 on success, > 0 when we need to wait for unblock,
2328  * < 0 if there is no device (lun) or device type handler.
2329  * Called under scst_list_lock and IRQs disabled
2330  */
2331 static int scst_translate_lun(struct scst_cmd *cmd)
2332 {
2333         struct scst_tgt_dev *tgt_dev = NULL;
2334         int res = 0;
2335
2336         TRACE_ENTRY();
2337
2338         scst_inc_cmd_count();   
2339
2340         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2341                 res = -1;
2342                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2343                       (uint64_t)cmd->lun);
2344                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2345                                     sess_tgt_dev_list_entry) 
2346                 {
2347                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2348                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2349
2350                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2351                                         PRINT_INFO_PR("Dev handler for device "
2352                                           "%Ld is NULL, the device will not be "
2353                                           "visible remotely", (uint64_t)cmd->lun);
2354                                         break;
2355                                 }
2356                                 
2357                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2358                                         cmd->tgt_dev_saved->cmd_count--;
2359                                         TRACE(TRACE_SCSI_SERIALIZING,
2360                                               "SCST_CMD_STATE_REINIT: "
2361                                               "incrementing expected_sn on tgt_dev_saved %p",
2362                                               cmd->tgt_dev_saved);
2363                                         scst_inc_expected_sn_unblock(
2364                                                 cmd->tgt_dev_saved, cmd, 1);
2365                                 }
2366                                 cmd->tgt_dev = tgt_dev;
2367                                 tgt_dev->cmd_count++;
2368                                 cmd->dev = tgt_dev->acg_dev->dev;
2369
2370                                 /* ToDo: cmd->queue_type */
2371
2372                                 /* scst_list_lock is enough to protect that */
2373                                 cmd->sn = tgt_dev->next_sn;
2374                                 tgt_dev->next_sn++;
2375
2376                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2377                                         "cmd->sn: %d", cmd->sn);
2378
2379                                 res = 0;
2380                                 break;
2381                         }
2382                 }
2383                 if (res != 0) {
2384                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2385                                 "nonexistent LU?", (uint64_t)cmd->lun);
2386                         scst_dec_cmd_count();
2387                 }
2388         } else {
2389                 if ( !cmd->sess->waiting) {
2390                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2391                               cmd->sess);
2392                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2393                                       &scst_dev_wait_sess_list);
2394                         cmd->sess->waiting = 1;
2395                 }
2396                 scst_dec_cmd_count();
2397                 res = 1;
2398         }
2399
2400         TRACE_EXIT_RES(res);
2401         return res;
2402 }
2403
2404 /* Called under scst_list_lock and IRQs disabled */
2405 static int scst_process_init_cmd(struct scst_cmd *cmd)
2406 {
2407         int res = 0;
2408
2409         TRACE_ENTRY();
2410
2411         res = scst_translate_lun(cmd);
2412         if (likely(res == 0)) {
2413                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2414                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
2415                         TRACE(TRACE_RETRY, "Too many pending commands in "
2416                                 "session, returning BUSY to initiator \"%s\"",
2417                                 (cmd->sess->initiator_name[0] == '\0') ?
2418                                   "Anonymous" : cmd->sess->initiator_name);
2419                         scst_set_busy(cmd);
2420                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2421                 }
2422                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2423                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2424         } else if (res < 0) {
2425                 TRACE_DBG("Finishing cmd %p", cmd);
2426                 scst_set_cmd_error(cmd,
2427                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2428                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2429                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2430                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2431         }
2432
2433         TRACE_EXIT_RES(res);
2434         return res;
2435 }
2436
2437 /* 
2438  * Called under scst_list_lock and IRQs disabled
2439  * We don't drop it anywhere inside, because command execution
2440  * has to be serialized, i.e. commands must be executed in order
2441  * of their arrival, and we set this order inside scst_translate_lun().
2442  */
2443 static int scst_do_job_init(struct list_head *init_cmd_list)
2444 {
2445         int res = 1;
2446
2447         TRACE_ENTRY();
2448
2449         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2450                 while (!list_empty(init_cmd_list)) {
2451                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2452                                                           typeof(*cmd),
2453                                                           cmd_list_entry);
2454                         res = scst_process_init_cmd(cmd);
2455                         if (res > 0)
2456                                 break;
2457                 }
2458         }
2459
2460         TRACE_EXIT_RES(res);
2461         return res;
2462 }
2463
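/*
 * Core state machine: runs the cmd through DEV_PARSE -> PREPARE_SPACE ->
 * RDY_TO_XFER -> SEND_TO_MIDLEV -> DEV_DONE -> XMIT_RESP -> FINISHED until
 * a handler asks to continue with the next cmd, needs thread context or
 * requests a restart.
 */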
2464 /* Called with no locks held */
2465 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2466         int left_locked)
2467 {
2468         int res;
2469
2470         TRACE_ENTRY();
2471
2472 #ifdef EXTRACHECKS
2473         BUG_ON(in_irq());
2474 #endif
2475
2476         cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2477                         SCST_CONTEXT_DIRECT_ATOMIC);
2478         cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2479
2480         do {
2481                 switch (cmd->state) {
2482                 case SCST_CMD_STATE_DEV_PARSE:
2483                         res = scst_parse_cmd(cmd);
2484                         break;
2485
2486                 case SCST_CMD_STATE_PREPARE_SPACE:
2487                         res = scst_prepare_space(cmd);
2488                         break;
2489
2490                 case SCST_CMD_STATE_RDY_TO_XFER:
2491                         res = scst_rdy_to_xfer(cmd);
2492                         break;
2493
2494                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2495                         res = scst_send_to_midlev(cmd);
2496                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2497                         break;
2498
2499                 case SCST_CMD_STATE_DEV_DONE:
2500                         res = scst_dev_done(cmd);
2501                         break;
2502
2503                 case SCST_CMD_STATE_XMIT_RESP:
2504                         res = scst_xmit_response(cmd);
2505                         break;
2506
2507                 case SCST_CMD_STATE_FINISHED:
2508                         res = scst_finish_cmd(cmd);
2509                         break;
2510
2511                 default:
2512                         PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2513                                cmd, cmd->state);
2514                         BUG();
2515                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2516                         break;
2517                 }
2518         } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2519
2520         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2521                 if (left_locked)
2522                         spin_lock_irq(&scst_list_lock);
2523         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2524                 spin_lock_irq(&scst_list_lock);
2525
2526                 switch (cmd->state) {
2527                 case SCST_CMD_STATE_DEV_PARSE:
2528                 case SCST_CMD_STATE_PREPARE_SPACE:
2529                 case SCST_CMD_STATE_RDY_TO_XFER:
2530                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2531                 case SCST_CMD_STATE_DEV_DONE:
2532                 case SCST_CMD_STATE_XMIT_RESP:
2533                 case SCST_CMD_STATE_FINISHED:
2534                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2535                         list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2536                         break;
2537 #ifdef EXTRACHECKS
2538                 /* states that are not valid at this point */
2539                 case SCST_CMD_STATE_DEFAULT:
2540                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2541                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2542                                 "useful list (left on scst cmd list)", cmd, 
2543                                 cmd->state);
2544                         spin_unlock_irq(&scst_list_lock);
2545                         BUG();
2546                         spin_lock_irq(&scst_list_lock);
2547                         break;
2548 #endif
2549                 default:
2550                         break;
2551                 }
2552                 cmd->non_atomic_only = 1;
2553                 if (!left_locked)
2554                         spin_unlock_irq(&scst_list_lock);
2555                 wake_up(&scst_list_waitQ);
2556         } else if (res == SCST_CMD_STATE_RES_RESTART) {
2557                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2558                         spin_lock_irq(&scst_list_lock);
2559                         TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2560                         list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2561                         if (!left_locked)
2562                                 spin_unlock_irq(&scst_list_lock);
2563                 } else
2564                         BUG();
2565         } else
2566                 BUG();
2567
2568         TRACE_EXIT_RES(res);
2569         return res;
2570 }
2571
2572 /* Called under scst_list_lock and IRQs disabled */
2573 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2574 {
2575         int res;
2576         struct scst_cmd *cmd;
2577         int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2578                         SCST_CONTEXT_DIRECT_ATOMIC);
2579
2580         TRACE_ENTRY();
2581
2582 #ifdef EXTRACHECKS
2583         {
2584                 int c = (context & ~SCST_PROCESSIBLE_ENV);
2585                 WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) && 
2586                         (c != SCST_CONTEXT_DIRECT));
2587         }
2588 #endif
2589
2590         tm_dbg_check_released_cmds();
2591
2592 restart:
2593         list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2594                 if (atomic && cmd->non_atomic_only) {
2595                         TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2596                         continue;
2597                 }
2598                 if (tm_dbg_check_cmd(cmd) != 0)
2599                         goto restart;
2600                 res = scst_process_active_cmd(cmd, context, NULL, 1);
2601                 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2602                         goto restart;
2603                 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2604                         goto restart;
2605                 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2606                         break;
2607                 } else
2608                         BUG();
2609         }
2610
2611         TRACE_EXIT();
2612         return;
2613 }
2614
2615 static inline int test_cmd_lists(void)
2616 {
2617         int res = !list_empty(&scst_active_cmd_list) ||
2618             (!list_empty(&scst_init_cmd_list) &&
2619              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2620             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2621             unlikely(scst_shut_threads_count > 0) ||
2622             tm_dbg_is_release();
2623         return res;
2624 }
2625
2626 int scst_cmd_thread(void *arg)
2627 {
2628         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2629         int n;
2630
2631         TRACE_ENTRY();
2632
2633         spin_lock(&lock);
2634         n = scst_thread_num++;
2635         spin_unlock(&lock);
2636         daemonize("scsi_tgt%d", n);
2637         recalc_sigpending();
2638         set_user_nice(current, 10);
2639         current->flags |= PF_NOFREEZE;
2640
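        /*
         * Main loop: sleep on scst_list_waitQ until test_cmd_lists() reports
         * work, then process the init and active cmd lists. The loop exits
         * on shutdown or when the number of threads is being reduced.
         */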
2641         spin_lock_irq(&scst_list_lock);
2642         while (1) {
2643                 wait_queue_t wait;
2644                 init_waitqueue_entry(&wait, current);
2645
2646                 if (!test_cmd_lists()) {
2647                         add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2648                         for (;;) {
2649                                 set_current_state(TASK_INTERRUPTIBLE);
2650                                 if (test_cmd_lists())
2651                                         break;
2652                                 spin_unlock_irq(&scst_list_lock);
2653                                 schedule();
2654                                 spin_lock_irq(&scst_list_lock);
2655                         }
2656                         set_current_state(TASK_RUNNING);
2657                         remove_wait_queue(&scst_list_waitQ, &wait);
2658                 }
2659
2660                 scst_do_job_init(&scst_init_cmd_list);
2661                 scst_do_job_active(&scst_active_cmd_list,
2662                                    SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
2663
2664                 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2665                     list_empty(&scst_cmd_list) &&
2666                     list_empty(&scst_active_cmd_list) &&
2667                     list_empty(&scst_init_cmd_list)) {
2668                         break;
2669                 }
2670                 
2671                 if (unlikely(scst_shut_threads_count > 0)) {
2672                         scst_shut_threads_count--;
2673                         break;
2674                 }
2675         }
2676         spin_unlock_irq(&scst_list_lock);
2677
2678         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2679                 smp_mb__after_atomic_dec();
2680                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2681                 up(scst_shutdown_mutex);
2682         }
2683
2684         TRACE_EXIT();
2685         return 0;
2686 }
2687
2688 void scst_cmd_tasklet(long p)
2689 {
2690         TRACE_ENTRY();
2691
2692         spin_lock_irq(&scst_list_lock);
2693
2694         scst_do_job_init(&scst_init_cmd_list);
2695         scst_do_job_active(&scst_active_cmd_list, 
2696                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2697
2698         spin_unlock_irq(&scst_list_lock);
2699
2700         TRACE_EXIT();
2701         return;
2702 }
2703
2704 /*
2705  * Returns 0 on success, < 0 if there is no device handler or
2706  * > 0 if SCST_FLAG_SUSPENDED set.
2707  */
2708 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2709 {
2710         struct scst_tgt_dev *tgt_dev = NULL;
2711         int res = -1;
2712
2713         TRACE_ENTRY();
2714
2715         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2716               (uint64_t)mcmd->lun);
2717
2718         spin_lock_irq(&scst_list_lock);
2719         scst_inc_cmd_count();   
2720         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2721                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2722                                     sess_tgt_dev_list_entry) 
2723                 {
2724                         if (tgt_dev->acg_dev->lun == mcmd->lun) {
2725                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2726                                 mcmd->mcmd_tgt_dev = tgt_dev;
2727                                 res = 0;
2728                                 break;
2729                         }
2730                 }
2731                 if (mcmd->mcmd_tgt_dev == NULL)
2732                         scst_dec_cmd_count();
2733         } else {
2734                 if ( !mcmd->sess->waiting) {
2735                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2736                               mcmd->sess);
2737                         list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2738                                       &scst_dev_wait_sess_list);
2739                         mcmd->sess->waiting = 1;
2740                 }
2741                 scst_dec_cmd_count();
2742                 res = 1;
2743         }
2744         spin_unlock_irq(&scst_list_lock);
2745
2746         TRACE_EXIT_HRES(res);
2747         return res;
2748 }
2749
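/*
 * Accounts a finished cmd against the mgmt cmd waiting for it; once the
 * last awaited cmd completes, the mgmt cmd is marked DONE, requeued on the
 * active mgmt cmd list if appropriate, and the mgmt thread is woken up.
 */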
2750 /* Called under scst_list_lock and IRQ off */
2751 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2752         struct scst_mgmt_cmd *mcmd)
2753 {
2754         TRACE_ENTRY();
2755
2756         TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2757                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2758                 mcmd->cmd_wait_count);
2759
2760         cmd->mgmt_cmnd = NULL;
2761
2762         if (cmd->completed)
2763                 mcmd->completed_cmd_count++;
2764
2765         mcmd->cmd_wait_count--;
2766         if (mcmd->cmd_wait_count > 0) {
2767                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2768                         mcmd->cmd_wait_count);
2769                 goto out;
2770         }
2771
2772         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2773
2774         if (mcmd->completed) {
2775                 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2776                         mcmd);
2777                 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2778                         &scst_active_mgmt_cmd_list);
2779         }
2780
2781         wake_up(&scst_mgmt_cmd_list_waitQ);
2782
2783 out:
2784         TRACE_EXIT();
2785         return;
2786 }
2787
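/*
 * Invokes the dev handler's task_mgmt_fn() callback, if provided, with BHs
 * disabled; optionally translates its result into mcmd->status.
 */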
2788 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2789         struct scst_tgt_dev *tgt_dev, int set_status)
2790 {
2791         int res = SCST_DEV_TM_NOT_COMPLETED;
2792         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2793                 int irq = irqs_disabled();
2794                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2795                         tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2796 #ifdef EXTRACHECKS
2797                 BUG_ON(in_irq());
2798 #endif
2799                 if (!irq)
2800                         local_bh_disable();
2801                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2802                         tgt_dev);
2803                 if (!irq)
2804                         local_bh_enable();
2805                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2806                       tgt_dev->acg_dev->dev->handler->name, res);
2807                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2808                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2809                                                 SCST_MGMT_STATUS_SUCCESS :
2810                                                 SCST_MGMT_STATUS_FAILED;
2811                 }
2812         }
2813         return res;
2814 }
2815
2816 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2817 {
2818         switch(mgmt_fn) {
2819                 case SCST_ABORT_TASK:
2820                 case SCST_ABORT_TASK_SET:
2821                 case SCST_CLEAR_TASK_SET:
2822                         return 1;
2823                 default:
2824                         return 0;
2825         }
2826 }
2827
2828 /* 
2829  * Called under scst_list_lock and IRQ off (to protect cmd
2830  * from being destroyed) + BHs also off
2831  * If the cmd is still being executed or xmitted, completion of mcmd is deferred
2832  */
2833 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2834         int other_ini, int call_dev_task_mgmt_fn)
2835 {
2836         TRACE_ENTRY();
2837
2838         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2839
2840         if (other_ini) {
2841                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2842                 smp_mb__after_set_bit();
2843         }
2844         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2845         smp_mb__after_set_bit();
2846
2847         if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2848                  scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2849
2850         if (mcmd) {
2851                 int defer;
2852                 if (cmd->tgtt->tm_sync_reply)
2853                         defer = 1;
2854                 else {
2855                         if (scst_is_strict_mgmt_fn(mcmd->fn))
2856                                 defer = test_bit(SCST_CMD_EXECUTING,
2857                                         &cmd->cmd_flags);
2858                         else
2859                                 defer = test_bit(SCST_CMD_XMITTING,
2860                                         &cmd->cmd_flags);
2861                 }
2862
2863                 if (defer) {
2864                         /*
2865                          * Delay the response until the command's finish in
2866                          * order to guarantee that "no further responses from
2867                          * the task are sent to the SCSI initiator port" after
2868                          * response from the TM function is sent (SAM)
2869                          */
2870                         TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2871                                 "xmitted (state %d), deferring ABORT...", cmd,
2872                                 cmd->tag, cmd->state);
2873 #ifdef EXTRACHECKS
2874                         if (cmd->mgmt_cmnd) {
2875                                 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2876                                         "has non-NULL mgmt_cmnd %p!!! Current "
2877                                         "mcmd %p\n", cmd, cmd->tag, cmd->state,
2878                                         cmd->mgmt_cmnd, mcmd);
2879                         }
2880 #endif
2881                         BUG_ON(cmd->mgmt_cmnd);
2882                         mcmd->cmd_wait_count++;
2883                         cmd->mgmt_cmnd = mcmd;
2884                 }
2885         }
2886
2887         tm_dbg_release_cmd(cmd);
2888
2889         TRACE_EXIT();
2890         return;
2891 }
2892
2893 /* Called under scst_list_lock and IRQ off. Returns -1 if mcmd must wait for aborted cmds, 0 otherwise */
2894 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2895 {
2896         int res;
2897         if (mcmd->cmd_wait_count != 0) {
2898                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2899                         "wait", mcmd->cmd_wait_count);
2900                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2901                 res = -1;
2902         } else {
2903                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2904                 res = 0;
2905         }
2906         mcmd->completed = 1;
2907         return res;
2908 }
2909
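/*
 * Moves all aborted commands from the devices' blocked cmd lists back to the
 * active cmd list and wakes up the processing threads if anything was moved.
 * Takes scst_mutex itself unless scst_mutex_held is set.
 */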
2910 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2911 {
2912         struct scst_device *dev;
2913         int wake = 0;
2914
2915         TRACE_ENTRY();
2916
2917         if (!scst_mutex_held)
2918                 down(&scst_mutex);
2919
2920         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2921                 struct scst_cmd *cmd, *tcmd;
2922                 spin_lock_bh(&dev->dev_lock);
2923                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2924                                         blocked_cmd_list_entry) {
2925                         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2926                                 list_del(&cmd->blocked_cmd_list_entry);
2927                                 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2928                                         "to active cmd list", cmd);
2929                                 spin_lock_irq(&scst_list_lock);
2930                                 list_move_tail(&cmd->cmd_list_entry,
2931                                         &scst_active_cmd_list);
2932                                 spin_unlock_irq(&scst_list_lock);
2933                                 wake = 1;
2934                         }
2935                 }
2936                 spin_unlock_bh(&dev->dev_lock);
2937         }
2938
2939         if (!scst_mutex_held)
2940                 up(&scst_mutex);
2941
2942         if (wake)
2943                 wake_up(&scst_list_waitQ);
2944
2945         TRACE_EXIT();
2946         return;
2947 }
2948
2949 /* Aborts all commands of the session that match tgt_dev */
2950 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2951         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2952 {
2953         struct scst_cmd *cmd;
2954         struct scst_session *sess = tgt_dev->sess;
2955
2956         TRACE_ENTRY();
2957
2958         local_bh_disable();
2959         spin_lock_irq(&scst_list_lock);
2960
2961         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2962         list_for_each_entry(cmd, &sess->search_cmd_list, 
2963                         search_cmd_list_entry) {
2964                 /* Also abort cmds not yet assigned a tgt_dev for this LUN */
2965                 if ((cmd->tgt_dev != tgt_dev) &&
2966                     !((cmd->tgt_dev == NULL) &&
2967                       (cmd->lun == tgt_dev->acg_dev->lun)))
2968                         continue;
2969                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2970         }
2971         spin_unlock_irq(&scst_list_lock);
2972         local_bh_enable();
2973
2974         scst_unblock_aborted_cmds(scst_mutex_held);
2975
2976         TRACE_EXIT();
2977         return;
2978 }
2979
2980 /* Returns 0 if the command processing should be continued, <0 otherwise */
2981 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2982 {
2983         int res;
2984         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2985         struct scst_device *dev = tgt_dev->acg_dev->dev;
2986
2987         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2988                 tgt_dev->acg_dev->lun, mcmd);
2989
2990         spin_lock_bh(&dev->dev_lock);
2991         __scst_block_dev(dev);
2992         spin_unlock_bh(&dev->dev_lock);
2993
2994         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2995         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2996
2997         res = scst_set_mcmd_next_state(mcmd);
2998
2999         TRACE_EXIT_RES(res);
3000         return res;
3001 }
3002
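/*
 * If another TM command is already active (SCST_FLAG_TM_ACTIVE set), moves
 * mcmd to the delayed mgmt cmd list and returns -1. Otherwise sets
 * SCST_FLAG_TM_ACTIVE and returns 0, so the processing can continue.
 */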
3003 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
3004 {
3005         /*
3006          * No need for special protection for SCST_FLAG_TM_ACTIVE, since
3007          * we can only be called from a single thread.
3008          */
3009         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
3010                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
3011                         mcmd);
3012                 if (!locked)
3013                         spin_lock_irq(&scst_list_lock);
3014                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
3015                         &scst_delayed_mgmt_cmd_list);
3016                 if (!locked)
3017                         spin_unlock_irq(&scst_list_lock);
3018                 return -1;
3019         } else {
3020                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3021                 return 0;
3022         }
3023 }
3024
3025 /* Returns 0 if the command processing should be continued, 
3026  * >0 if it should be requeued, <0 otherwise */
3027 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3028 {
3029         int res = 0;
3030
3031         TRACE_ENTRY();
3032
3033         res = scst_check_delay_mgmt_cmd(mcmd, 1);
3034         if (res != 0)
3035                 goto out;
3036
3037         if (mcmd->fn == SCST_ABORT_TASK) {
3038                 struct scst_session *sess = mcmd->sess;
3039                 struct scst_cmd *cmd;
3040
3041                 local_bh_disable();
3042                 spin_lock_irq(&scst_list_lock);
3043                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3044                 if (cmd == NULL) {
3045                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3046                                 "tag %d not found", mcmd->tag);
3047                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3048                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3049                 } else {
3050                         TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
3051                                 "aborting it", cmd, mcmd->tag, cmd->sn);
3052                         mcmd->cmd_to_abort = cmd;
3053                         scst_abort_cmd(cmd, mcmd, 0, 1);
3054                         res = scst_set_mcmd_next_state(mcmd);
3055                         mcmd->cmd_to_abort = NULL; /* just in case */
3056                 }
3057                 spin_unlock_irq(&scst_list_lock);
3058                 local_bh_enable();
3059         } else {
3060                 int rc;
3061                 rc = scst_mgmt_translate_lun(mcmd);
3062                 if (rc < 0) {
3063                         PRINT_ERROR_PR("Corresponding device for lun %llu not "
3064                                 "found", (unsigned long long)mcmd->lun);
3065                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3066                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3067                 } else if (rc == 0)
3068                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
3069                 else
3070                         res = rc;
3071         }
3072
3073 out:
3074         TRACE_EXIT_RES(res);
3075         return res;
3076 }
3077
3078 /* Returns 0 if the command processing should be continued, <0 otherwise */
3079 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3080 {
3081         int res, rc;
3082         struct scst_device *dev, *d;
3083         struct scst_tgt_dev *tgt_dev;
3084         int cont, c;
3085         LIST_HEAD(host_devs);
3086
3087         TRACE_ENTRY();
3088
3089         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3090                 mcmd, mcmd->sess->sess_cmd_count);
3091
3092         down(&scst_mutex);
3093
3094         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3095                 int found = 0;
3096
3097                 spin_lock_bh(&dev->dev_lock);
3098                 __scst_block_dev(dev);
3099                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3100                 spin_unlock_bh(&dev->dev_lock);
3101
3102                 cont = 0;
3103                 c = 0;
3104                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3105                         dev_tgt_dev_list_entry) 
3106                 {
3107                         cont = 1;
3108                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3109                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3110                                 c = 1;
3111                         else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3112                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3113                 }
3114                 if (cont && !c)
3115                         continue;
3116                 
3117                 if (dev->scsi_dev == NULL)
3118                         continue;
3119
3120                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3121                         if (dev->scsi_dev->host->host_no ==
3122                                     d->scsi_dev->host->host_no) 
3123                         {
3124                                 found = 1;
3125                                 break;
3126                         }
3127                 }
3128                 if (!found)
3129                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3130         }
3131
3132         /*
3133          * We assume here that completion callbacks will be called on/after
3134          * scsi_reset_provider() for all commands already sent to the devices.
3135          */
3136
3137         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3138                 /* dev->scsi_dev must be non-NULL here */
3139                 TRACE(TRACE_MGMT, "Resetting host %d bus",
3140                       dev->scsi_dev->host->host_no);
3141                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3142                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3143                       dev->scsi_dev->host->host_no,
3144                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3145                 if (rc != SUCCESS) {
3146                         /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3147                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3148                 }
3149         }
3150
3151         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3152                 if (dev->scsi_dev != NULL)
3153                         dev->scsi_dev->was_reset = 0;
3154         }
3155
3156         up(&scst_mutex);
3157
3158         spin_lock_irq(&scst_list_lock);
3159         tm_dbg_task_mgmt("TARGET RESET");
3160         res = scst_set_mcmd_next_state(mcmd);
3161         spin_unlock_irq(&scst_list_lock);
3162
3163         TRACE_EXIT_RES(res);
3164         return res;
3165 }
3166
3167 /* Returns 0 if the command processing should be continued, <0 otherwise */
3168 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3169 {
3170         int res, rc;
3171         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3172         struct scst_device *dev = tgt_dev->acg_dev->dev;
3173
3174         TRACE_ENTRY();
3175
3176         TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3177                 mcmd);
3178
3179         spin_lock_bh(&dev->dev_lock);
3180         __scst_block_dev(dev);
3181         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3182         spin_unlock_bh(&dev->dev_lock);
3183
3184         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3185         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3186                 goto out_tm_dbg;
3187
3188         if (dev->scsi_dev != NULL) {
3189                 TRACE(TRACE_MGMT, "Resetting SCSI device (host %d)",
3190                       dev->scsi_dev->host->host_no);
3191                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3192                 if (rc != SUCCESS)
3193                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3194                 dev->scsi_dev->was_reset = 0;
3195         }
3196
3197 out_tm_dbg:
3198         spin_lock_irq(&scst_list_lock);
3199         tm_dbg_task_mgmt("LUN RESET");
3200         res = scst_set_mcmd_next_state(mcmd);
3201         spin_unlock_irq(&scst_list_lock);
3202
3203         TRACE_EXIT_RES(res);
3204         return res;
3205 }
3206
3207 /* Returns 0 if the command processing should be continued, <0 otherwise */
3208 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3209         int nexus_loss)
3210 {
3211         int res;
3212         struct scst_session *sess = mcmd->sess;
3213         struct scst_tgt_dev *tgt_dev;
3214
3215         TRACE_ENTRY();
3216
3217         if (nexus_loss) {
3218                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3219                         mcmd);
3220         } else {
3221                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3222                         mcmd);
3223         }
3224
3225         down(&scst_mutex);
3226         list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3227                 sess_tgt_dev_list_entry) 
3228         {
3229                 struct scst_device *dev = tgt_dev->acg_dev->dev;
3230                 int rc;
3231
3232                 spin_lock_bh(&dev->dev_lock);
3233                 __scst_block_dev(dev);
3234                 spin_unlock_bh(&dev->dev_lock);
3235
3236                 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3237                 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3238                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3239
3240                 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3241                 if (nexus_loss)
3242                         scst_reset_tgt_dev(tgt_dev, 1);
3243         }
3244         up(&scst_mutex);
3245
3246         spin_lock_irq(&scst_list_lock);
3247         res = scst_set_mcmd_next_state(mcmd);
3248         spin_unlock_irq(&scst_list_lock);
3249
3250         TRACE_EXIT_RES(res);
3251         return res;
3252 }
3253
3254 /* Returns 0 if the command processing should be continued, <0 otherwise */
3255 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3256         int nexus_loss)
3257 {
3258         int res;
3259         struct scst_tgt *tgt = mcmd->sess->tgt;
3260         struct scst_session *sess;
3261         struct scst_device *dev;
3262         struct scst_tgt_dev *tgt_dev;
3263
3264         TRACE_ENTRY();
3265
3266         if (nexus_loss) {
3267                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3268                         mcmd);
3269         } else {
3270                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3271                         mcmd);
3272         }
3273
3274         down(&scst_mutex);
3275
3276         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3277                 spin_lock_bh(&dev->dev_lock);
3278                 __scst_block_dev(dev);
3279                 spin_unlock_bh(&dev->dev_lock);
3280         }
3281
3282         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3283                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3284                         sess_tgt_dev_list_entry) 
3285                 {
3286                         int rc;
3287
3288                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3289                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3290                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3291
3292                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3293                         if (nexus_loss)
3294                                 scst_reset_tgt_dev(tgt_dev, 1);
3295                 }
3296         }
3297
3298         up(&scst_mutex);
3299
3300         spin_lock_irq(&scst_list_lock);
3301         res = scst_set_mcmd_next_state(mcmd);
3302         spin_unlock_irq(&scst_list_lock);
3303
3304         TRACE_EXIT_RES(res);
3305         return res;
3306 }
3307
3308 /* Returns 0 if the command processing should be continued, <0 otherwise */
3309 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3310 {
3311         int res = 0;
3312
3313         TRACE_ENTRY();
3314
3315         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3316
3317         switch (mcmd->fn) {
3318         case SCST_ABORT_TASK_SET:
3319         case SCST_CLEAR_TASK_SET:
3320                 res = scst_abort_task_set(mcmd);
3321                 break;
3322
3323         case SCST_LUN_RESET:
3324                 res = scst_lun_reset(mcmd);
3325                 break;
3326
3327         case SCST_TARGET_RESET:
3328                 res = scst_target_reset(mcmd);
3329                 break;
3330
3331         case SCST_ABORT_ALL_TASKS_SESS:
3332                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3333                 break;
3334
3335         case SCST_NEXUS_LOSS_SESS:
3336                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3337                 break;
3338
3339         case SCST_ABORT_ALL_TASKS:
3340                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3341                 break;
3342
3343         case SCST_NEXUS_LOSS:
3344                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3345                 break;
3346
3347         case SCST_CLEAR_ACA:
3348                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3349                 /* Nothing to do (yet) */
3350                 break;
3351
3352         default:
3353                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3354                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3355                 break;
3356         }
3357
3358         TRACE_EXIT_RES(res);
3359         return res;
3360 }
3361
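/*
 * Completes processing of a TM command: clears SCST_FLAG_TM_ACTIVE,
 * reactivates the first delayed TM command (if any), notifies the target
 * driver via task_mgmt_fn_done() and unblocks the devices affected by the
 * function.
 */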
3362 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3363 {
3364         struct scst_device *dev;
3365         struct scst_tgt_dev *tgt_dev;
3366
3367         TRACE_ENTRY();
3368
3369         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3370         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3371                 struct scst_mgmt_cmd *m;
3372                 spin_lock_irq(&scst_list_lock);
3373                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3374                                 mgmt_cmd_list_entry);
3375                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3376                         "cmd list", m);
3377                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3378                 spin_unlock_irq(&scst_list_lock);
3379         }
3380
3381         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3382         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3383                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3384
3385         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3386                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3387                       mcmd->sess->tgt->tgtt->name);
3388                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3389                 TRACE_MGMT_DBG("Target %s task_mgmt_fn_done() returned",
3390                       mcmd->sess->tgt->tgtt->name);
3391         }
3392
3393         switch (mcmd->fn) {
3394         case SCST_ABORT_TASK_SET:
3395         case SCST_CLEAR_TASK_SET:
3396         case SCST_LUN_RESET:
3397                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3398                 break;
3399
3400         case SCST_TARGET_RESET:
3401         case SCST_ABORT_ALL_TASKS:
3402         case SCST_NEXUS_LOSS:
3403                 down(&scst_mutex);
3404                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3405                         scst_unblock_dev(dev);
3406                 }
3407                 up(&scst_mutex);
3408                 break;
3409
3410         case SCST_NEXUS_LOSS_SESS:
3411         case SCST_ABORT_ALL_TASKS_SESS:
3412                 down(&scst_mutex);
3413                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3414                                 sess_tgt_dev_list_entry) {
3415                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3416                 }
3417                 up(&scst_mutex);
3418                 break;
3419
3420         case SCST_CLEAR_ACA:
3421         default:
3422                 break;
3423         }
3424
3425         mcmd->tgt_priv = NULL;
3426
3427         TRACE_EXIT();
3428         return;
3429 }
3430
3431 /* Returns >0 if the cmd should be requeued */
3432 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3433 {
3434         int res = 0;
3435
3436         TRACE_ENTRY();
3437
3438         TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3439
3440         while (1) {
3441                 switch (mcmd->state) {
3442                 case SCST_MGMT_CMD_STATE_INIT:
3443                         res = scst_mgmt_cmd_init(mcmd);
3444                         if (res)
3445                                 goto out;
3446                         break;
3447
3448                 case SCST_MGMT_CMD_STATE_READY:
3449                         if (scst_mgmt_cmd_exec(mcmd))
3450                                 goto out;
3451                         break;
3452
3453                 case SCST_MGMT_CMD_STATE_DONE:
3454                         scst_mgmt_cmd_send_done(mcmd);
3455                         break;
3456
3457                 case SCST_MGMT_CMD_STATE_FINISHED:
3458                         goto out_free;
3459
3460 #ifdef EXTRACHECKS
3461                 case SCST_MGMT_CMD_STATE_EXECUTING:
3462                         BUG();
3463 #endif
3464
3465                 default:
3466                         PRINT_ERROR_PR("Unknown state %d of management command",
3467                                     mcmd->state);
3468                         res = -1;
3469                         goto out_free;
3470                 }
3471         }
3472
3473 out:
3474         TRACE_EXIT_RES(res);
3475         return res;
3476
3477 out_free:
3478         scst_free_mgmt_cmd(mcmd, 1);
3479         goto out;
3480 }
3481
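/*
 * Wake up condition for scst_mgmt_cmd_thread(): there are active mgmt
 * commands and the activity isn't suspended, or shutdown was requested.
 */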
3482 static inline int test_mgmt_cmd_list(void)
3483 {
3484         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3485                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3486                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3487         return res;
3488 }
3489
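/*
 * Main loop of the TM processing thread: takes commands from
 * scst_active_mgmt_cmd_list, processes them via scst_process_mgmt_cmd() and
 * requeues those that returned >0. Exits when SCST_FLAG_SHUTDOWN is set and
 * the active list is empty.
 */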
3490 int scst_mgmt_cmd_thread(void *arg)
3491 {
3492         struct scst_mgmt_cmd *mcmd;
3493
3494         TRACE_ENTRY();
3495
3496         daemonize("scsi_tgt_mc");
3497         recalc_sigpending();
3498         current->flags |= PF_NOFREEZE;
3499
3500         spin_lock_irq(&scst_list_lock);
3501         while (1) {
3502                 wait_queue_t wait;
3503                 init_waitqueue_entry(&wait, current);
3504
3505                 if (!test_mgmt_cmd_list()) {
3506                         add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3507                                                  &wait);
3508                         for (;;) {
3509                                 set_current_state(TASK_INTERRUPTIBLE);
3510                                 if (test_mgmt_cmd_list())
3511                                         break;
3512                                 spin_unlock_irq(&scst_list_lock);
3513                                 schedule();
3514                                 spin_lock_irq(&scst_list_lock);
3515                         }
3516                         set_current_state(TASK_RUNNING);
3517                         remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3518                 }
3519
3520                 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3521                        !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3522                 {
3523                         int rc;
3524                         mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3525                                           typeof(*mcmd), mgmt_cmd_list_entry);
3526                         TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3527                               mcmd);
3528                         list_move_tail(&mcmd->mgmt_cmd_list_entry,
3529                                        &scst_mgmt_cmd_list);
3530                         spin_unlock_irq(&scst_list_lock);
3531                         rc = scst_process_mgmt_cmd(mcmd);
3532                         spin_lock_irq(&scst_list_lock);
3533                         if (rc > 0) {
3534                                 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3535                                         "of active mgmt cmd list", mcmd);
3536                                 list_move(&mcmd->mgmt_cmd_list_entry,
3537                                        &scst_active_mgmt_cmd_list);
3538                         }
3539                 }
3540
3541                 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3542                     list_empty(&scst_active_mgmt_cmd_list)) 
3543                 {
3544                         break;
3545                 }
3546         }
3547         spin_unlock_irq(&scst_list_lock);
3548
3549         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3550                 smp_mb__after_atomic_dec();
3551                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3552                 up(scst_shutdown_mutex);
3553         }
3554
3555         TRACE_EXIT();
3556         return 0;
3557 }
3558
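/*
 * Allocates and initializes a new TM command for the given session. Returns
 * NULL if the target driver didn't supply task_mgmt_fn_done() or if the
 * allocation fails.
 */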
3559 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3560         *sess, int fn, int atomic, void *tgt_priv)
3561 {
3562         struct scst_mgmt_cmd *mcmd = NULL;
3563
3564         TRACE_ENTRY();
3565
3566         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3567                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3568                             "(target %s)", sess->tgt->tgtt->name);
3569                 goto out;
3570         }
3571
3572         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3573         if (mcmd == NULL)
3574                 goto out;
3575
3576         mcmd->sess = sess;
3577         mcmd->fn = fn;
3578         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3579         mcmd->tgt_priv = tgt_priv;
3580
3581 out:
3582         TRACE_EXIT();
3583         return mcmd;
3584 }
3585
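/*
 * Queues a newly received TM command for processing. If the session is still
 * initializing, the command is deferred on the session's init deferred mcmd
 * list. Returns 0 on success, -1 if the session initialization has failed.
 */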
3586 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3587         struct scst_mgmt_cmd *mcmd)
3588 {
3589         unsigned long flags;
3590         int res = 0;
3591
3592         TRACE_ENTRY();
3593
3594         scst_sess_get(sess);
3595
3596         spin_lock_irqsave(&scst_list_lock, flags);
3597
3598         sess->sess_cmd_count++;
3599
3600 #ifdef EXTRACHECKS
3601         if (unlikely(sess->shutting_down)) {
3602                 PRINT_ERROR_PR("%s",
3603                         "New mgmt cmd while shutting down the session");
3604                 BUG();
3605         }
3606 #endif
3607
3608         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3609                 switch(sess->init_phase) {
3610                 case SCST_SESS_IPH_INITING:
3611                         TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
3612                                 mcmd);
3613                         list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3614                                 &sess->init_deferred_mcmd_list);
3615                         goto out_unlock;
3616                 case SCST_SESS_IPH_SUCCESS:
3617                         break;
3618                 case SCST_SESS_IPH_FAILED:
3619                         res = -1;
3620                         goto out_unlock;
3621                 default:
3622                         BUG();
3623                 }
3624         }
3625
3626         TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3627         list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);