Pass-through mode resid handling updated
1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43         unsigned long *pflags, int left_locked)
44 {
45         int res;
46
47         TRACE_ENTRY();
48
49         TRACE_DBG("Moving cmd %p to cmd list", cmd);
50         list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
51
52         /* This is an inline func., so unneeded code will be optimized out */
53         if (pflags)
54                 spin_unlock_irqrestore(&scst_list_lock, *pflags);
55         else
56                 spin_unlock_irq(&scst_list_lock);
57
58         res = __scst_process_active_cmd(cmd, context, left_locked);
59
60         TRACE_EXIT_RES(res);
61         return res;
62 }
63
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We suppose that other CPU(s) are rather idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  * Must not be called in parallel with scst_unregister_session() for the 
87  * same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both a wrong LUN and a wrong CDB defer the error reporting
115          * to scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
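
/*
 * Illustrative sketch only (not part of SCST): how a target driver is
 * typically expected to hand a newly received SCSI command to SCST.  The
 * driver-side names (my_drv_rx_scsi_cmd, struct my_tgt_cmd, my_drv_reject)
 * are hypothetical; scst_rx_cmd() above and scst_cmd_init_done() below are
 * the real SCST entry points.
 *
 *	static void my_drv_rx_scsi_cmd(struct scst_session *sess,
 *				       struct my_tgt_cmd *drv_cmd)
 *	{
 *		struct scst_cmd *cmd;
 *
 *		// atomic == 1, since this path may run in IRQ/tasklet context
 *		cmd = scst_rx_cmd(sess, drv_cmd->lun, drv_cmd->lun_len,
 *				  drv_cmd->cdb, drv_cmd->cdb_len, 1);
 *		if (cmd == NULL) {
 *			my_drv_reject(drv_cmd);	// hypothetical out-of-memory path
 *			return;
 *		}
 *		cmd->tag = drv_cmd->tag;
 *		// SCST_CONTEXT_DIRECT* is rejected in IRQ context (see
 *		// scst_cmd_init_done() below), so ask for the tasklet context
 *		scst_cmd_init_done(cmd, SCST_CONTEXT_TASKLET);
 *	}
 */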
132
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
134 {
135         int res = 0;
136         unsigned long flags = 0;
137         struct scst_session *sess = cmd->sess;
138
139         TRACE_ENTRY();
140
141         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142         TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
143                 (uint64_t)cmd->lun, cmd->cdb_len);
144         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
145                 cmd->cdb, cmd->cdb_len);
146
147         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
149         {
150                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
152                         cmd->tgtt->name);
153                 pref_context = SCST_CONTEXT_TASKLET;
154         }
155
156         spin_lock_irqsave(&scst_list_lock, flags);
157
158         /* Let's do it here, this saves us a lock or an atomic op */
159         sess->sess_cmd_count++;
160
161         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
162
163         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164                 switch(sess->init_phase) {
165                 case SCST_SESS_IPH_SUCCESS:
166                         break;
167                 case SCST_SESS_IPH_INITING:
168                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169                         list_add_tail(&cmd->cmd_list_entry, 
170                                 &sess->init_deferred_cmd_list);
171                         goto out_unlock_flags;
172                 case SCST_SESS_IPH_FAILED:
173                         scst_set_busy(cmd);
174                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
175                         TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176                         list_add_tail(&cmd->cmd_list_entry, 
177                                 &scst_active_cmd_list);
178                         goto active;
179                 default:
180                         BUG();
181                 }
182         }
183
184         if (unlikely(cmd->lun == (lun_t)-1)) {
185                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186                 scst_set_cmd_error(cmd,
187                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
191                 goto active;
192         }
193
194         if (unlikely(cmd->cdb_len == 0)) {
195                 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196                 scst_set_cmd_error(cmd,
197                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
201                 goto active;
202         }
203
204         cmd->state = SCST_CMD_STATE_INIT;
205
206         TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207         list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
208
209         switch (pref_context) {
210         case SCST_CONTEXT_DIRECT:
211         case SCST_CONTEXT_DIRECT_ATOMIC:
212                 res = scst_do_job_init(&scst_init_cmd_list);
213                 if (res > 0)
214                         goto out_unlock_flags;
215                 break;
216
217         case SCST_CONTEXT_THREAD:
218                 goto out_thread_unlock_flags;
219
220         case SCST_CONTEXT_TASKLET:
221                 scst_schedule_tasklet();
222                 goto out_unlock_flags;
223
224         default:
225                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
226                             pref_context);
227                 goto out_thread_unlock_flags;
228         }
229
230 active:
231         switch (pref_context) {
232         case SCST_CONTEXT_DIRECT:
233         case SCST_CONTEXT_DIRECT_ATOMIC:
234                 scst_process_active_cmd(cmd, pref_context, &flags, 0);
235                 break;
236
237         case SCST_CONTEXT_THREAD:
238                 goto out_thread_unlock_flags;
239
240         case SCST_CONTEXT_TASKLET:
241                 scst_schedule_tasklet();
242                 goto out_unlock_flags;
243
244         default:
245                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
246                             pref_context);
247                 goto out_thread_unlock_flags;
248         }
249
250 out:
251         TRACE_EXIT();
252         return;
253
254 out_unlock_flags:
255         spin_unlock_irqrestore(&scst_list_lock, flags);
256         goto out;
257
258 out_thread_unlock_flags:
259         cmd->non_atomic_only = 1;
260         spin_unlock_irqrestore(&scst_list_lock, flags);
261         wake_up(&scst_list_waitQ);
262         goto out;
263 }
264
265 static int scst_parse_cmd(struct scst_cmd *cmd)
266 {
267         int res = SCST_CMD_STATE_RES_CONT_SAME;
268         int state;
269         struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270         struct scst_device *dev = cmd->dev;
271         struct scst_info_cdb cdb_info;
272         int atomic = scst_cmd_atomic(cmd);
273         int set_dir = 1;
274
275         TRACE_ENTRY();
276
277         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278                 TRACE_DBG("ABORTED set, returning ABORTED "
279                         "for cmd %p", cmd);
280                 goto out_xmit;
281         }
282
283         if (atomic && !dev->handler->parse_atomic) {
284                 TRACE_DBG("Dev handler %s parse() can not be "
285                       "called in atomic context, rescheduling to the thread",
286                       dev->handler->name);
287                 res = SCST_CMD_STATE_RES_NEED_THREAD;
288                 goto out;
289         }
290
291         /*
292          * Expected transfer data supplied by the SCSI transport via the
293          * target driver are untrusted, so we prefer to fetch them from the CDB.
294          * Additionally, not all transports support supplying the expected
295          * transfer data.
296          */
297
298         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
299                         &cdb_info) != 0)) 
300         {
301                 static int t;
302                 if (t < 10) {
303                         t++;
304                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305                                 "Should you update scst_scsi_op_table?",
306                                 cmd->cdb[0], dev->handler->name);
307                 }
308                 if (scst_cmd_is_expected_set(cmd)) {
309                         TRACE(TRACE_MINOR, "Using initiator supplied values: "
310                                 "direction %d, transfer_len %d",
311                                 cmd->expected_data_direction,
312                                 cmd->expected_transfer_len);
313                         cmd->data_direction = cmd->expected_data_direction;
314                         cmd->bufflen = cmd->expected_transfer_len;
315                         /* Restore the (most probably lost) CDB length */
316                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317                         if (cmd->cdb_len == -1) {
318                                 PRINT_ERROR_PR("Unable to get CDB length for "
319                                         "opcode 0x%02x. Returning INVALID "
320                                         "OPCODE", cmd->cdb[0]);
321                                 scst_set_cmd_error(cmd,
322                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
323                                 goto out_xmit;
324                         }
325                 }
326                 else {
327                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328                              "target %s did not supply expected values. "
329                              "Returning INVALID OPCODE.", cmd->cdb[0], 
330                              dev->handler->name, cmd->tgtt->name);
331                         scst_set_cmd_error(cmd,
332                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
333                         goto out_xmit;
334                 }
335         } else {
336                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337                         "set %s), transfer_len=%d (expected len %d), flags=%d",
338                         cdb_info.op_name, cdb_info.direction,
339                         cmd->expected_data_direction,
340                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341                         cdb_info.transfer_len, cmd->expected_transfer_len,
342                         cdb_info.flags);
343
344                 /* Restore the (most probably lost) CDB length */
345                 cmd->cdb_len = cdb_info.cdb_len;
346
347                 cmd->data_direction = cdb_info.direction;
348                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349                         cmd->bufflen = cdb_info.transfer_len;
350                 /* else cmd->bufflen remains 0, as it was initialized */
351         }
352
353         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354                 PRINT_ERROR_PR("NACA bit in the CDB control byte is not supported "
355                             "(opcode 0x%02x)", cmd->cdb[0]);
356                 scst_set_cmd_error(cmd,
357                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
358                 goto out_xmit;
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362                 PRINT_ERROR_PR("Linked commands are not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (likely(!scst_is_cmd_local(cmd))) {
370                 TRACE_DBG("Calling dev handler %s parse(%p)",
371                       dev->handler->name, cmd);
372                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373                 state = dev->handler->parse(cmd, &cdb_info);
374                 TRACE_DBG("Dev handler %s parse() returned %d",
375                         dev->handler->name, state);
376
377                 if (state == SCST_CMD_STATE_DEFAULT)
378                         state = SCST_CMD_STATE_PREPARE_SPACE;
379         }
380         else
381                 state = SCST_CMD_STATE_PREPARE_SPACE;
382
383         if (scst_cmd_is_expected_set(cmd)) {
384                 if (cmd->expected_transfer_len < cmd->bufflen) {
385                         TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
386                                 "cmd->bufflen(%d), using expected_transfer_len "
387                                 "instead", cmd->expected_transfer_len,
388                                 cmd->bufflen);
389                         cmd->bufflen = cmd->expected_transfer_len;
390                 }
391         }
392
393         if (cmd->data_len == -1)
394                 cmd->data_len = cmd->bufflen;
395
396 #ifdef EXTRACHECKS
397         if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
398                 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
399                         (state != SCST_CMD_STATE_DEV_PARSE)) ||
400                     ((cmd->bufflen != 0) && 
401                         (cmd->data_direction == SCST_DATA_NONE)) ||
402                     ((cmd->bufflen == 0) && 
403                         (cmd->data_direction != SCST_DATA_NONE)) ||
404                     ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
405                         (state > SCST_CMD_STATE_PREPARE_SPACE))) 
406                 {
407                         PRINT_ERROR_PR("Dev handler %s parse() returned "
408                                        "invalid cmd data_direction %d, "
409                                        "bufflen %zd or state %d (opcode 0x%x)",
410                                        dev->handler->name, 
411                                        cmd->data_direction, cmd->bufflen,
412                                        state, cmd->cdb[0]);
413                         goto out_error;
414                 }
415         }
416 #endif
417
418         switch (state) {
419         case SCST_CMD_STATE_PREPARE_SPACE:
420         case SCST_CMD_STATE_DEV_PARSE:
421         case SCST_CMD_STATE_RDY_TO_XFER:
422         case SCST_CMD_STATE_SEND_TO_MIDLEV:
423         case SCST_CMD_STATE_DEV_DONE:
424         case SCST_CMD_STATE_XMIT_RESP:
425         case SCST_CMD_STATE_FINISHED:
426                 cmd->state = state;
427                 res = SCST_CMD_STATE_RES_CONT_SAME;
428                 break;
429
430         case SCST_CMD_STATE_REINIT:
431                 cmd->tgt_dev_saved = tgt_dev_saved;
432                 cmd->state = state;
433                 res = SCST_CMD_STATE_RES_RESTART;
434                 set_dir = 0;
435                 break;
436
437         case SCST_CMD_STATE_NEED_THREAD_CTX:
438                 TRACE_DBG("Dev handler %s parse() requested thread "
439                       "context, rescheduling", dev->handler->name);
440                 res = SCST_CMD_STATE_RES_NEED_THREAD;
441                 set_dir = 0;
442                 break;
443
444         default:
445                 if (state >= 0) {
446                         PRINT_ERROR_PR("Dev handler %s parse() returned "
447                              "invalid cmd state %d (opcode %d)", 
448                              dev->handler->name, state, cmd->cdb[0]);
449                 } else {
450                         PRINT_ERROR_PR("Dev handler %s parse() returned "
451                                 "error %d (opcode %d)", dev->handler->name, 
452                                 state, cmd->cdb[0]);
453                 }
454                 goto out_error;
455         }
456
457         if ((cmd->resp_data_len == -1) && set_dir) {
458                 if (cmd->data_direction == SCST_DATA_READ)
459                         cmd->resp_data_len = cmd->bufflen;
460                 else
461                          cmd->resp_data_len = 0;
462         }
463         
464 out:
465         TRACE_EXIT_HRES(res);
466         return res;
467
468 out_error:
469         /* dev_done() will be called as part of the regular cmd's finish */
470         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
471         cmd->state = SCST_CMD_STATE_DEV_DONE;
472         res = SCST_CMD_STATE_RES_CONT_SAME;
473         goto out;
474
475 out_xmit:
476         cmd->state = SCST_CMD_STATE_XMIT_RESP;
477         res = SCST_CMD_STATE_RES_CONT_SAME;
478         goto out;
479 }
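
/*
 * Illustrative sketch only (not part of SCST): the contract of a dev handler
 * parse() callback as invoked from scst_parse_cmd() above.  The signature is
 * inferred from the call site (dev->handler->parse(cmd, &cdb_info));
 * my_parse() and my_block_size() are hypothetical handler names.
 *
 *	static int my_parse(struct scst_cmd *cmd, const struct scst_info_cdb *info)
 *	{
 *		// cmd->data_direction and cmd->bufflen were already filled in
 *		// from the CDB table above; the handler may override them, e.g.
 *		// to scale a block count by the device's block size.
 *		cmd->bufflen = info->transfer_len * my_block_size(cmd);
 *
 *		// SCST_CMD_STATE_DEFAULT lets scst_parse_cmd() pick the next
 *		// state (PREPARE_SPACE); a negative return value is an error.
 *		return SCST_CMD_STATE_DEFAULT;
 *	}
 */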
480
481 void scst_cmd_mem_work_fn(void *p)
482 {
483         TRACE_ENTRY();
484
485         spin_lock_bh(&scst_cmd_mem_lock);
486
487         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
488         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
489                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
490                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
491         } else {
492                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
493                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
494         }
495         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
496
497         spin_unlock_bh(&scst_cmd_mem_lock);
498
499         TRACE_EXIT();
500         return;
501 }
502
503 int scst_check_mem(struct scst_cmd *cmd)
504 {
505         int res = 0;
506
507         TRACE_ENTRY();
508
509         if (cmd->mem_checked)
510                 goto out;
511
512         spin_lock_bh(&scst_cmd_mem_lock);
513
514         scst_cur_cmd_mem += cmd->bufflen;
515         cmd->mem_checked = 1;
516         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
517                 goto out_unlock;
518
519         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
520                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
521                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
522                 (cmd->sess->initiator_name[0] == '\0') ?
523                   "Anonymous" : cmd->sess->initiator_name,
524                 scst_cur_max_cmd_mem >> 10);
525
526         scst_cur_cmd_mem -= cmd->bufflen;
527         cmd->mem_checked = 0;
528         scst_set_busy(cmd);
529         cmd->state = SCST_CMD_STATE_XMIT_RESP;
530         res = 1;
531
532 out_unlock:
533         spin_unlock_bh(&scst_cmd_mem_lock);
534
535 out:
536         TRACE_EXIT_RES(res);
537         return res;
538 }
539
540 static void scst_low_cur_max_cmd_mem(void)
541 {
542         TRACE_ENTRY();
543
544         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
545                 cancel_delayed_work(&scst_cmd_mem_work);
546                 flush_scheduled_work();
547                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
548         }
549
550         spin_lock_bh(&scst_cmd_mem_lock);
551
552         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
553                                 (scst_cur_cmd_mem >> 2);
554         if (scst_cur_max_cmd_mem < 16*1024*1024)
555                 scst_cur_max_cmd_mem = 16*1024*1024;
556
557         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
558                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
559                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
560                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
561         }
562
563         spin_unlock_bh(&scst_cmd_mem_lock);
564
565         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
566
567         TRACE_EXIT();
568         return;
569 }
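
/*
 * Worked example of the throttling above (illustrative numbers): if commands
 * currently hold 200 MB, scst_low_cur_max_cmd_mem() drops the limit to
 * 200/2 + 200/4 = 150 MB (but never below 16 MB), and scst_cmd_mem_work_fn()
 * then grows it again by one eighth per SCST_CMD_MEM_TIMEOUT tick
 * (150 -> ~169 -> ~190 MB, ...) until it is clamped back to scst_max_cmd_mem.
 */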
570
571 static int scst_prepare_space(struct scst_cmd *cmd)
572 {
573         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
574
575         TRACE_ENTRY();
576
577         if (cmd->data_direction == SCST_DATA_NONE) {
578                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
579                 goto out;
580         }
581
582         r = scst_check_mem(cmd);
583         if (unlikely(r != 0))
584                 goto out;
585
586         if (cmd->data_buf_tgt_alloc) {
587                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
588                 r = cmd->tgtt->alloc_data_buf(cmd);
589                 cmd->data_buf_alloced = (r == 0);
590         } else
591                 r = scst_alloc_space(cmd);
592
593         if (r != 0) {
594                 if (scst_cmd_atomic(cmd)) {
595                         TRACE_MEM("%s", "Atomic memory allocation failed, "
596                               "rescheduling to the thread");
597                         res = SCST_CMD_STATE_RES_NEED_THREAD;
598                         goto out;
599                 } else
600                         goto out_no_space;
601         }
602
603         switch (cmd->data_direction) {
604         case SCST_DATA_WRITE:
605                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
606                 break;
607
608         default:
609                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
610                 break;
611         }
612
613 out:
614         TRACE_EXIT_HRES(res);
615         return res;
616
617 out_no_space:
618         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
619                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
620         scst_low_cur_max_cmd_mem();
621         scst_set_busy(cmd);
622         cmd->state = SCST_CMD_STATE_DEV_DONE;
623         res = SCST_CMD_STATE_RES_CONT_SAME;
624         goto out;
625 }
626
627 /* No locks */
628 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
629 {
630         struct scst_tgt *tgt = cmd->sess->tgt;
631         int res = 0;
632         unsigned long flags;
633
634         TRACE_ENTRY();
635
636         spin_lock_irqsave(&tgt->tgt_lock, flags);
637         tgt->retry_cmds++;
638         smp_mb();
639         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
640               tgt->retry_cmds);
641         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
642                 /* At least one cmd finished, so try again */
643                 tgt->retry_cmds--;
644                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
645                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
646                       "retry_cmds=%d)", finished_cmds,
647                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
648                 res = -1;
649                 goto out_unlock_tgt;
650         }
651
652         TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
653         /* IRQ already off */
654         spin_lock(&scst_list_lock);
655         list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
656         spin_unlock(&scst_list_lock);
657
658         if (!tgt->retry_timer_active) {
659                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
660                 add_timer(&tgt->retry_timer);
661                 tgt->retry_timer_active = 1;
662         }
663
664 out_unlock_tgt:
665         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
666
667         TRACE_EXIT_RES(res);
668         return res;
669 }
670
671 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
672 {
673         int res, rc;
674         int atomic = scst_cmd_atomic(cmd);
675
676         TRACE_ENTRY();
677
678         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
679         {
680                 TRACE_DBG("ABORTED set, returning ABORTED for "
681                         "cmd %p", cmd);
682                 goto out_dev_done;
683         }
684
685         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
686                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
687                       "called in atomic context, rescheduling to the thread");
688                 res = SCST_CMD_STATE_RES_NEED_THREAD;
689                 goto out;
690         }
691
692         while (1) {
693                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
694
695                 res = SCST_CMD_STATE_RES_CONT_NEXT;
696                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
697
698                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
699 #ifdef DEBUG_RETRY
700                 if (((scst_random() % 100) == 75))
701                         rc = SCST_TGT_RES_QUEUE_FULL;
702                 else
703 #endif
704                         rc = cmd->tgtt->rdy_to_xfer(cmd);
705                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
706
707                 if (likely(rc == SCST_TGT_RES_SUCCESS))
708                         goto out;
709
710                 /* Restore the previous state */
711                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
712
713                 switch (rc) {
714                 case SCST_TGT_RES_QUEUE_FULL:
715                 {
716                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
717                                 break;
718                         else
719                                 continue;
720                 }
721
722                 case SCST_TGT_RES_NEED_THREAD_CTX:
723                 {
724                         TRACE_DBG("Target driver %s "
725                               "rdy_to_xfer() requested thread "
726                               "context, rescheduling", cmd->tgtt->name);
727                         res = SCST_CMD_STATE_RES_NEED_THREAD;
728                         break;
729                 }
730
731                 default:
732                         goto out_error_rc;
733                 }
734                 break;
735         }
736
737 out:
738         TRACE_EXIT_HRES(res);
739         return res;
740
741 out_error_rc:
742         if (rc == SCST_TGT_RES_FATAL_ERROR) {
743                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
744                      "fatal error", cmd->tgtt->name);
745         } else {
746                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
747                             "value %d", cmd->tgtt->name, rc);
748         }
749         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
750
751 out_dev_done:
752         cmd->state = SCST_CMD_STATE_DEV_DONE;
753         res = SCST_CMD_STATE_RES_CONT_SAME;
754         goto out;
755 }
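
/*
 * Illustrative sketch only (not part of SCST): what a target driver's
 * rdy_to_xfer() callback is expected to do with the return codes handled
 * above.  my_rdy_to_xfer(), my_hw_queue_full() and my_hw_start_data_out()
 * are hypothetical driver names.
 *
 *	static int my_rdy_to_xfer(struct scst_cmd *cmd)
 *	{
 *		if (my_hw_queue_full(cmd))
 *			return SCST_TGT_RES_QUEUE_FULL;	// SCST queues a retry
 *
 *		// Start the WRITE data-out transfer in hardware.  When all the
 *		// data has arrived (e.g. from the completion interrupt), the
 *		// driver reports it with:
 *		//	scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS,
 *		//		     SCST_CONTEXT_TASKLET);
 *		if (my_hw_start_data_out(cmd) != 0)
 *			return SCST_TGT_RES_FATAL_ERROR;
 *
 *		return SCST_TGT_RES_SUCCESS;
 *	}
 *
 * SCST_TGT_RES_NEED_THREAD_CTX may be returned instead if the setup can not
 * be done in atomic context; scst_rdy_to_xfer() then reschedules to a thread.
 */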
756
757 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
758         int check_retries)
759 {
760         unsigned long flags;
761         int rc;
762
763         TRACE_ENTRY();
764
765         TRACE_DBG("Context: %d", context);
766
767         switch(context) {
768         case SCST_CONTEXT_DIRECT:
769         case SCST_CONTEXT_DIRECT_ATOMIC:
770                 if (check_retries)
771                         scst_check_retries(cmd->tgt, 0);
772                 cmd->non_atomic_only = 0;
773                 rc = __scst_process_active_cmd(cmd, context, 0);
774                 if (rc == SCST_CMD_STATE_RES_NEED_THREAD)
775                         goto out_thread;
776                 break;
777
778         default:
779                 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
780                             context);
781                 /* fall through */
782         case SCST_CONTEXT_THREAD:
783                 if (check_retries)
784                         scst_check_retries(cmd->tgt, 1);
785                 goto out_thread;
786
787         case SCST_CONTEXT_TASKLET:
788                 if (check_retries)
789                         scst_check_retries(cmd->tgt, 1);
790                 cmd->non_atomic_only = 0;
791                 spin_lock_irqsave(&scst_list_lock, flags);
792                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
793                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
794                 spin_unlock_irqrestore(&scst_list_lock, flags);
795                 scst_schedule_tasklet();
796                 break;
797         }
798 out:
799         TRACE_EXIT();
800         return;
801
802 out_thread:
803         cmd->non_atomic_only = 1;
804         spin_lock_irqsave(&scst_list_lock, flags);
805         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
806         list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
807         spin_unlock_irqrestore(&scst_list_lock, flags);
808         wake_up(&scst_list_waitQ);
809         goto out;
810 }
811
812 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
813 {
814         TRACE_ENTRY();
815
816         TRACE_DBG("Preferred context: %d", pref_context);
817         TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
818         cmd->non_atomic_only = 0;
819
820         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
821                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
822         {
823                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
824                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
825                         cmd->tgtt->name);
826                 pref_context = SCST_CONTEXT_TASKLET;
827         }
828
829         switch (status) {
830         case SCST_RX_STATUS_SUCCESS:
831                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
832                 break;
833
834         case SCST_RX_STATUS_ERROR_SENSE_SET:
835                 cmd->state = SCST_CMD_STATE_DEV_DONE;
836                 break;
837
838         case SCST_RX_STATUS_ERROR_FATAL:
839                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
840                 /* fall through */
841         case SCST_RX_STATUS_ERROR:
842                 scst_set_cmd_error(cmd,
843                            SCST_LOAD_SENSE(scst_sense_hardw_error));
844                 cmd->state = SCST_CMD_STATE_DEV_DONE;
845                 break;
846
847         default:
848                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
849                         status);
850                 cmd->state = SCST_CMD_STATE_DEV_DONE;
851                 break;
852         }
853
854         scst_proccess_redirect_cmd(cmd, pref_context, 1);
855
856         TRACE_EXIT();
857         return;
858 }
859
860 /* No locks supposed to be held */
861 static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
862         int rq_sense_len, int *next_state)
863 {
864         int sense_valid;
865         struct scst_device *dev = cmd->dev;
866         int dbl_ua_possible, ua_sent = 0;
867
868         TRACE_ENTRY();
869
870         /* If we had an internal bus reset behind us, set the command error UA */
871         if ((dev->scsi_dev != NULL) &&
872             unlikely(cmd->host_status == DID_RESET) &&
873             scst_is_ua_command(cmd))
874         {
875                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
876                       dev->scsi_dev->was_reset, cmd->host_status);
877                 scst_set_cmd_error(cmd,
878                    SCST_LOAD_SENSE(scst_sense_reset_UA));
879                 /* just in case */
880                 cmd->ua_ignore = 0;
881                 /* It looks like it is safe to clear was_reset here */
882                 dev->scsi_dev->was_reset = 0;
883                 smp_mb();
884         }
885
886         if (rq_sense != NULL) {
887                 sense_valid = SCST_SENSE_VALID(rq_sense);
888                 if (sense_valid) {
889                         memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
890                         /* 
891                          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
892                          * in init_scst()
893                          */
894                         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
895                 }
896         } else
897                 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
898
899         dbl_ua_possible = dev->dev_double_ua_possible;
900         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
901         if (unlikely(dbl_ua_possible)) {
902                 spin_lock_bh(&dev->dev_lock);
903                 barrier(); /* to reread dev_double_ua_possible */
904                 dbl_ua_possible = dev->dev_double_ua_possible;
905                 if (dbl_ua_possible)
906                         ua_sent = dev->dev_reset_ua_sent;
907                 else
908                         spin_unlock_bh(&dev->dev_lock);
909         }
910
911         if (sense_valid) {
912                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
913                              sizeof(cmd->sense_buffer));
914                 /* Check Unit Attention Sense Key */
915                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
916                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
917                                 if (dbl_ua_possible) 
918                                 {
919                                         if (ua_sent) {
920                                                 TRACE(TRACE_MGMT, "%s", 
921                                                         "Double UA detected");
922                                                 /* Do retry */
923                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
924                                                         "(tag %d)", cmd, cmd->tag);
925                                                 cmd->status = 0;
926                                                 cmd->masked_status = 0;
927                                                 cmd->msg_status = 0;
928                                                 cmd->host_status = DID_OK;
929                                                 cmd->driver_status = 0;
930                                                 memset(cmd->sense_buffer, 0,
931                                                         sizeof(cmd->sense_buffer));
932                                                 cmd->retry = 1;
933                                                 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
934                                                 /* 
935                                                  * Dev is still blocked by this cmd, so
936                                                  * it's OK to clear SCST_DEV_SERIALIZED
937                                                  * here.
938                                                  */
939                                                 dev->dev_double_ua_possible = 0;
940                                                 dev->dev_serialized = 0;
941                                                 dev->dev_reset_ua_sent = 0;
942                                                 goto out_unlock;
943                                         } else
944                                                 dev->dev_reset_ua_sent = 1;
945                                 }
946                         }
947                         if (cmd->ua_ignore == 0) {
948                                 if (unlikely(dbl_ua_possible)) {
949                                         __scst_process_UA(dev, cmd,
950                                                 cmd->sense_buffer,
951                                                 sizeof(cmd->sense_buffer), 0);
952                                 } else {
953                                         scst_process_UA(dev, cmd,
954                                                 cmd->sense_buffer,
955                                                 sizeof(cmd->sense_buffer), 0);
956                                 }
957                         }
958                 }
959         }
960
961         if (unlikely(dbl_ua_possible)) {
962                 if (ua_sent && scst_is_ua_command(cmd)) {
963                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
964                         dev->dev_double_ua_possible = 0;
965                         dev->dev_serialized = 0;
966                         dev->dev_reset_ua_sent = 0;
967                 }
968                 spin_unlock_bh(&dev->dev_lock);
969         }
970
971 out:
972         TRACE_EXIT();
973         return;
974
975 out_unlock:
976         spin_unlock_bh(&dev->dev_lock);
977         goto out;
978 }
979
980 static int scst_check_auto_sense(struct scst_cmd *cmd)
981 {
982         int res = 0;
983
984         TRACE_ENTRY();
985
986         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
987             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
988              SCST_NO_SENSE(cmd->sense_buffer)))
989         {
990                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
991                       "cmd->status=%x, cmd->masked_status=%x, "
992                       "cmd->msg_status=%x, cmd->host_status=%x, "
993                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
994                       cmd->msg_status, cmd->host_status, cmd->driver_status);
995                 res = 1;
996         } else if (unlikely(cmd->host_status)) {
997                 if ((cmd->host_status == DID_REQUEUE) ||
998                     (cmd->host_status == DID_IMM_RETRY) ||
999                     (cmd->host_status == DID_SOFT_ERROR)) {
1000                         scst_set_busy(cmd);
1001                 } else {
1002                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
1003                                 "received, returning HARDWARE ERROR instead",
1004                                 cmd->host_status);
1005                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1006                 }
1007         }
1008
1009         TRACE_EXIT_RES(res);
1010         return res;
1011 }
1012
1013 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1014         const uint8_t *rq_sense, int rq_sense_len, int resid,
1015         int *next_state)
1016 {
1017         unsigned char type;
1018
1019         TRACE_ENTRY();
1020
1021         cmd->status = result & 0xff;
1022         cmd->masked_status = status_byte(result);
1023         cmd->msg_status = msg_byte(result);
1024         cmd->host_status = host_byte(result);
1025         cmd->driver_status = driver_byte(result);
1026         if (unlikely(resid != 0)) {
1027 #ifdef EXTRACHECKS
1028                 if ((resid < 0) || (resid >= cmd->resp_data_len)) {
1029                         PRINT_ERROR_PR("Wrong resid %d (cmd->resp_data_len=%d)",
1030                                 resid, cmd->resp_data_len);
1031                 } else
1032 #endif
1033                         scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
1034         }
1035
1036         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
1037               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
1038               "cmd->driver_status=%x", result, cmd->status, resid,
1039               cmd->masked_status, cmd->msg_status, cmd->host_status,
1040               cmd->driver_status);
1041
1042         cmd->completed = 1;
1043
1044         scst_dec_on_dev_cmd(cmd);
1045
1046         type = cmd->dev->handler->type;
1047         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1048             cmd->tgt_dev->acg_dev->rd_only_flag &&
1049             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1050              type == TYPE_TAPE)) {
1051                 int32_t length;
1052                 uint8_t *address;
1053
1054                 length = scst_get_buf_first(cmd, &address);
1055                 TRACE_DBG("length %d", length);
1056                 if (unlikely(length <= 0)) {
1057                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1058                                 __func__);
1059                         goto next;
1060                 }
1061                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1062                         address[2] |= 0x80;   /* Write Protect*/
1063                 }
1064                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1065                         address[3] |= 0x80;   /* Write Protect*/
1066                 }
1067                 scst_put_buf(cmd, address);
1068         }
1069
1070 next:
1071         scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
1072
1073         TRACE_EXIT();
1074         return;
1075 }
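
/*
 * Worked example of the resid handling above (illustrative numbers): for a
 * READ with resp_data_len == 4096, a mid-level reported resid of 512 results
 * in scst_set_resp_data_len(cmd, 4096 - 512), i.e. a 3584 byte response.
 * With EXTRACHECKS enabled, a resid that is negative or >= resp_data_len is
 * only logged and not applied.
 */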
1076
1077 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1078 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1079                                             struct scsi_request **req)
1080 {
1081         struct scst_cmd *cmd = NULL;
1082
1083         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1084                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1085
1086         if (cmd == NULL) {
1087                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1088                 if (*req)
1089                         scsi_release_request(*req);
1090         }
1091
1092         return cmd;
1093 }
1094
1095 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1096 {
1097         struct scsi_request *req = NULL;
1098         struct scst_cmd *cmd;
1099         int next_state;
1100
1101         TRACE_ENTRY();
1102
1103         WARN_ON(in_irq());
1104
1105         cmd = scst_get_cmd(scsi_cmd, &req);
1106         if (cmd == NULL)
1107                 goto out;
1108
1109         next_state = SCST_CMD_STATE_DEV_DONE;
1110         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1111                 sizeof(req->sr_sense_buffer), scsi_cmd->resid, &next_state);
1112
1113         /* Clear out request structure */
1114         req->sr_use_sg = 0;
1115         req->sr_sglist_len = 0;
1116         req->sr_bufflen = 0;
1117         req->sr_buffer = NULL;
1118         req->sr_underflow = 0;
1119         req->sr_request->rq_disk = NULL; /* disown request blk */
1120
1121         cmd->bufflen = req->sr_bufflen; //??
1122
1123         scst_release_request(cmd);
1124
1125         cmd->state = next_state;
1126         cmd->non_atomic_only = 0;
1127
1128         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1129
1130 out:
1131         TRACE_EXIT();
1132         return;
1133 }
1134 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1135 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1136 {
1137         struct scst_cmd *cmd;
1138         int next_state;
1139
1140         TRACE_ENTRY();
1141
1142         WARN_ON(in_irq());
1143
1144         /*
1145          * The resid reported by the mid-level is passed on to
1146          * scst_do_cmd_done(), which shrinks resp_data_len accordingly.
1147          * Note that many low level initiator drivers don't set this
1148          * field, so target drivers that need the exact residual should
1149          * still compare the expected and actual transfer sizes
1150          * themselves.
1151          */
1152
1153         cmd = (struct scst_cmd *)data;
1154         if (cmd == NULL)
1155                 goto out;
1156
1157         next_state = SCST_CMD_STATE_DEV_DONE;
1158         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid,
1159                 &next_state);
1160
1161         cmd->state = next_state;
1162         cmd->non_atomic_only = 0;
1163
1164         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1165
1166 out:
1167         TRACE_EXIT();
1168         return;
1169 }
1170 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1171
1172 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1173 {
1174         TRACE_ENTRY();
1175
1176         BUG_ON(in_irq());
1177
1178         scst_dec_on_dev_cmd(cmd);
1179
1180         if (next_state == SCST_CMD_STATE_DEFAULT)
1181                 next_state = SCST_CMD_STATE_DEV_DONE;
1182
1183         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1184 #if defined(DEBUG) || defined(TRACING)
1185                 if (cmd->sg) {
1186                         int i;
1187                         struct scatterlist *sg = cmd->sg;
1188                         TRACE(TRACE_RECV_TOP, 
1189                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1190                               cmd->sg_cnt, sg, (void*)sg[0].page);
1191                         for(i = 0; i < cmd->sg_cnt; ++i) {
1192                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1193                                         "Exec'd sg", page_address(sg[i].page),
1194                                         sg[i].length);
1195                         }
1196                 }
1197 #endif
1198         }
1199
1200
1201 #ifdef EXTRACHECKS
1202         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1203             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1204             (next_state != SCST_CMD_STATE_FINISHED)) 
1205         {
1206                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1207                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1208                 scst_set_cmd_error(cmd,
1209                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1210                 next_state = SCST_CMD_STATE_DEV_DONE;
1211         }
1212
1213         if (scst_check_auto_sense(cmd)) {
1214                 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1215                         "opcode %d", cmd->cdb[0]);
1216         }
1217 #endif
1218
1219         scst_check_sense(cmd, NULL, 0, &next_state);
1220
1221         cmd->state = next_state;
1222         cmd->non_atomic_only = 0;
1223
1224         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1225
1226         TRACE_EXIT();
1227         return;
1228 }
1229
1230 static int scst_report_luns_local(struct scst_cmd *cmd)
1231 {
1232         int res = SCST_EXEC_COMPLETED;
1233         int dev_cnt = 0;
1234         int buffer_size;
1235         struct scst_tgt_dev *tgt_dev = NULL;
1236         uint8_t *buffer;
1237
1238         TRACE_ENTRY();
1239
1240         cmd->status = 0;
1241         cmd->masked_status = 0;
1242         cmd->msg_status = 0;
1243         cmd->host_status = DID_OK;
1244         cmd->driver_status = 0;
1245
1246         /* ToDo: use full SG buffer, not only the first entry */
1247         buffer_size = scst_get_buf_first(cmd, &buffer);
1248         if (unlikely(buffer_size <= 0))
1249                 goto out_err;
1250
1251         if (buffer_size < 16) {
1252                 goto out_put_err;
1253         }
1254
1255         memset(buffer, 0, buffer_size);
1256
1257         /* sess->sess_tgt_dev_list is protected by suspended activity */
1258         list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1259                             sess_tgt_dev_list_entry) 
1260         {
1261                 if (8 + 8 * dev_cnt + 2 <= buffer_size) {
1262                         buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1263                         buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
1264                 }
1265                 dev_cnt++;
1266                 /* Temporary, until the ToDo above is done */
1267                 if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
1268                         break;
1269         }
1270
1271         /* Set the response header */
1272         dev_cnt *= 8;
1273         buffer[0] = (dev_cnt >> 24) & 0xff;
1274         buffer[1] = (dev_cnt >> 16) & 0xff;
1275         buffer[2] = (dev_cnt >> 8) & 0xff;
1276         buffer[3] = dev_cnt & 0xff;
1277
1278         dev_cnt += 8;
1279
1280         scst_put_buf(cmd, buffer);
1281
1282         if (buffer_size > dev_cnt)
1283                 scst_set_resp_data_len(cmd, dev_cnt);
1284         
1285 out_done:
1286         cmd->completed = 1;
1287
1288         /* Report the result */
1289         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1290
1291         TRACE_EXIT_RES(res);
1292         return res;
1293         
1294 out_put_err:
1295         scst_put_buf(cmd, buffer);
1296
1297 out_err:
1298         scst_set_cmd_error(cmd,
1299                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1300         goto out_done;
1301 }
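
/*
 * Layout of the REPORT LUNS response built above, shown for two LUNs
 * (illustrative numbers): bytes 0-3 carry the LUN list length in bytes
 * (2 * 8 = 16, big-endian), bytes 4-7 are reserved, and each following
 * 8-byte entry holds one flat 16-bit LUN in its first two bytes:
 *
 *	00 00 00 10  00 00 00 00	header: list length 16, reserved
 *	00 00 00 00  00 00 00 00	LUN 0
 *	00 01 00 00  00 00 00 00	LUN 1
 *
 * so the response is trimmed to 8 + 16 = 24 bytes via
 * scst_set_resp_data_len() when the buffer is larger than that.
 */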
1302
1303 static int scst_pre_select(struct scst_cmd *cmd)
1304 {
1305         int res = SCST_EXEC_NOT_COMPLETED;
1306
1307         TRACE_ENTRY();
1308
1309         if (scst_cmd_atomic(cmd)) {
1310                 res = SCST_EXEC_NEED_THREAD;
1311                 goto out;
1312         }
1313
1314         scst_block_dev(cmd->dev, 1);
1315         /* Device will be unblocked in scst_done_cmd_check() */
1316
1317         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1318                 int rc = scst_set_pending_UA(cmd);
1319                 if (rc == 0) {
1320                         res = SCST_EXEC_COMPLETED;
1321                         cmd->completed = 1;
1322                         /* Report the result */
1323                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1324                         goto out;
1325                 }
1326         }
1327
1328 out:
1329         TRACE_EXIT_RES(res);
1330         return res;
1331 }
1332
1333 static inline void scst_report_reserved(struct scst_cmd *cmd)
1334 {
1335         TRACE_ENTRY();
1336
1337         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1338         cmd->completed = 1;
1339         /* Report the result */
1340         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1341
1342         TRACE_EXIT();
1343         return;
1344 }
1345
1346 static int scst_reserve_local(struct scst_cmd *cmd)
1347 {
1348         int res = SCST_EXEC_NOT_COMPLETED;
1349         struct scst_device *dev;
1350         struct scst_tgt_dev *tgt_dev_tmp;
1351
1352         TRACE_ENTRY();
1353
1354         if (scst_cmd_atomic(cmd)) {
1355                 res = SCST_EXEC_NEED_THREAD;
1356                 goto out;
1357         }
1358
1359         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1360                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1361                      "(lun=%Ld)", (uint64_t)cmd->lun);
1362                 scst_set_cmd_error(cmd,
1363                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1364                 cmd->completed = 1;
1365                 res = SCST_EXEC_COMPLETED;
1366                 goto out;
1367         }
1368
1369         dev = cmd->dev;
1370         scst_block_dev(dev, 1);
1371         /* Device will be unblocked in scst_done_cmd_check() */
1372
1373         spin_lock_bh(&dev->dev_lock);
1374
1375         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1376                 scst_report_reserved(cmd);
1377                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1378                 res = SCST_EXEC_COMPLETED;
1379                 goto out_unlock;
1380         }
1381
1382         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1383                             dev_tgt_dev_list_entry) 
1384         {
1385                 if (cmd->tgt_dev != tgt_dev_tmp)
1386                         set_bit(SCST_TGT_DEV_RESERVED, 
1387                                 &tgt_dev_tmp->tgt_dev_flags);
1388         }
1389         dev->dev_reserved = 1;
1390
1391 out_unlock:
1392         spin_unlock_bh(&dev->dev_lock);
1393         
1394 out:
1395         TRACE_EXIT_RES(res);
1396         return res;
1397 }
1398
1399 static int scst_release_local(struct scst_cmd *cmd)
1400 {
1401         int res = SCST_EXEC_NOT_COMPLETED;
1402         struct scst_tgt_dev *tgt_dev_tmp;
1403         struct scst_device *dev;
1404
1405         TRACE_ENTRY();
1406
1407         dev = cmd->dev;
1408
1409         scst_block_dev(dev, 1);
1410         cmd->blocking = 1;
1411         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1412
1413         spin_lock_bh(&dev->dev_lock);
1414
1415         /* 
1416          * The device could be RELEASED behind us, if the RESERVING session 
1417          * is closed (see scst_free_tgt_dev()), but that actually doesn't 
1418          * matter, so take the lock and don't retest the DEV_RESERVED bits
1419          */
1420         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1421                 res = SCST_EXEC_COMPLETED;
1422                 cmd->status = 0;
1423                 cmd->masked_status = 0;
1424                 cmd->msg_status = 0;
1425                 cmd->host_status = DID_OK;
1426                 cmd->driver_status = 0;
1427         } else {
1428                 list_for_each_entry(tgt_dev_tmp,
1429                                     &dev->dev_tgt_dev_list,
1430                                     dev_tgt_dev_list_entry) 
1431                 {
1432                         clear_bit(SCST_TGT_DEV_RESERVED, 
1433                                 &tgt_dev_tmp->tgt_dev_flags);
1434                 }
1435                 dev->dev_reserved = 0;
1436         }
1437
1438         spin_unlock_bh(&dev->dev_lock);
1439
1440         if (res == SCST_EXEC_COMPLETED) {
1441                 cmd->completed = 1;
1442                 /* Report the result */
1443                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1444         }
1445
1446         TRACE_EXIT_RES(res);
1447         return res;
1448 }
1449
1450 /* 
1451  * The result of cmd execution, if any, should be reported 
1452  * via scst_cmd_done_local() 
1453  */
1454 static int scst_pre_exec(struct scst_cmd *cmd)
1455 {
1456         int res = SCST_EXEC_NOT_COMPLETED, rc;
1457         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1458
1459         TRACE_ENTRY();
1460
1461         /* Reserve check before Unit Attention */
1462         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
1463             (cmd->cdb[0] != INQUIRY) &&
1464             (cmd->cdb[0] != REPORT_LUNS) &&
1465             (cmd->cdb[0] != RELEASE) &&
1466             (cmd->cdb[0] != RELEASE_10) &&
1467             (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1468             (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1469             (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
1470         {
1471                 scst_report_reserved(cmd);
1472                 res = SCST_EXEC_COMPLETED;
1473                 goto out;
1474         }
1475
        /* If we had an internal bus reset, set the command error Unit Attention */
1477         if ((cmd->dev->scsi_dev != NULL) &&
1478             unlikely(cmd->dev->scsi_dev->was_reset) &&
1479             scst_is_ua_command(cmd)) 
1480         {
1481                 struct scst_device *dev = cmd->dev;
1482                 int done = 0;
                /* Prevent more than one cmd from being triggered by was_reset */
1484                 spin_lock_bh(&dev->dev_lock);
1485                 barrier(); /* to reread was_reset */
1486                 if (dev->scsi_dev->was_reset) {
1487                         TRACE(TRACE_MGMT, "was_reset is %d", 1);
1488                         scst_set_cmd_error(cmd,
1489                                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1490                         /* It looks like it is safe to clear was_reset here */
1491                         dev->scsi_dev->was_reset = 0;
1492                         smp_mb();
1493                         done = 1;
1494                 }
1495                 spin_unlock_bh(&dev->dev_lock);
1496
1497                 if (done)
1498                         goto out_done;
1499         }
1500
1501         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
1502             scst_is_ua_command(cmd)) 
1503         {
1504                 rc = scst_set_pending_UA(cmd);
1505                 if (rc == 0)
1506                         goto out_done;
1507         }
1508
1509         /* Check READ_ONLY device status */
1510         if (tgt_dev->acg_dev->rd_only_flag &&
1511             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1512              cmd->cdb[0] == WRITE_10 ||
1513              cmd->cdb[0] == WRITE_12 ||
1514              cmd->cdb[0] == WRITE_16 ||
1515              cmd->cdb[0] == WRITE_VERIFY ||
1516              cmd->cdb[0] == WRITE_VERIFY_12 ||
1517              cmd->cdb[0] == WRITE_VERIFY_16 ||
1518              (cmd->dev->handler->type == TYPE_TAPE &&
1519               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1520         {
1521                 scst_set_cmd_error(cmd,
1522                            SCST_LOAD_SENSE(scst_sense_data_protect));
1523                 goto out_done;
1524         }
1525 out:
1526         TRACE_EXIT_RES(res);
1527         return res;
1528
1529 out_done:
1530         res = SCST_EXEC_COMPLETED;
1531         cmd->completed = 1;
1532         /* Report the result */
1533         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1534         goto out;
1535 }
1536
1537 /* 
1538  * The result of cmd execution, if any, should be reported 
1539  * via scst_cmd_done_local() 
1540  */
1541 static inline int scst_local_exec(struct scst_cmd *cmd)
1542 {
1543         int res = SCST_EXEC_NOT_COMPLETED;
1544
1545         TRACE_ENTRY();
1546
1547         /*
         * When adding new commands here, don't forget to update
         * scst_is_cmd_local() in scsi_tgt.h, if necessary
1550          */
1551
1552         switch (cmd->cdb[0]) {
1553         case MODE_SELECT:
1554         case MODE_SELECT_10:
1555         case LOG_SELECT:
1556                 res = scst_pre_select(cmd);
1557                 break;
1558         case RESERVE:
1559         case RESERVE_10:
1560                 res = scst_reserve_local(cmd);
1561                 break;
1562         case RELEASE:
1563         case RELEASE_10:
1564                 res = scst_release_local(cmd);
1565                 break;
1566         case REPORT_LUNS:
1567                 res = scst_report_luns_local(cmd);
1568                 break;
1569         }
1570
1571         TRACE_EXIT_RES(res);
1572         return res;
1573 }
1574
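/*
 * Executes the cmd: checks for abort, runs scst_pre_exec() and
 * scst_local_exec(), then either the dev handler's exec() or the generic
 * pass-through to the SCSI mid-level. Returns an SCST_EXEC_* code.
 */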
1575 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1576 {
1577         int rc = SCST_EXEC_NOT_COMPLETED;
1578
1579         TRACE_ENTRY();
1580
1581         cmd->sent_to_midlev = 1;
1582         cmd->state = SCST_CMD_STATE_EXECUTING;
1583         cmd->scst_cmd_done = scst_cmd_done_local;
1584
1585         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1586         smp_mb__after_set_bit();
1587
1588         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1589                 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1590                 goto out_aborted;
1591         }
1592
1593         rc = scst_pre_exec(cmd);
1594         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1595         if (rc != SCST_EXEC_NOT_COMPLETED) {
1596                 if (rc == SCST_EXEC_COMPLETED)
1597                         goto out;
1598                 else if (rc == SCST_EXEC_NEED_THREAD)
1599                         goto out_clear;
1600                 else
1601                         goto out_rc_error;
1602         }
1603
1604         rc = scst_local_exec(cmd);
1605         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1606         if (rc != SCST_EXEC_NOT_COMPLETED) {
1607                 if (rc == SCST_EXEC_COMPLETED)
1608                         goto out;
1609                 else if (rc == SCST_EXEC_NEED_THREAD)
1610                         goto out_clear;
1611                 else
1612                         goto out_rc_error;
1613         }
1614
1615         if (cmd->dev->handler->exec) {
1616                 struct scst_device *dev = cmd->dev;
1617                 TRACE_DBG("Calling dev handler %s exec(%p)",
1618                       dev->handler->name, cmd);
1619                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1620                 cmd->scst_cmd_done = scst_cmd_done_local;
1621                 rc = dev->handler->exec(cmd);
1622                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1623                 TRACE_DBG("Dev handler %s exec() returned %d",
1624                       dev->handler->name, rc);
1625                 if (rc != SCST_EXEC_NOT_COMPLETED) {
1626                         if (rc == SCST_EXEC_COMPLETED)
1627                                 goto out;
1628                         else if (rc == SCST_EXEC_NEED_THREAD)
1629                                 goto out_clear;
1630                         else
1631                                 goto out_rc_error;
1632                 }
1633         }
1634
1635         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1636         
1637         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1638                 PRINT_ERROR_PR("Command for virtual device must be "
1639                         "processed by device handler (lun %Ld)!",
1640                         (uint64_t)cmd->lun);
1641                 goto out_error;
1642         }
1643
1644 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1645         if (scst_alloc_request(cmd) != 0) {
1646                 PRINT_INFO_PR("%s", "Unable to allocate request, "
1647                         "sending BUSY status");
1648                 goto out_busy;
1649         }
1650         
1651         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1652                     (void *)cmd->scsi_req->sr_buffer,
1653                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1654                     cmd->retries);
1655 #else
1656         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1657                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1658                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1659                         GFP_KERNEL);
1660         if (rc) {
1661                 PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1662                 goto out_error;
1663         }
1664 #endif
1665
1666         rc = SCST_EXEC_COMPLETED;
1667
1668 out:
1669         TRACE_EXIT();
1670         return rc;
1671
1672 out_clear:
1673         /* Restore the state */
1674         cmd->sent_to_midlev = 0;
1675         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1676         goto out;
1677
1678 out_rc_error:
1679         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1680                     "invalid code %d", cmd->dev->handler->name, rc);
1681         /* go through */
        /* fall through */
1683 out_error:
1684         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1685         cmd->completed = 1;
1686         cmd->state = SCST_CMD_STATE_DEV_DONE;
1687         rc = SCST_EXEC_COMPLETED;
1688         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1689         goto out;
1690
1691 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1692 out_busy:
1693         scst_set_busy(cmd);
1694         cmd->completed = 1;
1695         cmd->state = SCST_CMD_STATE_DEV_DONE;
1696         rc = SCST_EXEC_COMPLETED;
1697         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1698         goto out;
1699 #endif
1700
1701 out_aborted:
1702         rc = SCST_EXEC_COMPLETED;
1703         /* Report the result. The cmd is not completed */
1704         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1705         goto out;
1706 }
1707
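/*
 * Sends the cmd for execution, honoring the expected SN ordering, then
 * drains any deferred commands that became ready. Returns an
 * SCST_CMD_STATE_RES_* code.
 */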
1708 static int scst_send_to_midlev(struct scst_cmd *cmd)
1709 {
1710         int res, rc;
1711         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1712         struct scst_device *dev = cmd->dev;
1713         int expected_sn;
1714         int count;
1715         int atomic = scst_cmd_atomic(cmd);
1716
1717         TRACE_ENTRY();
1718
1719         res = SCST_CMD_STATE_RES_CONT_NEXT;
1720
1721         if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1722                 TRACE_DBG("Dev handler %s exec() can not be "
1723                       "called in atomic context, rescheduling to the thread",
1724                       dev->handler->name);
1725                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1726                 goto out;
1727         }
1728
1729         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1730                 goto out;
1731
1732         scst_inc_cmd_count(); /* protect dev & tgt_dev */
1733
1734         if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1735                 rc = scst_do_send_to_midlev(cmd);
1736                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1737                 if (rc == SCST_EXEC_NEED_THREAD) {
1738                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1739                               "thread context, rescheduling");
1740                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1741                         scst_dec_on_dev_cmd(cmd);
1742                         goto out_dec_cmd_count;
1743                 } else {
1744                         BUG_ON(rc != SCST_EXEC_COMPLETED);
1745                         goto out_unplug;
1746                 }
1747         }
1748
1749         expected_sn = tgt_dev->expected_sn;
1750         if (cmd->sn != expected_sn) {
1751                 spin_lock_bh(&tgt_dev->sn_lock);
1752                 tgt_dev->def_cmd_count++;
1753                 smp_mb();
1754                 barrier(); /* to reread expected_sn */
1755                 expected_sn = tgt_dev->expected_sn;
1756                 if (cmd->sn != expected_sn) {
1757                         scst_dec_on_dev_cmd(cmd);
1758                         TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1759                               "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1760                         list_add_tail(&cmd->sn_cmd_list_entry,
1761                                       &tgt_dev->deferred_cmd_list);
1762                         spin_unlock_bh(&tgt_dev->sn_lock);
1763                         /* !! At this point cmd can be already freed !! */
1764                         goto out_dec_cmd_count;
1765                 } else {
1766                         TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1767                               "expected_sn %d, continuing", expected_sn);
1768                         tgt_dev->def_cmd_count--;
1769                         spin_unlock_bh(&tgt_dev->sn_lock);
1770                 }
1771         }
1772
1773         count = 0;
1774         while(1) {
1775                 rc = scst_do_send_to_midlev(cmd);
1776                 if (rc == SCST_EXEC_NEED_THREAD) {
1777                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1778                               "thread context, rescheduling");
1779                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1780                         scst_dec_on_dev_cmd(cmd);
1781                         if (count != 0)
1782                                 goto out_unplug;
1783                         else
1784                                 goto out_dec_cmd_count;
1785                 }
1786                 BUG_ON(rc != SCST_EXEC_COMPLETED);
1787                 /* !! At this point cmd can be already freed !! */
1788                 count++;
1789                 expected_sn = __scst_inc_expected_sn(tgt_dev);
1790                 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1791                 if (cmd == NULL)
1792                         break;
1793                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1794                         break;
1795         }
1796
1797 out_unplug:
1798         if (dev->scsi_dev != NULL)
1799                 generic_unplug_device(dev->scsi_dev->request_queue);
1800
1801 out_dec_cmd_count:
1802         scst_dec_cmd_count();
1803         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1804
1805 out:
1806         TRACE_EXIT_HRES(res);
1807         return res;
1808 }
1809
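/*
 * Allocates and initializes an internal cmd (e.g. for REQUEST SENSE) on
 * behalf of orig_cmd. Returns the new cmd or NULL on allocation failure.
 */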
1810 static struct scst_cmd *scst_create_prepare_internal_cmd(
1811         struct scst_cmd *orig_cmd, int bufsize)
1812 {
1813         struct scst_cmd *res;
1814         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1815
1816         TRACE_ENTRY();
1817
1818         res = scst_alloc_cmd(gfp_mask);
1819         if (unlikely(res == NULL)) {
1820                 goto out;
1821         }
1822
1823         res->sess = orig_cmd->sess;
1824         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1825         res->atomic = scst_cmd_atomic(orig_cmd);
1826         res->internal = 1;
1827         res->tgtt = orig_cmd->tgtt;
1828         res->tgt = orig_cmd->tgt;
1829         res->dev = orig_cmd->dev;
1830         res->tgt_dev = orig_cmd->tgt_dev;
1831         res->lun = orig_cmd->lun;
1832         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1833         res->data_direction = SCST_DATA_UNKNOWN;
1834         res->orig_cmd = orig_cmd;
1835
1836         res->bufflen = bufsize;
1837         if (bufsize > 0) {
                if (scst_alloc_space(res) != 0) {
                        PRINT_ERROR("Unable to create buffer (size %d) for "
                                "internal cmd", bufsize);
                        goto out_free_res;
                }
        }
1843
1844 out:
1845         TRACE_EXIT_HRES((unsigned long)res);
1846         return res;
1847
1848 out_free_res:
1849         scst_destroy_cmd(res);
1850         res = NULL;
1851         goto out;
1852 }
1853
1854 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1855 {
1856         TRACE_ENTRY();
1857
1858         if (cmd->bufflen > 0)
1859                 scst_release_space(cmd);
1860         scst_destroy_cmd(cmd);
1861
1862         TRACE_EXIT();
1863         return;
1864 }
1865
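/*
 * Creates an internal REQUEST SENSE cmd for orig_cmd and puts it on the
 * active cmd list. Returns SCST_CMD_STATE_RES_RESTART on success, -1 on
 * failure.
 */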
1866 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1867 {
1868         int res = SCST_CMD_STATE_RES_RESTART;
1869 #define sbuf_size 252
1870         static const unsigned char request_sense[6] =
1871             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1872         struct scst_cmd *rs_cmd;
1873
1874         TRACE_ENTRY();
1875
1876         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
        if (rs_cmd == NULL)
1878                 goto out_error;
1879
1880         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1881         rs_cmd->cdb_len = sizeof(request_sense);
1882         rs_cmd->data_direction = SCST_DATA_READ;
1883
1884         spin_lock_irq(&scst_list_lock);
1885         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1886         spin_unlock_irq(&scst_list_lock);
1887
1888 out:
1889         TRACE_EXIT_RES(res);
1890         return res;
1891
1892 out_error:
1893         res = -1;
1894         goto out;
1895 #undef sbuf_size
1896 }
1897
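/*
 * Copies the sense data returned by the internal REQUEST SENSE into
 * orig_cmd, frees the internal cmd and returns orig_cmd.
 */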
1898 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1899 {
1900         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1901         uint8_t *buf;
1902         int len;
1903
1904         TRACE_ENTRY();
1905
        BUG_ON(orig_cmd == NULL);
1907
1908         len = scst_get_buf_first(cmd, &buf);
1909
1910         if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1911             (!SCST_NO_SENSE(buf))) 
1912         {
1913                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1914                         buf, len);
1915                 memcpy(orig_cmd->sense_buffer, buf,
1916                         (sizeof(orig_cmd->sense_buffer) > len) ?
1917                                 len : sizeof(orig_cmd->sense_buffer));
1918         } else {
1919                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1920                         "REQUEST SENSE, returning HARDWARE ERROR");
1921                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1922         }
1923
1924         scst_put_buf(cmd, buf);
1925
1926         scst_free_internal_cmd(cmd);
1927
1928         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1929         return orig_cmd;
1930 }
1931
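/*
 * Post-execution checks: auto REQUEST SENSE, write-protect reporting for
 * read-only LUNs, NormACA clearing in INQUIRY data, and RESERVE/RELEASE and
 * MODE/LOG SELECT bookkeeping. Returns non-zero if cmd processing must be
 * restarted, with the new result stored in *pres.
 */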
1932 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1933 {
1934         int res = 0, rc;
1935         unsigned char type;
1936
1937         TRACE_ENTRY();
1938
1939         if (cmd->cdb[0] == REQUEST_SENSE) {
1940                 if (cmd->internal)
1941                         cmd = scst_complete_request_sense(cmd);
1942         } else if (scst_check_auto_sense(cmd)) {
1943                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1944                             "without sense data (opcode 0x%x), issuing "
1945                             "REQUEST SENSE", cmd->cdb[0]);
1946                 rc = scst_prepare_request_sense(cmd);
                if (rc > 0) {
1948                         *pres = rc;
1949                         res = 1;
1950                         goto out;
1951                 } else {
1952                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1953                                     "returning HARDWARE ERROR");
1954                         scst_set_cmd_error(cmd,
1955                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1956                 }
1957         }
1958
1959         type = cmd->dev->handler->type;
1960         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1961             cmd->tgt_dev->acg_dev->rd_only_flag &&
1962             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1963              type == TYPE_TAPE))
1964         {
1965                 int32_t length;
1966                 uint8_t *address;
1967
1968                 length = scst_get_buf_first(cmd, &address);
1969                 if (length <= 0)
1970                         goto out;
1971                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1972                         address[2] |= 0x80;   /* Write Protect*/
1973                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1974                         address[3] |= 0x80;   /* Write Protect*/
1975                 scst_put_buf(cmd, address);
1976         }
1977
1978         /* 
1979          * Check and clear NormACA option for the device, if necessary,
1980          * since we don't support ACA
1981          */
1982         if ((cmd->cdb[0] == INQUIRY) &&
1983             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1984             (cmd->resp_data_len > SCST_INQ_BYTE3))
1985         {
1986                 uint8_t *buffer;
1987                 int buflen;
1988
1989                 /* ToDo: all pages ?? */
1990                 buflen = scst_get_buf_first(cmd, &buffer);
1991                 if (buflen > 0) {
1992                         if (buflen > SCST_INQ_BYTE3) {
1993 #ifdef EXTRACHECKS
1994                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1995                                         PRINT_INFO_PR("NormACA set for device: "
1996                                             "lun=%Ld, type 0x%02x", 
1997                                             (uint64_t)cmd->lun, buffer[0]);
1998                                 }
1999 #endif
2000                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2001                         } else
2002                                 scst_set_cmd_error(cmd,
2003                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
2004
2005                         scst_put_buf(cmd, buffer);
2006                 }
2007         }
2008
2009         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
2010                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
2011                                                 &cmd->tgt_dev->tgt_dev_flags)) {
2012                         struct scst_tgt_dev *tgt_dev_tmp;
2013                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
2014                               (uint64_t)cmd->lun, cmd->masked_status);
2015                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
2016                                      sizeof(cmd->sense_buffer));
2017                         /* Clearing the reservation */
2018                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
2019                                             dev_tgt_dev_list_entry) {
2020                                 clear_bit(SCST_TGT_DEV_RESERVED, 
2021                                         &tgt_dev_tmp->tgt_dev_flags);
2022                         }
2023                         cmd->dev->dev_reserved = 0;
2024                 }
2025                 scst_unblock_dev(cmd->dev);
2026         }
2027         
2028         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
2029                      (cmd->cdb[0] == MODE_SELECT_10) ||
2030                      (cmd->cdb[0] == LOG_SELECT)))
2031         {
2032                 if (cmd->status == 0) {
2033                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2034                                 "setting the SELECT UA (lun=%Ld)", 
2035                                 (uint64_t)cmd->lun);
2036                         spin_lock_bh(&scst_temp_UA_lock);
2037                         if (cmd->cdb[0] == LOG_SELECT) {
2038                                 scst_set_sense(scst_temp_UA,
2039                                         sizeof(scst_temp_UA),
2040                                         UNIT_ATTENTION, 0x2a, 0x02);
2041                         } else {
2042                                 scst_set_sense(scst_temp_UA,
2043                                         sizeof(scst_temp_UA),
2044                                         UNIT_ATTENTION, 0x2a, 0x01);
2045                         }
2046                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2047                                 sizeof(scst_temp_UA), 1);
2048                         spin_unlock_bh(&scst_temp_UA_lock);
2049                 }
2050                 scst_unblock_dev(cmd->dev);
2051         }
2052
2053 out:
2054         TRACE_EXIT_RES(res);
2055         return res;
2056 }
2057
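/*
 * "Dev done" stage: runs scst_done_cmd_check() and the dev handler's
 * dev_done() callback, then sets the next cmd state accordingly.
 */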
2058 static int scst_dev_done(struct scst_cmd *cmd)
2059 {
2060         int res = SCST_CMD_STATE_RES_CONT_SAME;
2061         int state;
2062         int atomic = scst_cmd_atomic(cmd);
2063
2064         TRACE_ENTRY();
2065
2066         if (atomic && !cmd->dev->handler->dev_done_atomic &&
2067             cmd->dev->handler->dev_done) 
2068         {
2069                 TRACE_DBG("Dev handler %s dev_done() can not be "
2070                       "called in atomic context, rescheduling to the thread",
2071                       cmd->dev->handler->name);
2072                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2073                 goto out;
2074         }
2075
2076         if (scst_done_cmd_check(cmd, &res))
2077                 goto out;
2078
2079         state = SCST_CMD_STATE_XMIT_RESP;
2080         if (likely(!scst_is_cmd_local(cmd)) && 
2081             likely(cmd->dev->handler->dev_done != NULL))
2082         {
2083                 int rc;
2084                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2085                       cmd->dev->handler->name, cmd);
2086                 rc = cmd->dev->handler->dev_done(cmd);
2087                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2088                       cmd->dev->handler->name, rc);
2089                 if (rc != SCST_CMD_STATE_DEFAULT)
2090                         state = rc;
2091         }
2092
2093         switch (state) {
2094         case SCST_CMD_STATE_REINIT:
2095                 cmd->state = state;
2096                 res = SCST_CMD_STATE_RES_RESTART;
2097                 break;
2098
2099         case SCST_CMD_STATE_DEV_PARSE:
2100         case SCST_CMD_STATE_PREPARE_SPACE:
2101         case SCST_CMD_STATE_RDY_TO_XFER:
2102         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2103         case SCST_CMD_STATE_DEV_DONE:
2104         case SCST_CMD_STATE_XMIT_RESP:
2105         case SCST_CMD_STATE_FINISHED:
2106                 cmd->state = state;
2107                 res = SCST_CMD_STATE_RES_CONT_SAME;
2108                 break;
2109
2110         case SCST_CMD_STATE_NEED_THREAD_CTX:
2111                 TRACE_DBG("Dev handler %s dev_done() requested "
2112                       "thread context, rescheduling",
2113                       cmd->dev->handler->name);
2114                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2115                 break;
2116
2117         default:
2118                 if (state >= 0) {
2119                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2120                                 "invalid cmd state %d", 
2121                                 cmd->dev->handler->name, state);
2122                 } else {
2123                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2124                                 "error %d", cmd->dev->handler->name, 
2125                                 state);
2126                 }
2127                 scst_set_cmd_error(cmd,
2128                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2129                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2130                 res = SCST_CMD_STATE_RES_CONT_SAME;
2131                 break;
2132         }
2133
2134 out:
2135         TRACE_EXIT_HRES(res);
2136         return res;
2137 }
2138
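/*
 * Passes the response to the target driver's xmit_response(), handling
 * aborted and NO_RESP commands as well as QUEUE_FULL retries.
 */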
2139 static int scst_xmit_response(struct scst_cmd *cmd)
2140 {
2141         int res, rc;
2142         int atomic = scst_cmd_atomic(cmd);
2143
2144         TRACE_ENTRY();
2145
2146         /* 
         * Check this here as well, in order to avoid unnecessary delays of
         * other commands.
2149          */
2150         if (unlikely(cmd->sent_to_midlev == 0) &&
2151             (cmd->tgt_dev != NULL))
2152         {
2153                 TRACE(TRACE_SCSI_SERIALIZING,
2154                       "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
2155                 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
2156                 cmd->sent_to_midlev = 1;
2157         }
2158
2159         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2160                 TRACE_DBG("%s", "xmit_response() can not be "
2161                       "called in atomic context, rescheduling to the thread");
2162                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2163                 goto out;
2164         }
2165
2166         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2167         smp_mb__after_set_bit();
2168
2169         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2170                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2171                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2172                                 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
2173                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2174                 }
2175         }
2176
2177         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2178                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
2179                         cmd, cmd->tag);
2180                 cmd->state = SCST_CMD_STATE_FINISHED;
2181                 res = SCST_CMD_STATE_RES_CONT_SAME;
2182                 goto out;
2183         }
2184
2185 #ifdef DEBUG_TM
2186         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2187                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2188                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2189                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2190                         goto out;
2191                 }
2192                 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2193                         cmd, cmd->tag);
2194                 schedule_timeout_uninterruptible(HZ);
2195         }
2196 #endif
2197
2198         while (1) {
2199                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2200
2201                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2202                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2203
2204                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2205
2206 #if defined(DEBUG) || defined(TRACING)
2207                 if (cmd->sg) {
2208                         int i;
2209                         struct scatterlist *sg = cmd->sg;
2210                         TRACE(TRACE_SEND_BOT, 
2211                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2212                               cmd->sg_cnt, sg, (void*)sg[0].page);
2213                         for(i = 0; i < cmd->sg_cnt; ++i) {
2214                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2215                                     "Xmitting sg", page_address(sg[i].page),
2216                                     sg[i].length);
2217                         }
2218                 }
2219 #endif
2220
2221 #ifdef DEBUG_RETRY
2222                 if (((scst_random() % 100) == 77))
2223                         rc = SCST_TGT_RES_QUEUE_FULL;
2224                 else
2225 #endif
2226                         rc = cmd->tgtt->xmit_response(cmd);
2227                 TRACE_DBG("xmit_response() returned %d", rc);
2228
2229                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2230                         goto out;
2231
2232                 /* Restore the previous state */
2233                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2234
2235                 switch (rc) {
2236                 case SCST_TGT_RES_QUEUE_FULL:
2237                 {
2238                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2239                                 break;
2240                         else
2241                                 continue;
2242                 }
2243
2244                 case SCST_TGT_RES_NEED_THREAD_CTX:
2245                 {
2246                         TRACE_DBG("Target driver %s xmit_response() "
2247                               "requested thread context, rescheduling",
2248                               cmd->tgtt->name);
2249                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2250                         break;
2251                 }
2252
2253                 default:
2254                         goto out_error;
2255                 }
2256                 break;
2257         }
2258
2259 out:
2260         /* Caution: cmd can be already dead here */
2261         TRACE_EXIT_HRES(res);
2262         return res;
2263
2264 out_error:
2265         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2266                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2267                         "fatal error", cmd->tgtt->name);
2268         } else {
2269                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2270                         "invalid value %d", cmd->tgtt->name, rc);
2271         }
2272         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2273         cmd->state = SCST_CMD_STATE_FINISHED;
2274         res = SCST_CMD_STATE_RES_CONT_SAME;
2275         goto out;
2276 }
2277
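/*
 * Final cleanup of the cmd: removes it from the lists, updates the counters,
 * completes a pending mgmt cmd, if any, and frees the cmd.
 */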
2278 static int scst_finish_cmd(struct scst_cmd *cmd)
2279 {
2280         int res;
2281
2282         TRACE_ENTRY();
2283
2284         if (cmd->mem_checked) {
2285                 spin_lock_bh(&scst_cmd_mem_lock);
2286                 scst_cur_cmd_mem -= cmd->bufflen;
2287                 spin_unlock_bh(&scst_cmd_mem_lock);
2288         }
2289
2290         spin_lock_irq(&scst_list_lock);
2291
2292         TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2293         list_del(&cmd->cmd_list_entry);
2294
2295         if (cmd->mgmt_cmnd)
2296                 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2297
2298         if (likely(cmd->tgt_dev != NULL))
2299                 cmd->tgt_dev->cmd_count--;
2300
2301         cmd->sess->sess_cmd_count--;
2302
2303         list_del(&cmd->search_cmd_list_entry);
2304
2305         spin_unlock_irq(&scst_list_lock);
2306
2307         scst_free_cmd(cmd);
2308
2309         res = SCST_CMD_STATE_RES_CONT_NEXT;
2310
2311         TRACE_EXIT_HRES(res);
2312         return res;
2313 }
2314
2315 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2316 {
2317         TRACE_ENTRY();
2318
2319         BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2320
2321         cmd->state = SCST_CMD_STATE_FINISHED;
2322         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2323
2324         TRACE_EXIT();
2325         return;
2326 }
2327
2328 /*
2329  * Returns 0 on success, > 0 when we need to wait for unblock,
2330  * < 0 if there is no device (lun) or device type handler.
2331  * Called under scst_list_lock and IRQs disabled
2332  */
2333 static int scst_translate_lun(struct scst_cmd *cmd)
2334 {
2335         struct scst_tgt_dev *tgt_dev = NULL;
2336         int res = 0;
2337
2338         TRACE_ENTRY();
2339
2340         scst_inc_cmd_count();   
2341
2342         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2343                 res = -1;
2344                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2345                       (uint64_t)cmd->lun);
2346                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2347                                     sess_tgt_dev_list_entry) 
2348                 {
2349                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2350                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2351
2352                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2353                                         PRINT_INFO_PR("Dev handler for device "
2354                                           "%Ld is NULL, the device will not be "
2355                                           "visible remotely", (uint64_t)cmd->lun);
2356                                         break;
2357                                 }
2358                                 
2359                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2360                                         cmd->tgt_dev_saved->cmd_count--;
2361                                         TRACE(TRACE_SCSI_SERIALIZING,
2362                                               "SCST_CMD_STATE_REINIT: "
2363                                               "incrementing expected_sn on tgt_dev_saved %p",
2364                                               cmd->tgt_dev_saved);
2365                                         scst_inc_expected_sn_unblock(
2366                                                 cmd->tgt_dev_saved, cmd, 1);
2367                                 }
2368                                 cmd->tgt_dev = tgt_dev;
2369                                 tgt_dev->cmd_count++;
2370                                 cmd->dev = tgt_dev->acg_dev->dev;
2371
2372                                 /* ToDo: cmd->queue_type */
2373
2374                                 /* scst_list_lock is enough to protect that */
2375                                 cmd->sn = tgt_dev->next_sn;
2376                                 tgt_dev->next_sn++;
2377
2378                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2379                                         "cmd->sn: %d", cmd->sn);
2380
2381                                 res = 0;
2382                                 break;
2383                         }
2384                 }
2385                 if (res != 0) {
2386                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
                                "nonexistent LU?", (uint64_t)cmd->lun);
2388                         scst_dec_cmd_count();
2389                 }
2390         } else {
2391                 if ( !cmd->sess->waiting) {
2392                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2393                               cmd->sess);
2394                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2395                                       &scst_dev_wait_sess_list);
2396                         cmd->sess->waiting = 1;
2397                 }
2398                 scst_dec_cmd_count();
2399                 res = 1;
2400         }
2401
2402         TRACE_EXIT_RES(res);
2403         return res;
2404 }
2405
2406 /* Called under scst_list_lock and IRQs disabled */
2407 static int scst_process_init_cmd(struct scst_cmd *cmd)
2408 {
2409         int res = 0;
2410
2411         TRACE_ENTRY();
2412
2413         res = scst_translate_lun(cmd);
2414         if (likely(res == 0)) {
2415                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2416                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
                        TRACE(TRACE_RETRY, "Too many pending commands on the "
                                "device, returning BUSY to initiator \"%s\"",
2419                                 (cmd->sess->initiator_name[0] == '\0') ?
2420                                   "Anonymous" : cmd->sess->initiator_name);
2421                         scst_set_busy(cmd);
2422                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2423                 }
2424                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2425                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2426         } else if (res < 0) {
2427                 TRACE_DBG("Finishing cmd %p", cmd);
2428                 scst_set_cmd_error(cmd,
2429                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2430                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2431                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2432                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2433         }
2434
2435         TRACE_EXIT_RES(res);
2436         return res;
2437 }
2438
2439 /* 
 * Called under scst_list_lock and IRQs disabled.
 * We don't drop the lock anywhere inside, because command execution
 * has to be serialized, i.e. commands must be executed in the order
 * of their arrival, and we set this order inside scst_translate_lun().
2444  */
2445 static int scst_do_job_init(struct list_head *init_cmd_list)
2446 {
2447         int res = 1;
2448
2449         TRACE_ENTRY();
2450
2451         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2452                 while (!list_empty(init_cmd_list)) {
2453                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2454                                                           typeof(*cmd),
2455                                                           cmd_list_entry);
2456                         res = scst_process_init_cmd(cmd);
2457                         if (res > 0)
2458                                 break;
2459                 }
2460         }
2461
2462         TRACE_EXIT_RES(res);
2463         return res;
2464 }
2465
2466 /* Called with no locks held */
2467 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2468         int left_locked)
2469 {
2470         int res;
2471
2472         TRACE_ENTRY();
2473
2474 #ifdef EXTRACHECKS
2475         BUG_ON(in_irq());
2476 #endif
2477
2478         cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2479                         SCST_CONTEXT_DIRECT_ATOMIC);
2480         cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2481
2482         do {
2483                 switch (cmd->state) {
2484                 case SCST_CMD_STATE_DEV_PARSE:
2485                         res = scst_parse_cmd(cmd);
2486                         break;
2487
2488                 case SCST_CMD_STATE_PREPARE_SPACE:
2489                         res = scst_prepare_space(cmd);
2490                         break;
2491
2492                 case SCST_CMD_STATE_RDY_TO_XFER:
2493                         res = scst_rdy_to_xfer(cmd);
2494                         break;
2495
2496                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2497                         res = scst_send_to_midlev(cmd);
2498                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2499                         break;
2500
2501                 case SCST_CMD_STATE_DEV_DONE:
2502                         res = scst_dev_done(cmd);
2503                         break;
2504
2505                 case SCST_CMD_STATE_XMIT_RESP:
2506                         res = scst_xmit_response(cmd);
2507                         break;
2508
2509                 case SCST_CMD_STATE_FINISHED:
2510                         res = scst_finish_cmd(cmd);
2511                         break;
2512
2513                 default:
2514                         PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2515                                cmd, cmd->state);
2516                         BUG();
2517                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2518                         break;
2519                 }
2520         } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2521
2522         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2523                 if (left_locked)
2524                         spin_lock_irq(&scst_list_lock);
2525         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2526                 spin_lock_irq(&scst_list_lock);
2527
2528                 switch (cmd->state) {
2529                 case SCST_CMD_STATE_DEV_PARSE:
2530                 case SCST_CMD_STATE_PREPARE_SPACE:
2531                 case SCST_CMD_STATE_RDY_TO_XFER:
2532                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2533                 case SCST_CMD_STATE_DEV_DONE:
2534                 case SCST_CMD_STATE_XMIT_RESP:
2535                 case SCST_CMD_STATE_FINISHED:
2536                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2537                         list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2538                         break;
2539 #ifdef EXTRACHECKS
                /* cmd states that are not valid here */
2541                 case SCST_CMD_STATE_DEFAULT:
2542                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2543                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2544                                 "useful list (left on scst cmd list)", cmd, 
2545                                 cmd->state);
2546                         spin_unlock_irq(&scst_list_lock);
2547                         BUG();
2548                         spin_lock_irq(&scst_list_lock);
2549                         break;
2550 #endif
2551                 default:
2552                         break;
2553                 }
2554                 cmd->non_atomic_only = 1;
2555                 if (!left_locked)
2556                         spin_unlock_irq(&scst_list_lock);
2557                 wake_up(&scst_list_waitQ);
2558         } else if (res == SCST_CMD_STATE_RES_RESTART) {
2559                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2560                         spin_lock_irq(&scst_list_lock);
2561                         TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2562                         list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2563                         if (!left_locked)
2564                                 spin_unlock_irq(&scst_list_lock);
2565                 } else
2566                         BUG();
2567         } else
2568                 BUG();
2569
2570         TRACE_EXIT_RES(res);
2571         return res;
2572 }
2573
2574 /* Called under scst_list_lock and IRQs disabled */
2575 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2576 {
2577         int res;
2578         struct scst_cmd *cmd;
2579         int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2580                         SCST_CONTEXT_DIRECT_ATOMIC);
2581
2582         TRACE_ENTRY();
2583
2584 #ifdef EXTRACHECKS
2585         {
2586                 int c = (context & ~SCST_PROCESSIBLE_ENV);
2587                 WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) && 
2588                         (c != SCST_CONTEXT_DIRECT));
2589         }
2590 #endif
2591
2592         tm_dbg_check_released_cmds();
2593
2594 restart:
2595         list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2596                 if (atomic && cmd->non_atomic_only) {
2597                         TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2598                         continue;
2599                 }
2600                 if (tm_dbg_check_cmd(cmd) != 0)
2601                         goto restart;
2602                 res = scst_process_active_cmd(cmd, context, NULL, 1);
2603                 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2604                         goto restart;
2605                 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2606                         goto restart;
2607                 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2608                         break;
2609                 } else
2610                         BUG();
2611         }
2612
2613         TRACE_EXIT();
2614         return;
2615 }
2616
2617 static inline int test_cmd_lists(void)
2618 {
2619         int res = !list_empty(&scst_active_cmd_list) ||
2620             (!list_empty(&scst_init_cmd_list) &&
2621              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2622             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2623             unlikely(scst_shut_threads_count > 0) ||
2624             tm_dbg_is_release();
2625         return res;
2626 }
2627
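/*
 * Main command processing thread: waits for work and processes the init and
 * active cmd lists until shutdown or until asked to exit.
 */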
2628 int scst_cmd_thread(void *arg)
2629 {
2630         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2631         int n;
2632
2633         TRACE_ENTRY();
2634
2635         spin_lock(&lock);
2636         n = scst_thread_num++;
2637         spin_unlock(&lock);
2638         daemonize("scsi_tgt%d", n);
2639         recalc_sigpending();
2640         set_user_nice(current, 10);
2641         current->flags |= PF_NOFREEZE;
2642
2643         spin_lock_irq(&scst_list_lock);
2644         while (1) {
2645                 wait_queue_t wait;
2646                 init_waitqueue_entry(&wait, current);
2647
2648                 if (!test_cmd_lists()) {
2649                         add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2650                         for (;;) {
2651                                 set_current_state(TASK_INTERRUPTIBLE);
2652                                 if (test_cmd_lists())
2653                                         break;
2654                                 spin_unlock_irq(&scst_list_lock);
2655                                 schedule();
2656                                 spin_lock_irq(&scst_list_lock);
2657                         }
2658                         set_current_state(TASK_RUNNING);
2659                         remove_wait_queue(&scst_list_waitQ, &wait);
2660                 }
2661
2662                 scst_do_job_init(&scst_init_cmd_list);
2663                 scst_do_job_active(&scst_active_cmd_list,
2664                                    SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
2665
2666                 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2667                     list_empty(&scst_cmd_list) &&
2668                     list_empty(&scst_active_cmd_list) &&
2669                     list_empty(&scst_init_cmd_list)) {
2670                         break;
2671                 }
2672                 
2673                 if (unlikely(scst_shut_threads_count > 0)) {
2674                         scst_shut_threads_count--;
2675                         break;
2676                 }
2677         }
2678         spin_unlock_irq(&scst_list_lock);
2679
2680         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2681                 smp_mb__after_atomic_dec();
2682                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2683                 up(scst_shutdown_mutex);
2684         }
2685
2686         TRACE_EXIT();
2687         return 0;
2688 }
2689
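/* Tasklet entry: processes the init and active cmd lists in atomic context */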
2690 void scst_cmd_tasklet(long p)
2691 {
2692         TRACE_ENTRY();
2693
2694         spin_lock_irq(&scst_list_lock);
2695
2696         scst_do_job_init(&scst_init_cmd_list);
2697         scst_do_job_active(&scst_active_cmd_list, 
2698                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2699
2700         spin_unlock_irq(&scst_list_lock);
2701
2702         TRACE_EXIT();
2703         return;
2704 }
2705
2706 /*
2707  * Returns 0 on success, < 0 if there is no device handler or
2708  * > 0 if SCST_FLAG_SUSPENDED set.
2709  */
2710 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2711 {
2712         struct scst_tgt_dev *tgt_dev = NULL;
2713         int res = -1;
2714
2715         TRACE_ENTRY();
2716
2717         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2718               (uint64_t)mcmd->lun);
2719
2720         spin_lock_irq(&scst_list_lock);
2721         scst_inc_cmd_count();   
2722         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2723                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2724                                     sess_tgt_dev_list_entry) 
2725                 {
2726                         if (tgt_dev->acg_dev->lun == mcmd->lun) {
2727                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2728                                 mcmd->mcmd_tgt_dev = tgt_dev;
2729                                 res = 0;
2730                                 break;
2731                         }
2732                 }
2733                 if (mcmd->mcmd_tgt_dev == NULL)
2734                         scst_dec_cmd_count();
2735         } else {
2736                 if ( !mcmd->sess->waiting) {
2737                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2738                               mcmd->sess);
2739                         list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2740                                       &scst_dev_wait_sess_list);
2741                         mcmd->sess->waiting = 1;
2742                 }
2743                 scst_dec_cmd_count();
2744                 res = 1;
2745         }
2746         spin_unlock_irq(&scst_list_lock);
2747
2748         TRACE_EXIT_HRES(res);
2749         return res;
2750 }
2751
2752 /* Called under scst_list_lock and IRQ off */
2753 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2754         struct scst_mgmt_cmd *mcmd)
2755 {
2756         TRACE_ENTRY();
2757
2758         TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2759                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2760                 mcmd->cmd_wait_count);
2761
2762         cmd->mgmt_cmnd = NULL;
2763
2764         if (cmd->completed)
2765                 mcmd->completed_cmd_count++;
2766
2767         mcmd->cmd_wait_count--;
2768         if (mcmd->cmd_wait_count > 0) {
2769                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2770                         mcmd->cmd_wait_count);
2771                 goto out;
2772         }
2773
2774         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2775
2776         if (mcmd->completed) {
2777                 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2778                         mcmd);
2779                 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2780                         &scst_active_mgmt_cmd_list);
2781         }
2782
2783         wake_up(&scst_mgmt_cmd_list_waitQ);
2784
2785 out:
2786         TRACE_EXIT();
2787         return;
2788 }
2789
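/*
 * Calls the dev handler's task_mgmt_fn(), if any, disabling BHs around the
 * call when IRQs are not already off; if set_status is set, translates the
 * result into mcmd->status.
 */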
2790 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2791         struct scst_tgt_dev *tgt_dev, int set_status)
2792 {
2793         int res = SCST_DEV_TM_NOT_COMPLETED;
2794         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2795                 int irq = irqs_disabled();
2796                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2797                         tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2798 #ifdef EXTRACHECKS
2799                 BUG_ON(in_irq());
2800 #endif
2801                 if (!irq)
2802                         local_bh_disable();
2803                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2804                         tgt_dev);
2805                 if (!irq)
2806                         local_bh_enable();
2807                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2808                       tgt_dev->acg_dev->dev->handler->name, res);
2809                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2810                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2811                                                 SCST_MGMT_STATUS_SUCCESS :
2812                                                 SCST_MGMT_STATUS_FAILED;
2813                 }
2814         }
2815         return res;
2816 }
2817
2818 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2819 {
2820         switch(mgmt_fn) {
2821                 case SCST_ABORT_TASK:
2822                 case SCST_ABORT_TASK_SET:
2823                 case SCST_CLEAR_TASK_SET:
2824                         return 1;
2825                 default:
2826                         return 0;
2827         }
2828 }
2829
2830 /* 
 * Called under scst_list_lock and IRQ off (to protect cmd
 * from being destroyed) + BHs also off
2834  */
2835 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2836         int other_ini, int call_dev_task_mgmt_fn)
2837 {
2838         TRACE_ENTRY();
2839
2840         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2841
2842         if (other_ini) {
2843                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2844                 smp_mb__after_set_bit();
2845         }
2846         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2847         smp_mb__after_set_bit();
2848
2849         if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2850                  scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2851
2852         if (mcmd) {
2853                 int defer;
2854                 if (cmd->tgtt->tm_sync_reply)
2855                         defer = 1;
2856                 else {
2857                         if (scst_is_strict_mgmt_fn(mcmd->fn))
2858                                 defer = test_bit(SCST_CMD_EXECUTING,
2859                                         &cmd->cmd_flags);
2860                         else
2861                                 defer = test_bit(SCST_CMD_XMITTING,
2862                                         &cmd->cmd_flags);
2863                 }
2864
2865                 if (defer) {
2866                         /*
                         * Delay the response until the command finishes, in
2868                          * order to guarantee that "no further responses from
2869                          * the task are sent to the SCSI initiator port" after
2870                          * response from the TM function is sent (SAM)
2871                          */
2872                         TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2873                                 "xmitted (state %d), deferring ABORT...", cmd,
2874                                 cmd->tag, cmd->state);
2875 #ifdef EXTRACHECKS
2876                         if (cmd->mgmt_cmnd) {
2877                                 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2878                                         "has non-NULL mgmt_cmnd %p!!! Current "
2879                                         "mcmd %p\n", cmd, cmd->tag, cmd->state,
2880                                         cmd->mgmt_cmnd, mcmd);
2881                         }
2882 #endif
2883                         BUG_ON(cmd->mgmt_cmnd);
2884                         mcmd->cmd_wait_count++;
2885                         cmd->mgmt_cmnd = mcmd;
2886                 }
2887         }
2888
2889         tm_dbg_release_cmd(cmd);
2890
2891         TRACE_EXIT();
2892         return;
2893 }
2894
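/*
 * Advances mcmd to its next state: EXECUTING (and returns -1) if it still has
 * to wait for affected commands to finish, otherwise DONE (and returns 0).
 */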
2895 /* Called under scst_list_lock and IRQ off */
2896 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2897 {
2898         int res;
2899         if (mcmd->cmd_wait_count != 0) {
2900                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2901                         "wait", mcmd->cmd_wait_count);
2902                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2903                 res = -1;
2904         } else {
2905                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2906                 res = 0;
2907         }
2908         mcmd->completed = 1;
2909         return res;
2910 }
2911
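/*
 * Walks all devices and moves aborted commands from their blocked cmd lists
 * back to the active cmd list, waking up the cmd threads if anything was
 * moved. Takes scst_mutex itself unless the caller already holds it.
 */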
2912 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2913 {
2914         struct scst_device *dev;
2915         int wake = 0;
2916
2917         TRACE_ENTRY();
2918
2919         if (!scst_mutex_held)
2920                 down(&scst_mutex);
2921
2922         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2923                 struct scst_cmd *cmd, *tcmd;
2924                 spin_lock_bh(&dev->dev_lock);
2925                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2926                                         blocked_cmd_list_entry) {
2927                         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2928                                 list_del(&cmd->blocked_cmd_list_entry);
2929                                 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2930                                         "to active cmd list", cmd);
2931                                 spin_lock_irq(&scst_list_lock);
2932                                 list_move_tail(&cmd->cmd_list_entry,
2933                                         &scst_active_cmd_list);
2934                                 spin_unlock_irq(&scst_list_lock);
2935                                 wake = 1;
2936                         }
2937                 }
2938                 spin_unlock_bh(&dev->dev_lock);
2939         }
2940
2941         if (!scst_mutex_held)
2942                 up(&scst_mutex);
2943
2944         if (wake)
2945                 wake_up(&scst_list_waitQ);
2946
2947         TRACE_EXIT();
2948         return;
2949 }
2950
2951 /* Aborts all commands of the given tgt_dev in its session, then unblocks aborted cmds */
2952 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2953         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2954 {
2955         struct scst_cmd *cmd;
2956         struct scst_session *sess = tgt_dev->sess;
2957
2958         TRACE_ENTRY();
2959
2960         local_bh_disable();
2961         spin_lock_irq(&scst_list_lock);
2962
2963         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2964         list_for_each_entry(cmd, &sess->search_cmd_list, 
2965                         search_cmd_list_entry) {
2966                 if ((cmd->tgt_dev == NULL) && 
2967                     (cmd->lun != tgt_dev->acg_dev->lun))
2968                         continue; /* not yet assigned and not for this LUN */
2969                 if ((cmd->tgt_dev != NULL) && (cmd->tgt_dev != tgt_dev))
2970                         continue;
2971                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2972         }
2973         spin_unlock_irq(&scst_list_lock);
2974         local_bh_enable();
2975
2976         scst_unblock_aborted_cmds(scst_mutex_held);
2977
2978         TRACE_EXIT();
2979         return;
2980 }
2981
2982 /* Returns 0 if the command processing should be continued, <0 otherwise */
2983 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2984 {
2985         int res;
2986         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2987         struct scst_device *dev = tgt_dev->acg_dev->dev;
2988
2989         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2990                 tgt_dev->acg_dev->lun, mcmd);
2991
2992         spin_lock_bh(&dev->dev_lock);
2993         __scst_block_dev(dev);
2994         spin_unlock_bh(&dev->dev_lock);
2995
2996         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2997         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2998
2999         res = scst_set_mcmd_next_state(mcmd);
3000
3001         TRACE_EXIT_RES(res);
3002         return res;
3003 }
3004
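/*
 * If another TM function is already active (SCST_FLAG_TM_ACTIVE), moves mcmd
 * to the delayed mgmt cmd list and returns -1; otherwise sets the flag and
 * returns 0, so processing can continue.
 */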
3005 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
3006 {
3007         /*
3008          * No need for special protection of SCST_FLAG_TM_ACTIVE, since
3009          * this function can only be called from a single thread.
3010          */
3011         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
3012                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
3013                         mcmd);
3014                 if (!locked)
3015                         spin_lock_irq(&scst_list_lock);
3016                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
3017                         &scst_delayed_mgmt_cmd_list);
3018                 if (!locked)
3019                         spin_unlock_irq(&scst_list_lock);
3020                 return -1;
3021         } else {
3022                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3023                 return 0;
3024         }
3025 }
3026
3027 /* Returns 0 if the command processing should be continued, 
3028  * >0 if it should be requeued, <0 otherwise */
3029 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3030 {
3031         int res = 0;
3032
3033         TRACE_ENTRY();
3034
3035         res = scst_check_delay_mgmt_cmd(mcmd, 1);
3036         if (res != 0)
3037                 goto out;
3038
3039         if (mcmd->fn == SCST_ABORT_TASK) {
3040                 struct scst_session *sess = mcmd->sess;
3041                 struct scst_cmd *cmd;
3042
3043                 local_bh_disable();
3044                 spin_lock_irq(&scst_list_lock);
3045                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3046                 if (cmd == NULL) {
3047                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3048                                 "tag %d not found", mcmd->tag);
3049                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3050                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3051                 } else {
3052                         TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
3053                                 "aborting it", cmd, mcmd->tag, cmd->sn);
3054                         mcmd->cmd_to_abort = cmd;
3055                         scst_abort_cmd(cmd, mcmd, 0, 1);
3056                         res = scst_set_mcmd_next_state(mcmd);
3057                         mcmd->cmd_to_abort = NULL; /* just in case */
3058                 }
3059                 spin_unlock_irq(&scst_list_lock);
3060                 local_bh_enable();
3061         } else {
3062                 int rc;
3063                 rc = scst_mgmt_translate_lun(mcmd);
3064                 if (rc < 0) {
3065                         PRINT_ERROR_PR("Corresponding device for lun %lld not "
3066                                 "found", (long long)mcmd->lun);
3067                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3068                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3069                 } else if (rc == 0)
3070                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
3071                 else
3072                         res = rc;
3073         }
3074
3075 out:
3076         TRACE_EXIT_RES(res);
3077         return res;
3078 }
3079
3080 /* Returns 0 if the command processing should be continued, <0 otherwise */
3081 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3082 {
3083         int res, rc;
3084         struct scst_device *dev, *d;
3085         struct scst_tgt_dev *tgt_dev;
3086         int cont, c;
3087         LIST_HEAD(host_devs);
3088
3089         TRACE_ENTRY();
3090
3091         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3092                 mcmd, mcmd->sess->sess_cmd_count);
3093
3094         down(&scst_mutex);
3095
3096         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3097                 int found = 0;
3098
3099                 spin_lock_bh(&dev->dev_lock);
3100                 __scst_block_dev(dev);
3101                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3102                 spin_unlock_bh(&dev->dev_lock);
3103
3104                 cont = 0;
3105                 c = 0;
3106                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3107                         dev_tgt_dev_list_entry) 
3108                 {
3109                         cont = 1;
3110                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3111                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3112                                 c = 1;
3113                         else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3114                                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3115                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3116                 if (cont && !c)
3117                         continue;
3118                 
3119                 if (dev->scsi_dev == NULL)
3120                         continue;
3121
3122                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3123                         if (dev->scsi_dev->host->host_no ==
3124                                     d->scsi_dev->host->host_no) 
3125                         {
3126                                 found = 1;
3127                                 break;
3128                         }
3129                 }
3130                 if (!found)
3131                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3132         }
3133
3134         /*
3135          * We assume here that for all commands already sent to the devices
3136          * the completion callbacks will be called on/after scsi_reset_provider().
3137          */
3138
3139         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3140                 /* dev->scsi_dev must be non-NULL here */
3141                 TRACE(TRACE_MGMT, "Resetting host %d bus",
3142                       dev->scsi_dev->host->host_no);
3143                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3144                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3145                       dev->scsi_dev->host->host_no,
3146                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3147                 if (rc != SUCCESS) {
3148                         /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3149                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3150                 }
3151         }
3152
3153         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3154                 if (dev->scsi_dev != NULL)
3155                         dev->scsi_dev->was_reset = 0;
3156         }
3157
3158         up(&scst_mutex);
3159
3160         spin_lock_irq(&scst_list_lock);
3161         tm_dbg_task_mgmt("TARGET RESET");
3162         res = scst_set_mcmd_next_state(mcmd);
3163         spin_unlock_irq(&scst_list_lock);
3164
3165         TRACE_EXIT_RES(res);
3166         return res;
3167 }
3168
3169 /* Returns 0 if the command processing should be continued, <0 otherwise */
3170 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3171 {
3172         int res, rc;
3173         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3174         struct scst_device *dev = tgt_dev->acg_dev->dev;
3175
3176         TRACE_ENTRY();
3177
3178         TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3179                 mcmd);
3180
3181         spin_lock_bh(&dev->dev_lock);
3182         __scst_block_dev(dev);
3183         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3184         spin_unlock_bh(&dev->dev_lock);
3185
3186         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3187         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3188                 goto out_tm_dbg;
3189
3190         if (dev->scsi_dev != NULL) {
3191                 TRACE(TRACE_MGMT, "Resetting SCSI device (host %d)",
3192                       dev->scsi_dev->host->host_no);
3193                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3194                 if (rc != SUCCESS)
3195                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3196                 dev->scsi_dev->was_reset = 0;
3197         }
3198
3199 out_tm_dbg:
3200         spin_lock_irq(&scst_list_lock);
3201         tm_dbg_task_mgmt("LUN RESET");
3202         res = scst_set_mcmd_next_state(mcmd);
3203         spin_unlock_irq(&scst_list_lock);
3204
3205         TRACE_EXIT_RES(res);
3206         return res;
3207 }
3208
3209 /* Returns 0 if the command processing should be continued, <0 otherwise */
3210 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3211         int nexus_loss)
3212 {
3213         int res;
3214         struct scst_session *sess = mcmd->sess;
3215         struct scst_tgt_dev *tgt_dev;
3216
3217         TRACE_ENTRY();
3218
3219         if (nexus_loss) {
3220                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3221                         mcmd);
3222         } else {
3223                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3224                         mcmd);
3225         }
3226
3227         down(&scst_mutex);
3228         list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3229                 sess_tgt_dev_list_entry) 
3230         {
3231                 struct scst_device *dev = tgt_dev->acg_dev->dev;
3232                 int rc;
3233
3234                 spin_lock_bh(&dev->dev_lock);
3235                 __scst_block_dev(dev);
3236                 spin_unlock_bh(&dev->dev_lock);
3237
3238                 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3239                 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3240                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3241
3242                 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3243                 if (nexus_loss)
3244                         scst_reset_tgt_dev(tgt_dev, 1);
3245         }
3246         up(&scst_mutex);
3247
3248         spin_lock_irq(&scst_list_lock);
3249         res = scst_set_mcmd_next_state(mcmd);
3250         spin_unlock_irq(&scst_list_lock);
3251
3252         TRACE_EXIT_RES(res);
3253         return res;
3254 }
3255
3256 /* Returns 0 if the command processing should be continued, <0 otherwise */
3257 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3258         int nexus_loss)
3259 {
3260         int res;
3261         struct scst_tgt *tgt = mcmd->sess->tgt;
3262         struct scst_session *sess;
3263         struct scst_device *dev;
3264         struct scst_tgt_dev *tgt_dev;
3265
3266         TRACE_ENTRY();
3267
3268         if (nexus_loss) {
3269                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3270                         mcmd);
3271         } else {
3272                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3273                         mcmd);
3274         }
3275
3276         down(&scst_mutex);
3277
3278         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3279                 spin_lock_bh(&dev->dev_lock);
3280                 __scst_block_dev(dev);
3281                 spin_unlock_bh(&dev->dev_lock);
3282         }
3283
3284         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3285                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3286                         sess_tgt_dev_list_entry) 
3287                 {
3288                         int rc;
3289
3290                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3291                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3292                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3293
3294                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3295                         if (nexus_loss)
3296                                 scst_reset_tgt_dev(tgt_dev, 1);
3297                 }
3298         }
3299
3300         up(&scst_mutex);
3301
3302         spin_lock_irq(&scst_list_lock);
3303         res = scst_set_mcmd_next_state(mcmd);
3304         spin_unlock_irq(&scst_list_lock);
3305
3306         TRACE_EXIT_RES(res);
3307         return res;
3308 }
3309
3310 /* Returns 0 if the command processing should be continued, <0 otherwise */
3311 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3312 {
3313         int res = 0;
3314
3315         TRACE_ENTRY();
3316
3317         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3318
3319         switch (mcmd->fn) {
3320         case SCST_ABORT_TASK_SET:
3321         case SCST_CLEAR_TASK_SET:
3322                 res = scst_abort_task_set(mcmd);
3323                 break;
3324
3325         case SCST_LUN_RESET:
3326                 res = scst_lun_reset(mcmd);
3327                 break;
3328
3329         case SCST_TARGET_RESET:
3330                 res = scst_target_reset(mcmd);
3331                 break;
3332
3333         case SCST_ABORT_ALL_TASKS_SESS:
3334                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3335                 break;
3336
3337         case SCST_NEXUS_LOSS_SESS:
3338                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3339                 break;
3340
3341         case SCST_ABORT_ALL_TASKS:
3342                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3343                 break;
3344
3345         case SCST_NEXUS_LOSS:
3346                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3347                 break;
3348
3349         case SCST_CLEAR_ACA:
3350                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3351                 /* Nothing to do (yet) */
3352                 break;
3353
3354         default:
3355                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3356                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3357                 break;
3358         }
3359
3360         TRACE_EXIT_RES(res);
3361         return res;
3362 }
3363
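/*
 * Completes a mgmt cmd: clears SCST_FLAG_TM_ACTIVE, reactivates the first
 * delayed mgmt cmd, if any, notifies the target driver via
 * task_mgmt_fn_done() and unblocks the devices blocked by the TM function.
 */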
3364 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3365 {
3366         struct scst_device *dev;
3367         struct scst_tgt_dev *tgt_dev;
3368
3369         TRACE_ENTRY();
3370
3371         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3372         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3373                 struct scst_mgmt_cmd *m;
3374                 spin_lock_irq(&scst_list_lock);
3375                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3376                                 mgmt_cmd_list_entry);
3377                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3378                         "cmd list", m);
3379                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3380                 spin_unlock_irq(&scst_list_lock);
3381         }
3382
3383         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3384         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3385                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3386
3387         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3388                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3389                       mcmd->sess->tgt->tgtt->name);
3390                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3391                 TRACE_MGMT_DBG("Target %s task_mgmt_fn_done() returned",
3392                       mcmd->sess->tgt->tgtt->name);
3393         }
3394
3395         switch (mcmd->fn) {
3396         case SCST_ABORT_TASK_SET:
3397         case SCST_CLEAR_TASK_SET:
3398         case SCST_LUN_RESET:
3399                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3400                 break;
3401
3402         case SCST_TARGET_RESET:
3403         case SCST_ABORT_ALL_TASKS:
3404         case SCST_NEXUS_LOSS:
3405                 down(&scst_mutex);
3406                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3407                         scst_unblock_dev(dev);
3408                 }
3409                 up(&scst_mutex);
3410                 break;
3411
3412         case SCST_NEXUS_LOSS_SESS:
3413         case SCST_ABORT_ALL_TASKS_SESS:
3414                 down(&scst_mutex);
3415                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3416                                 sess_tgt_dev_list_entry) {
3417                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3418                 }
3419                 up(&scst_mutex);
3420                 break;
3421
3422         case SCST_CLEAR_ACA:
3423         default:
3424                 break;
3425         }
3426
3427         mcmd->tgt_priv = NULL;
3428
3429         TRACE_EXIT();
3430         return;
3431 }
3432
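/*
 * Drives a mgmt cmd through its states (INIT -> READY -> DONE -> FINISHED)
 * until it is finished or has to wait for the affected commands to complete.
 */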
3433 /* Returns >0, if cmd should be requeued */
3434 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3435 {
3436         int res = 0;
3437
3438         TRACE_ENTRY();
3439
3440         TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3441
3442         while (1) {
3443                 switch (mcmd->state) {
3444                 case SCST_MGMT_CMD_STATE_INIT:
3445                         res = scst_mgmt_cmd_init(mcmd);
3446                         if (res)
3447                                 goto out;
3448                         break;
3449
3450                 case SCST_MGMT_CMD_STATE_READY:
3451                         if (scst_mgmt_cmd_exec(mcmd))
3452                                 goto out;
3453                         break;
3454
3455                 case SCST_MGMT_CMD_STATE_DONE:
3456                         scst_mgmt_cmd_send_done(mcmd);
3457                         break;
3458
3459                 case SCST_MGMT_CMD_STATE_FINISHED:
3460                         goto out_free;
3461
3462 #ifdef EXTRACHECKS
3463                 case SCST_MGMT_CMD_STATE_EXECUTING:
3464                         BUG();
3465 #endif
3466
3467                 default:
3468                         PRINT_ERROR_PR("Unknown state %d of management command",
3469                                     mcmd->state);
3470                         res = -1;
3471                         goto out_free;
3472                 }
3473         }
3474
3475 out:
3476         TRACE_EXIT_RES(res);
3477         return res;
3478
3479 out_free:
3480         scst_free_mgmt_cmd(mcmd, 1);
3481         goto out;
3482 }
3483
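/*
 * Wakeup condition for scst_mgmt_cmd_thread(): there is work on the active
 * mgmt cmd list and activity is not suspended, or shutdown was requested.
 */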
3484 static inline int test_mgmt_cmd_list(void)
3485 {
3486         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3487                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3488                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3489         return res;
3490 }
3491
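/*
 * Kernel thread that takes mgmt cmds from scst_active_mgmt_cmd_list and
 * processes them via scst_process_mgmt_cmd(); cmds that must be requeued are
 * put back at the head of the active list. Exits on SCST_FLAG_SHUTDOWN once
 * the active list is empty.
 */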
3492 int scst_mgmt_cmd_thread(void *arg)
3493 {
3494         struct scst_mgmt_cmd *mcmd;
3495
3496         TRACE_ENTRY();
3497
3498         daemonize("scsi_tgt_mc");
3499         recalc_sigpending();
3500         current->flags |= PF_NOFREEZE;
3501
3502         spin_lock_irq(&scst_list_lock);
3503         while (1) {
3504                 wait_queue_t wait;
3505                 init_waitqueue_entry(&wait, current);
3506
3507                 if (!test_mgmt_cmd_list()) {
3508                         add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3509                                                  &wait);
3510                         for (;;) {
3511                                 set_current_state(TASK_INTERRUPTIBLE);
3512                                 if (test_mgmt_cmd_list())
3513                                         break;
3514                                 spin_unlock_irq(&scst_list_lock);
3515                                 schedule();
3516                                 spin_lock_irq(&scst_list_lock);
3517                         }
3518                         set_current_state(TASK_RUNNING);
3519                         remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3520                 }
3521
3522                 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3523                        !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3524                 {
3525                         int rc;
3526                         mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3527                                           typeof(*mcmd), mgmt_cmd_list_entry);
3528                         TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3529                               mcmd);
3530                         list_move_tail(&mcmd->mgmt_cmd_list_entry,
3531                                        &scst_mgmt_cmd_list);
3532                         spin_unlock_irq(&scst_list_lock);
3533                         rc = scst_process_mgmt_cmd(mcmd);
3534                         spin_lock_irq(&scst_list_lock);
3535                         if (rc > 0) {
3536                                 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3537                                         "of active mgmt cmd list", mcmd);
3538                                 list_move(&mcmd->mgmt_cmd_list_entry,
3539                                        &scst_active_mgmt_cmd_list);
3540                         }
3541                 }
3542
3543                 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3544                     list_empty(&scst_active_mgmt_cmd_list)) 
3545                 {
3546                         break;
3547                 }
3548         }
3549         spin_unlock_irq(&scst_list_lock);
3550
3551         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3552                 smp_mb__after_atomic_dec();
3553                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3554                 up(scst_shutdown_mutex);
3555         }
3556
3557         TRACE_EXIT();
3558         return 0;
3559 }
3560
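/*
 * Allocates and minimally initializes a mgmt cmd for the given session.
 * Returns NULL if the target driver did not provide a task_mgmt_fn_done()
 * callback or if the allocation fails.
 */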
3561 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3562         *sess, int fn, int atomic, void *tgt_priv)
3563 {
3564         struct scst_mgmt_cmd *mcmd = NULL;
3565
3566         TRACE_ENTRY();
3567
3568         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3569                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3570                             "(target %s)", sess->tgt->tgtt->name);
3571                 goto out;
3572         }
3573
3574         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3575         if (mcmd == NULL)
3576                 goto out;
3577
3578         mcmd->sess = sess;
3579         mcmd->fn = fn;
3580         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3581         mcmd->tgt_priv = tgt_priv;
3582
3583 out:
3584         TRACE_EXIT();
3585         return mcmd;
3586 }
3587
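/*
 * Queues a newly received mgmt cmd: while the session is still initializing
 * it goes to the session's init_deferred_mcmd_list, otherwise it is added to
 * the active mgmt cmd list and the mgmt thread is woken up. Returns -1 if the
 * session initialization has failed.
 */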
3588 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3589         struct scst_mgmt_cmd *mcmd)
3590 {
3591         unsigned long flags;
3592         int res = 0;
3593
3594         TRACE_ENTRY();
3595
3596         scst_sess_get(sess);
3597
3598         spin_lock_irqsave(&scst_list_lock, flags);
3599
3600         sess->sess_cmd_count++;
3601
3602 #ifdef EXTRACHECKS
3603         if (unlikely(sess->shutting_down)) {
3604                 PRINT_ERROR_PR("%s",
3605                         "New mgmt cmd while shutting down the session");
3606                 BUG();
3607         }
3608 #endif
3609
3610         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3611                 switch(sess->init_phase) {
3612                 case SCST_SESS_IPH_INITING:
3613                         TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
3614                                 mcmd);
3615                         list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3616                                 &sess->init_deferred_mcmd_list);
3617                         goto out_unlock;
3618                 case SCST_SESS_IPH_SUCCESS:
3619                         break;
3620                 case SCST_SESS_IPH_FAILED:
3621                         res = -1;
3622                         goto out_unlock;
3623                 default:
3624                         BUG();
3625                 }
3626         }
3627
3628         TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3629         list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3630
3631         spin_unlock_irqrestore(&scst_list_lock, flags);
3632
3633         wake_up(&scst_mgmt_cmd_list_waitQ);
3634
3635 out:
3636         TRACE_EXIT();
3637         return res;