1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43         unsigned long *pflags, int left_locked)
44 {
45         int res;
46
47         TRACE_ENTRY();
48
49         TRACE_DBG("Moving cmd %p to cmd list", cmd);
50         list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
51
52         /* This is an inline func., so unneeded code will be optimized out */
53         if (pflags)
54                 spin_unlock_irqrestore(&scst_list_lock, *pflags);
55         else
56                 spin_unlock_irq(&scst_list_lock);
57
58         res = __scst_process_active_cmd(cmd, context, left_locked);
59
60         TRACE_EXIT_RES(res);
61         return res;
62 }
63
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We suppose that other CPU(s) are rather idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  * Must not be called in parallel with scst_unregister_session() for the
87  * same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both a wrong LUN and a wrong CDB, defer the error reporting
115          * to scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
132
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
134 {
135         int res = 0;
136         unsigned long flags = 0;
137         struct scst_session *sess = cmd->sess;
138
139         TRACE_ENTRY();
140
141         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142         TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
143                 (uint64_t)cmd->lun, cmd->cdb_len);
144         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Recieving CDB",
145                 cmd->cdb, cmd->cdb_len);
146
147         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
149         {
150                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
152                         cmd->tgtt->name);
153                 pref_context = SCST_CONTEXT_TASKLET;
154         }
155
156         spin_lock_irqsave(&scst_list_lock, flags);
157
158         /* Do it here; this saves us a lock or an atomic operation */
159         sess->sess_cmd_count++;
160
161         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
162
163         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164                 switch(sess->init_phase) {
165                 case SCST_SESS_IPH_SUCCESS:
166                         break;
167                 case SCST_SESS_IPH_INITING:
168                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169                         list_add_tail(&cmd->cmd_list_entry, 
170                                 &sess->init_deferred_cmd_list);
171                         goto out_unlock_flags;
172                 case SCST_SESS_IPH_FAILED:
173                         scst_set_busy(cmd);
174                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
175                         TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176                         list_add_tail(&cmd->cmd_list_entry, 
177                                 &scst_active_cmd_list);
178                         goto active;
179                 default:
180                         BUG();
181                 }
182         }
183
184         if (unlikely(cmd->lun == (lun_t)-1)) {
185                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186                 scst_set_cmd_error(cmd,
187                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
191                 goto active;
192         }
193
194         if (unlikely(cmd->cdb_len == 0)) {
195                 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196                 scst_set_cmd_error(cmd,
197                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
201                 goto active;
202         }
203
204         cmd->state = SCST_CMD_STATE_INIT;
205
206         TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207         list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
208
209         switch (pref_context) {
210         case SCST_CONTEXT_DIRECT:
211         case SCST_CONTEXT_DIRECT_ATOMIC:
212                 res = scst_do_job_init(&scst_init_cmd_list);
213                 if (res > 0)
214                         goto out_unlock_flags;
215                 break;
216
217         case SCST_CONTEXT_THREAD:
218                 goto out_thread_unlock_flags;
219
220         case SCST_CONTEXT_TASKLET:
221                 scst_schedule_tasklet();
222                 goto out_unlock_flags;
223
224         default:
225                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
226                             pref_context);
227                 goto out_thread_unlock_flags;
228         }
229
230 active:
231         switch (pref_context) {
232         case SCST_CONTEXT_DIRECT:
233         case SCST_CONTEXT_DIRECT_ATOMIC:
234                 scst_process_active_cmd(cmd, pref_context, &flags, 0);
235                 break;
236
237         case SCST_CONTEXT_THREAD:
238                 goto out_thread_unlock_flags;
239
240         case SCST_CONTEXT_TASKLET:
241                 scst_schedule_tasklet();
242                 goto out_unlock_flags;
243
244         default:
245                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
246                             pref_context);
247                 goto out_thread_unlock_flags;
248         }
249
250 out:
251         TRACE_EXIT();
252         return;
253
254 out_unlock_flags:
255         spin_unlock_irqrestore(&scst_list_lock, flags);
256         goto out;
257
258 out_thread_unlock_flags:
259         cmd->non_atomic_only = 1;
260         spin_unlock_irqrestore(&scst_list_lock, flags);
261         wake_up(&scst_list_waitQ);
262         goto out;
263 }
264
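#if 0
/*
 * Illustrative only (not part of SCST): a minimal sketch of how a target
 * driver might hand a just-received SCSI command to SCST using the two
 * entry points above. my_tgt_queue_cmd() and its arguments are hypothetical;
 * scst_rx_cmd() and scst_cmd_init_done() are the real calls. A real driver
 * would also fill in the tag and any expected transfer attributes before
 * calling scst_cmd_init_done().
 */
static void my_tgt_queue_cmd(struct scst_session *sess, const uint8_t *lun,
	int lun_len, const uint8_t *cdb, int cdb_len)
{
	struct scst_cmd *cmd;

	/* Process context here, so a sleeping (non-atomic) allocation is fine */
	cmd = scst_rx_cmd(sess, lun, lun_len, cdb, cdb_len, 0);
	if (cmd == NULL)
		return; /* Out of memory: the transport must recover on its own */

	/* Tell SCST the cmd is filled in; let it be processed in thread context */
	scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
}
#endif
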
265 static int scst_parse_cmd(struct scst_cmd *cmd)
266 {
267         int res = SCST_CMD_STATE_RES_CONT_SAME;
268         int state;
269         struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270         struct scst_device *dev = cmd->dev;
271         struct scst_info_cdb cdb_info;
272         int atomic = scst_cmd_atomic(cmd);
273         int set_dir = 1;
274
275         TRACE_ENTRY();
276
277         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278                 TRACE_DBG("ABORTED set, returning ABORTED "
279                         "for cmd %p", cmd);
280                 goto out_xmit;
281         }
282
283         if (atomic && !dev->handler->parse_atomic) {
284                 TRACE_DBG("Dev handler %s parse() can not be "
285                       "called in atomic context, rescheduling to the thread",
286                       dev->handler->name);
287                 res = SCST_CMD_STATE_RES_NEED_THREAD;
288                 goto out;
289         }
290
291         /*
292          * The expected transfer data supplied by the SCSI transport via the
293          * target driver is untrusted, so we prefer to fetch it from the CDB.
294          * Additionally, not all transports support supplying the expected
295          * transfer data.
296          */
297
298         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
299                         &cdb_info) != 0)) 
300         {
301                 static int t;
302                 if (t < 10) {
303                         t++;
304                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305                                 "Should you update scst_scsi_op_table?",
306                                 cmd->cdb[0], dev->handler->name);
307                 }
308                 if (scst_cmd_is_expected_set(cmd)) {
309                         TRACE(TRACE_MINOR, "Using initiator supplied values: "
310                                 "direction %d, transfer_len %d",
311                                 cmd->expected_data_direction,
312                                 cmd->expected_transfer_len);
313                         cmd->data_direction = cmd->expected_data_direction;
314                         cmd->bufflen = cmd->expected_transfer_len;
315                         /* Restore (most probably) lost CDB length */
316                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317                         if (cmd->cdb_len == -1) {
318                                 PRINT_ERROR_PR("Unable to get CDB length for "
319                                         "opcode 0x%02x. Returning INVALID "
320                                         "OPCODE", cmd->cdb[0]);
321                                 scst_set_cmd_error(cmd,
322                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
323                                 goto out_xmit;
324                         }
325                 }
326                 else {
327                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328                              "target %s not supplied expected values. "
329                              "Returning INVALID OPCODE.", cmd->cdb[0], 
330                              dev->handler->name, cmd->tgtt->name);
331                         scst_set_cmd_error(cmd,
332                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
333                         goto out_xmit;
334                 }
335         } else {
336                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337                         "set %s), transfer_len=%d (expected len %d), flags=%d",
338                         cdb_info.op_name, cdb_info.direction,
339                         cmd->expected_data_direction,
340                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341                         cdb_info.transfer_len, cmd->expected_transfer_len,
342                         cdb_info.flags);
343
344                 /* Restore (most probably) lost CDB length */
345                 cmd->cdb_len = cdb_info.cdb_len;
346
347                 cmd->data_direction = cdb_info.direction;
348                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349                         cmd->bufflen = cdb_info.transfer_len;
350                 /* else cmd->bufflen remains 0, as it was initialized */
351         }
352
353         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354                 PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
355                             "(opcode 0x%02x)", cmd->cdb[0]);
356                 scst_set_cmd_error(cmd,
357                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
358                 goto out_xmit;
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362                 PRINT_ERROR_PR("Linked commands are not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (likely(!scst_is_cmd_local(cmd))) {
370                 TRACE_DBG("Calling dev handler %s parse(%p)",
371                       dev->handler->name, cmd);
372                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373                 state = dev->handler->parse(cmd, &cdb_info);
374                 TRACE_DBG("Dev handler %s parse() returned %d",
375                         dev->handler->name, state);
376
377                 if (state == SCST_CMD_STATE_DEFAULT)
378                         state = SCST_CMD_STATE_PREPARE_SPACE;
379         }
380         else
381                 state = SCST_CMD_STATE_PREPARE_SPACE;
382
383         if (scst_cmd_is_expected_set(cmd)) {
384                 if (cmd->expected_transfer_len < cmd->bufflen) {
385                         TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
386                                 "cmd->bufflen(%d), using expected_transfer_len "
387                                 "instead", cmd->expected_transfer_len,
388                                 cmd->bufflen);
389                         cmd->bufflen = cmd->expected_transfer_len;
390                 }
391         }
392
393         if (cmd->data_len == -1)
394                 cmd->data_len = cmd->bufflen;
395
396 #ifdef EXTRACHECKS
397         if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
398                 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
399                         (state != SCST_CMD_STATE_DEV_PARSE)) ||
400                     ((cmd->bufflen != 0) && 
401                         (cmd->data_direction == SCST_DATA_NONE)) ||
402                     ((cmd->bufflen == 0) && 
403                         (cmd->data_direction != SCST_DATA_NONE)) ||
404                     ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
405                         (state > SCST_CMD_STATE_PREPARE_SPACE))) 
406                 {
407                         PRINT_ERROR_PR("Dev handler %s parse() returned "
408                                        "invalid cmd data_direction %d, "
409                                        "bufflen %zd or state %d (opcode 0x%x)",
410                                        dev->handler->name, 
411                                        cmd->data_direction, cmd->bufflen,
412                                        state, cmd->cdb[0]);
413                         goto out_error;
414                 }
415         }
416 #endif
417
418         switch (state) {
419         case SCST_CMD_STATE_PREPARE_SPACE:
420         case SCST_CMD_STATE_DEV_PARSE:
421         case SCST_CMD_STATE_RDY_TO_XFER:
422         case SCST_CMD_STATE_SEND_TO_MIDLEV:
423         case SCST_CMD_STATE_DEV_DONE:
424         case SCST_CMD_STATE_XMIT_RESP:
425         case SCST_CMD_STATE_FINISHED:
426                 cmd->state = state;
427                 res = SCST_CMD_STATE_RES_CONT_SAME;
428                 break;
429
430         case SCST_CMD_STATE_REINIT:
431                 cmd->tgt_dev_saved = tgt_dev_saved;
432                 cmd->state = state;
433                 res = SCST_CMD_STATE_RES_RESTART;
434                 set_dir = 0;
435                 break;
436
437         case SCST_CMD_STATE_NEED_THREAD_CTX:
438                 TRACE_DBG("Dev handler %s parse() requested thread "
439                       "context, rescheduling", dev->handler->name);
440                 res = SCST_CMD_STATE_RES_NEED_THREAD;
441                 set_dir = 0;
442                 break;
443
444         default:
445                 if (state >= 0) {
446                         PRINT_ERROR_PR("Dev handler %s parse() returned "
447                              "invalid cmd state %d (opcode %d)", 
448                              dev->handler->name, state, cmd->cdb[0]);
449                 } else {
450                         PRINT_ERROR_PR("Dev handler %s parse() returned "
451                                 "error %d (opcode %d)", dev->handler->name, 
452                                 state, cmd->cdb[0]);
453                 }
454                 goto out_error;
455         }
456
457         if ((cmd->resp_data_len == -1) && set_dir) {
458                 if (cmd->data_direction == SCST_DATA_READ)
459                         cmd->resp_data_len = cmd->bufflen;
460                 else
461                          cmd->resp_data_len = 0;
462         }
463         
464 out:
465         TRACE_EXIT_HRES(res);
466         return res;
467
468 out_error:
469         /* dev_done() will be called as part of the regular cmd's finish */
470         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
471         cmd->state = SCST_CMD_STATE_DEV_DONE;
472         res = SCST_CMD_STATE_RES_CONT_SAME;
473         goto out;
474
475 out_xmit:
476         cmd->state = SCST_CMD_STATE_XMIT_RESP;
477         res = SCST_CMD_STATE_RES_CONT_SAME;
478         goto out;
479 }
480
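#if 0
/*
 * Illustrative only (not part of SCST): a minimal sketch of a dev handler
 * parse() callback as invoked from scst_parse_cmd() above. Returning
 * SCST_CMD_STATE_DEFAULT lets SCST continue with SCST_CMD_STATE_PREPARE_SPACE;
 * other valid returns (e.g. SCST_CMD_STATE_DEV_DONE, SCST_CMD_STATE_REINIT,
 * SCST_CMD_STATE_NEED_THREAD_CTX, or a negative error) are handled by the
 * switch above. my_parse() is a hypothetical name.
 */
static int my_parse(struct scst_cmd *cmd, struct scst_info_cdb *info)
{
	/*
	 * Direction and transfer length were already set from
	 * scst_scsi_op_table; nothing extra to do in this sketch.
	 */
	return SCST_CMD_STATE_DEFAULT;
}
#endif
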
481 void scst_cmd_mem_work_fn(void *p)
482 {
483         TRACE_ENTRY();
484
485         spin_lock_bh(&scst_cmd_mem_lock);
486
487         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
488         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
489                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
490                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
491         } else {
492                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
493                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
494         }
495         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
496
497         spin_unlock_bh(&scst_cmd_mem_lock);
498
499         TRACE_EXIT();
500         return;
501 }
502
503 int scst_check_mem(struct scst_cmd *cmd)
504 {
505         int res = 0;
506
507         TRACE_ENTRY();
508
509         if (cmd->mem_checked)
510                 goto out;
511
512         spin_lock_bh(&scst_cmd_mem_lock);
513
514         scst_cur_cmd_mem += cmd->bufflen;
515         cmd->mem_checked = 1;
516         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
517                 goto out_unlock;
518
519         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
520                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
521                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
522                 (cmd->sess->initiator_name[0] == '\0') ?
523                   "Anonymous" : cmd->sess->initiator_name,
524                 scst_cur_max_cmd_mem >> 10);
525
526         scst_cur_cmd_mem -= cmd->bufflen;
527         cmd->mem_checked = 0;
528         scst_set_busy(cmd);
529         cmd->state = SCST_CMD_STATE_XMIT_RESP;
530         res = 1;
531
532 out_unlock:
533         spin_unlock_bh(&scst_cmd_mem_lock);
534
535 out:
536         TRACE_EXIT_RES(res);
537         return res;
538 }
539
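/*
 * Memory throttling overview: scst_check_mem() above charges cmd->bufflen
 * against scst_cur_max_cmd_mem and returns the cmd with BUSY/QUEUE FULL when
 * the limit is exceeded. When a buffer allocation later fails,
 * scst_low_cur_max_cmd_mem() below shrinks the limit, and
 * scst_cmd_mem_work_fn() gradually restores it up to scst_max_cmd_mem.
 */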
540 static void scst_low_cur_max_cmd_mem(void)
541 {
542         TRACE_ENTRY();
543
544         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
545                 cancel_delayed_work(&scst_cmd_mem_work);
546                 flush_scheduled_work();
547                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
548         }
549
550         spin_lock_bh(&scst_cmd_mem_lock);
551
552         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
553                                 (scst_cur_cmd_mem >> 2);
554         if (scst_cur_max_cmd_mem < 16*1024*1024)
555                 scst_cur_max_cmd_mem = 16*1024*1024;
556
557         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
558                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
559                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
560                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
561         }
562
563         spin_unlock_bh(&scst_cmd_mem_lock);
564
565         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
566
567         TRACE_EXIT();
568         return;
569 }
570
571 static int scst_prepare_space(struct scst_cmd *cmd)
572 {
573         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
574
575         TRACE_ENTRY();
576
577         if (cmd->data_direction == SCST_DATA_NONE) {
578                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
579                 goto out;
580         }
581
582         r = scst_check_mem(cmd);
583         if (unlikely(r != 0))
584                 goto out;
585
586         if (cmd->data_buf_tgt_alloc) {
587                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
588                 r = cmd->tgtt->alloc_data_buf(cmd);
589                 cmd->data_buf_alloced = (r == 0);
590         } else
591                 r = scst_alloc_space(cmd);
592
593         if (r != 0) {
594                 if (scst_cmd_atomic(cmd)) {
595                         TRACE_MEM("%s", "Atomic memory allocation failed, "
596                               "rescheduling to the thread");
597                         res = SCST_CMD_STATE_RES_NEED_THREAD;
598                         goto out;
599                 } else
600                         goto out_no_space;
601         }
602
603         switch (cmd->data_direction) {
604         case SCST_DATA_WRITE:
605                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
606                 break;
607
608         default:
609                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
610                 break;
611         }
612
613 out:
614         TRACE_EXIT_HRES(res);
615         return res;
616
617 out_no_space:
618         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
619                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
620         scst_low_cur_max_cmd_mem();
621         scst_set_busy(cmd);
622         cmd->state = SCST_CMD_STATE_DEV_DONE;
623         res = SCST_CMD_STATE_RES_CONT_SAME;
624         goto out;
625 }
626
627 /* No locks */
628 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
629 {
630         struct scst_tgt *tgt = cmd->sess->tgt;
631         int res = 0;
632         unsigned long flags;
633
634         TRACE_ENTRY();
635
636         spin_lock_irqsave(&tgt->tgt_lock, flags);
637         tgt->retry_cmds++;
638         smp_mb();
639         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
640               tgt->retry_cmds);
641         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
642                 /* At least one cmd finished, so try again */
643                 tgt->retry_cmds--;
644                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
645                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
646                       "retry_cmds=%d)", finished_cmds,
647                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
648                 res = -1;
649                 goto out_unlock_tgt;
650         }
651
652         TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
653         /* IRQ already off */
654         spin_lock(&scst_list_lock);
655         list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
656         spin_unlock(&scst_list_lock);
657
658         if (!tgt->retry_timer_active) {
659                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
660                 add_timer(&tgt->retry_timer);
661                 tgt->retry_timer_active = 1;
662         }
663
664 out_unlock_tgt:
665         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
666
667         TRACE_EXIT_RES(res);
668         return res;
669 }
670
671 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
672 {
673         int res, rc;
674         int atomic = scst_cmd_atomic(cmd);
675
676         TRACE_ENTRY();
677
678         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
679         {
680                 TRACE_DBG("ABORTED set, returning ABORTED for "
681                         "cmd %p", cmd);
682                 goto out_dev_done;
683         }
684
685         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
686                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
687                       "called in atomic context, rescheduling to the thread");
688                 res = SCST_CMD_STATE_RES_NEED_THREAD;
689                 goto out;
690         }
691
692         while (1) {
693                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
694
695                 res = SCST_CMD_STATE_RES_CONT_NEXT;
696                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
697
698                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
699 #ifdef DEBUG_RETRY
700                 if (((scst_random() % 100) == 75))
701                         rc = SCST_TGT_RES_QUEUE_FULL;
702                 else
703 #endif
704                         rc = cmd->tgtt->rdy_to_xfer(cmd);
705                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
706
707                 if (likely(rc == SCST_TGT_RES_SUCCESS))
708                         goto out;
709
710                 /* Restore the previous state */
711                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
712
713                 switch (rc) {
714                 case SCST_TGT_RES_QUEUE_FULL:
715                 {
716                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
717                                 break;
718                         else
719                                 continue;
720                 }
721
722                 case SCST_TGT_RES_NEED_THREAD_CTX:
723                 {
724                         TRACE_DBG("Target driver %s "
725                               "rdy_to_xfer() requested thread "
726                               "context, rescheduling", cmd->tgtt->name);
727                         res = SCST_CMD_STATE_RES_NEED_THREAD;
728                         break;
729                 }
730
731                 default:
732                         goto out_error_rc;
733                 }
734                 break;
735         }
736
737 out:
738         TRACE_EXIT_HRES(res);
739         return res;
740
741 out_error_rc:
742         if (rc == SCST_TGT_RES_FATAL_ERROR) {
743                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
744                      "fatal error", cmd->tgtt->name);
745         } else {
746                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
747                             "value %d", cmd->tgtt->name, rc);
748         }
749         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
750
751 out_dev_done:
752         cmd->state = SCST_CMD_STATE_DEV_DONE;
753         res = SCST_CMD_STATE_RES_CONT_SAME;
754         goto out;
755 }
756
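#if 0
/*
 * Illustrative only (not part of SCST): a minimal sketch of a target driver
 * rdy_to_xfer() callback and how its return values map to the handling in
 * scst_rdy_to_xfer() above. my_hw_queue_full() and my_tgt_start_data_in()
 * are hypothetical driver helpers.
 */
static int my_tgt_rdy_to_xfer(struct scst_cmd *cmd)
{
	if (my_hw_queue_full(cmd->tgt))
		return SCST_TGT_RES_QUEUE_FULL;  /* cmd is moved to the retry list */

	if (my_tgt_start_data_in(cmd) != 0)
		return SCST_TGT_RES_FATAL_ERROR; /* cmd completes with HARDWARE ERROR */

	return SCST_TGT_RES_SUCCESS;             /* driver calls scst_rx_data() later */
}
#endif
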
757 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
758         int check_retries)
759 {
760         unsigned long flags;
761         int rc;
762
763         TRACE_ENTRY();
764
765         TRACE_DBG("Context: %d", context);
766
767         switch(context) {
768         case SCST_CONTEXT_DIRECT:
769         case SCST_CONTEXT_DIRECT_ATOMIC:
770                 if (check_retries)
771                         scst_check_retries(cmd->tgt, 0);
772                 cmd->non_atomic_only = 0;
773                 rc = __scst_process_active_cmd(cmd, context, 0);
774                 if (rc == SCST_CMD_STATE_RES_NEED_THREAD)
775                         goto out_thread;
776                 break;
777
778         default:
779                 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
780                             context);
781                 /* fall through */
782         case SCST_CONTEXT_THREAD:
783                 if (check_retries)
784                         scst_check_retries(cmd->tgt, 1);
785                 goto out_thread;
786
787         case SCST_CONTEXT_TASKLET:
788                 if (check_retries)
789                         scst_check_retries(cmd->tgt, 1);
790                 cmd->non_atomic_only = 0;
791                 spin_lock_irqsave(&scst_list_lock, flags);
792                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
793                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
794                 spin_unlock_irqrestore(&scst_list_lock, flags);
795                 scst_schedule_tasklet();
796                 break;
797         }
798 out:
799         TRACE_EXIT();
800         return;
801
802 out_thread:
803         cmd->non_atomic_only = 1;
804         spin_lock_irqsave(&scst_list_lock, flags);
805         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
806         list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
807         spin_unlock_irqrestore(&scst_list_lock, flags);
808         wake_up(&scst_list_waitQ);
809         goto out;
810 }
811
812 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
813 {
814         TRACE_ENTRY();
815
816         TRACE_DBG("Preferred context: %d", pref_context);
817         TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
818         cmd->non_atomic_only = 0;
819
820         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
821                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
822         {
823                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
824                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
825                         cmd->tgtt->name);
826                 pref_context = SCST_CONTEXT_TASKLET;
827         }
828
829         switch (status) {
830         case SCST_RX_STATUS_SUCCESS:
831                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
832                 break;
833
834         case SCST_RX_STATUS_ERROR_SENSE_SET:
835                 cmd->state = SCST_CMD_STATE_DEV_DONE;
836                 break;
837
838         case SCST_RX_STATUS_ERROR_FATAL:
839                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
840                 /* fall through */
841         case SCST_RX_STATUS_ERROR:
842                 scst_set_cmd_error(cmd,
843                            SCST_LOAD_SENSE(scst_sense_hardw_error));
844                 cmd->state = SCST_CMD_STATE_DEV_DONE;
845                 break;
846
847         default:
848                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
849                         status);
850                 cmd->state = SCST_CMD_STATE_DEV_DONE;
851                 break;
852         }
853
854         scst_proccess_redirect_cmd(cmd, pref_context, 1);
855
856         TRACE_EXIT();
857         return;
858 }
859
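#if 0
/*
 * Illustrative only (not part of SCST): a sketch of a target driver's
 * data-in completion path reporting received WRITE data to SCST via
 * scst_rx_data() above. my_data_xfer_done() and its "ok" argument are
 * hypothetical.
 */
static void my_data_xfer_done(struct scst_cmd *cmd, int ok)
{
	/*
	 * SCST_CONTEXT_THREAD is always safe here; the direct contexts are
	 * only allowed outside IRQ context, as checked by scst_rx_data().
	 */
	scst_rx_data(cmd, ok ? SCST_RX_STATUS_SUCCESS : SCST_RX_STATUS_ERROR,
		SCST_CONTEXT_THREAD);
}
#endif
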
860 /* No locks supposed to be held */
861 static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
862         int rq_sense_len, int *next_state)
863 {
864         int sense_valid;
865         struct scst_device *dev = cmd->dev;
866         int dbl_ua_possible, ua_sent = 0;
867
868         TRACE_ENTRY();
869
870         /* If we had an internal bus reset behind us, set the command error UA */
871         if ((dev->scsi_dev != NULL) &&
872             unlikely(cmd->host_status == DID_RESET) &&
873             scst_is_ua_command(cmd))
874         {
875                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
876                       dev->scsi_dev->was_reset, cmd->host_status);
877                 scst_set_cmd_error(cmd,
878                    SCST_LOAD_SENSE(scst_sense_reset_UA));
879                 /* just in case */
880                 cmd->ua_ignore = 0;
881                 /* It looks like it is safe to clear was_reset here */
882                 dev->scsi_dev->was_reset = 0;
883                 smp_mb();
884         }
885
886         if (rq_sense != NULL) {
887                 sense_valid = SCST_SENSE_VALID(rq_sense);
888                 if (sense_valid) {
889                         memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
890                         /* 
891                          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
892                          * in init_scst()
893                          */
894                         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
895                 }
896         } else
897                 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
898
899         dbl_ua_possible = dev->dev_double_ua_possible;
900         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
901         if (unlikely(dbl_ua_possible)) {
902                 spin_lock_bh(&dev->dev_lock);
903                 barrier(); /* to reread dev_double_ua_possible */
904                 dbl_ua_possible = dev->dev_double_ua_possible;
905                 if (dbl_ua_possible)
906                         ua_sent = dev->dev_reset_ua_sent;
907                 else
908                         spin_unlock_bh(&dev->dev_lock);
909         }
910
911         if (sense_valid) {
912                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
913                              sizeof(cmd->sense_buffer));
914                 /* Check Unit Attention Sense Key */
915                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
916                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
917                                 if (dbl_ua_possible) 
918                                 {
919                                         if (ua_sent) {
920                                                 TRACE(TRACE_MGMT, "%s", 
921                                                         "Double UA detected");
922                                                 /* Do retry */
923                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
924                                                         "(tag %d)", cmd, cmd->tag);
925                                                 cmd->status = 0;
926                                                 cmd->masked_status = 0;
927                                                 cmd->msg_status = 0;
928                                                 cmd->host_status = DID_OK;
929                                                 cmd->driver_status = 0;
930                                                 memset(cmd->sense_buffer, 0,
931                                                         sizeof(cmd->sense_buffer));
932                                                 cmd->retry = 1;
933                                                 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
934                                                 /* 
935                                                  * Dev is still blocked by this cmd, so
936                                                  * it's OK to clear SCST_DEV_SERIALIZED
937                                                  * here.
938                                                  */
939                                                 dev->dev_double_ua_possible = 0;
940                                                 dev->dev_serialized = 0;
941                                                 dev->dev_reset_ua_sent = 0;
942                                                 goto out_unlock;
943                                         } else
944                                                 dev->dev_reset_ua_sent = 1;
945                                 }
946                         }
947                         if (cmd->ua_ignore == 0) {
948                                 if (unlikely(dbl_ua_possible)) {
949                                         __scst_process_UA(dev, cmd,
950                                                 cmd->sense_buffer,
951                                                 sizeof(cmd->sense_buffer), 0);
952                                 } else {
953                                         scst_process_UA(dev, cmd,
954                                                 cmd->sense_buffer,
955                                                 sizeof(cmd->sense_buffer), 0);
956                                 }
957                         }
958                 }
959         }
960
961         if (unlikely(dbl_ua_possible)) {
962                 if (ua_sent && scst_is_ua_command(cmd)) {
963                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
964                         dev->dev_double_ua_possible = 0;
965                         dev->dev_serialized = 0;
966                         dev->dev_reset_ua_sent = 0;
967                 }
968                 spin_unlock_bh(&dev->dev_lock);
969         }
970
971 out:
972         TRACE_EXIT();
973         return;
974
975 out_unlock:
976         spin_unlock_bh(&dev->dev_lock);
977         goto out;
978 }
979
980 static int scst_check_auto_sense(struct scst_cmd *cmd)
981 {
982         int res = 0;
983
984         TRACE_ENTRY();
985
986         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
987             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
988              SCST_NO_SENSE(cmd->sense_buffer)))
989         {
990                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
991                       "cmd->status=%x, cmd->masked_status=%x, "
992                       "cmd->msg_status=%x, cmd->host_status=%x, "
993                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
994                       cmd->msg_status, cmd->host_status, cmd->driver_status);
995                 res = 1;
996         } else if (unlikely(cmd->host_status)) {
997                 if ((cmd->host_status == DID_REQUEUE) ||
998                     (cmd->host_status == DID_IMM_RETRY) ||
999                     (cmd->host_status == DID_SOFT_ERROR)) {
1000                         scst_set_busy(cmd);
1001                 } else {
1002                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
1003                                 "received, returning HARDWARE ERROR instead",
1004                                 cmd->host_status);
1005                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1006                 }
1007         }
1008
1009         TRACE_EXIT_RES(res);
1010         return res;
1011 }
1012
1013 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1014         const uint8_t *rq_sense, int rq_sense_len, int *next_state)
1015 {
1016         unsigned char type;
1017
1018         TRACE_ENTRY();
1019
1020         cmd->status = result & 0xff;
1021         cmd->masked_status = status_byte(result);
1022         cmd->msg_status = msg_byte(result);
1023         cmd->host_status = host_byte(result);
1024         cmd->driver_status = driver_byte(result);
1025         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, "
1026               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
1027               "cmd->driver_status=%x", result, cmd->status,
1028               cmd->masked_status, cmd->msg_status, cmd->host_status,
1029               cmd->driver_status);
1030
1031         cmd->completed = 1;
1032
1033         scst_dec_on_dev_cmd(cmd);
1034
1035         type = cmd->dev->handler->type;
1036         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1037             cmd->tgt_dev->acg_dev->rd_only_flag &&
1038             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1039              type == TYPE_TAPE)) {
1040                 int32_t length;
1041                 uint8_t *address;
1042
1043                 length = scst_get_buf_first(cmd, &address);
1044                 TRACE_DBG("length %d", length);
1045                 if (unlikely(length <= 0)) {
1046                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1047                                 __func__);
1048                         goto next;
1049                 }
1050                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1051                         address[2] |= 0x80;   /* Write Protect*/
1052                 }
1053                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1054                         address[3] |= 0x80;   /* Write Protect*/
1055                 }
1056                 scst_put_buf(cmd, address);
1057         }
1058
1059 next:
1060         scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
1061
1062         TRACE_EXIT();
1063         return;
1064 }
1065
1066 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1067 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1068                                             struct scsi_request **req)
1069 {
1070         struct scst_cmd *cmd = NULL;
1071
1072         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1073                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1074
1075         if (cmd == NULL) {
1076                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1077                 if (*req)
1078                         scsi_release_request(*req);
1079         }
1080
1081         return cmd;
1082 }
1083
1084 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1085 {
1086         struct scsi_request *req = NULL;
1087         struct scst_cmd *cmd;
1088         int next_state;
1089
1090         TRACE_ENTRY();
1091
1092         WARN_ON(in_irq());
1093
1094         /*
1095          * We don't use scsi_cmd->resid, because:
1096          * 1. Many low level initiator drivers don't use (set) this field
1097          * 2. We determine the command's buffer size directly from CDB, 
1098          *    so scsi_cmd->resid is not relevant for us, and target drivers 
1099          *    should know the residual, if necessary, by comparing expected 
1100          *    and actual transfer sizes.
1101          */
1102
1103         cmd = scst_get_cmd(scsi_cmd, &req);
1104         if (cmd == NULL)
1105                 goto out;
1106
1107         next_state = SCST_CMD_STATE_DEV_DONE;
1108         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1109                 sizeof(req->sr_sense_buffer), &next_state);
1110
1111         /* Clear out request structure */
1112         req->sr_use_sg = 0;
1113         req->sr_sglist_len = 0;
1114         req->sr_bufflen = 0;
1115         req->sr_buffer = NULL;
1116         req->sr_underflow = 0;
1117         req->sr_request->rq_disk = NULL; /* disown request blk */
1118
1119         cmd->bufflen = req->sr_bufflen; //??
1120
1121         scst_release_request(cmd);
1122
1123         cmd->state = next_state;
1124         cmd->non_atomic_only = 0;
1125
1126         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1127
1128 out:
1129         TRACE_EXIT();
1130         return;
1131 }
1132 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1133 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1134 {
1135         struct scst_cmd *cmd;
1136         int next_state;
1137
1138         TRACE_ENTRY();
1139
1140         WARN_ON(in_irq());
1141
1142         /*
1143          * We don't use resid, because:
1144          * 1. Many low level initiator drivers don't use (set) this field
1145          * 2. We determine the command's buffer size directly from CDB,
1146          *    so resid is not relevant for us, and target drivers
1147          *    should know the residual, if necessary, by comparing expected
1148          *    and actual transfer sizes.
1149          */
1150
1151         cmd = (struct scst_cmd *)data;
1152         if (cmd == NULL)
1153                 goto out;
1154
1155         next_state = SCST_CMD_STATE_DEV_DONE;
1156         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE,
1157                 &next_state);
1158
1159         cmd->state = next_state;
1160         cmd->non_atomic_only = 0;
1161
1162         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1163
1164 out:
1165         TRACE_EXIT();
1166         return;
1167 }
1168 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1169
1170 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1171 {
1172         TRACE_ENTRY();
1173
1174         BUG_ON(in_irq());
1175
1176         scst_dec_on_dev_cmd(cmd);
1177
1178         if (next_state == SCST_CMD_STATE_DEFAULT)
1179                 next_state = SCST_CMD_STATE_DEV_DONE;
1180
1181         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1182 #if defined(DEBUG) || defined(TRACING)
1183                 if (cmd->sg) {
1184                         int i;
1185                         struct scatterlist *sg = cmd->sg;
1186                         TRACE(TRACE_RECV_TOP, 
1187                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1188                               cmd->sg_cnt, sg, (void*)sg[0].page);
1189                         for(i = 0; i < cmd->sg_cnt; ++i) {
1190                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1191                                         "Exec'd sg", page_address(sg[i].page),
1192                                         sg[i].length);
1193                         }
1194                 }
1195 #endif
1196         }
1197
1198
1199 #ifdef EXTRACHECKS
1200         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1201             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1202             (next_state != SCST_CMD_STATE_FINISHED)) 
1203         {
1204                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1205                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1206                 scst_set_cmd_error(cmd,
1207                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1208                 next_state = SCST_CMD_STATE_DEV_DONE;
1209         }
1210
1211         if (scst_check_auto_sense(cmd)) {
1212                 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1213                         "opcode %d", cmd->cdb[0]);
1214         }
1215 #endif
1216
1217         scst_check_sense(cmd, NULL, 0, &next_state);
1218
1219         cmd->state = next_state;
1220         cmd->non_atomic_only = 0;
1221
1222         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1223
1224         TRACE_EXIT();
1225         return;
1226 }
1227
1228 static int scst_report_luns_local(struct scst_cmd *cmd)
1229 {
1230         int res = SCST_EXEC_COMPLETED;
1231         int dev_cnt = 0;
1232         int buffer_size;
1233         struct scst_tgt_dev *tgt_dev = NULL;
1234         uint8_t *buffer;
1235
1236         TRACE_ENTRY();
1237
1238         cmd->status = 0;
1239         cmd->masked_status = 0;
1240         cmd->msg_status = 0;
1241         cmd->host_status = DID_OK;
1242         cmd->driver_status = 0;
1243
1244         /* ToDo: use full SG buffer, not only the first entry */
1245         buffer_size = scst_get_buf_first(cmd, &buffer);
1246         if (unlikely(buffer_size <= 0))
1247                 goto out_err;
1248
1249         if (buffer_size < 16) {
1250                 goto out_put_err;
1251         }
1252
1253         memset(buffer, 0, buffer_size);
1254
1255         /* sess->sess_tgt_dev_list is protected by suspended activity */
1256         list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1257                             sess_tgt_dev_list_entry) 
1258         {
1259                 if (8 + 8 * dev_cnt + 2 <= buffer_size) {
1260                         buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1261                         buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
1262                 }
1263                 dev_cnt++;
1264                 /* Temporary, until the ToDo above is done */
1265                 if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
1266                         break;
1267         }
1268
1269         /* Set the response header: LUN list length in bytes (big-endian) */
1270         dev_cnt *= 8;
1271         buffer[0] = (dev_cnt >> 24) & 0xff;
1272         buffer[1] = (dev_cnt >> 16) & 0xff;
1273         buffer[2] = (dev_cnt >> 8) & 0xff;
1274         buffer[3] = dev_cnt & 0xff;
1275
1276         dev_cnt += 8;
1277
1278         scst_put_buf(cmd, buffer);
1279
1280         if (buffer_size > dev_cnt)
1281                 scst_set_resp_data_len(cmd, dev_cnt);
1282         
1283 out_done:
1284         cmd->completed = 1;
1285
1286         /* Report the result */
1287         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1288
1289         TRACE_EXIT_RES(res);
1290         return res;
1291         
1292 out_put_err:
1293         scst_put_buf(cmd, buffer);
1294
1295 out_err:
1296         scst_set_cmd_error(cmd,
1297                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1298         goto out_done;
1299 }
1300
1301 static int scst_pre_select(struct scst_cmd *cmd)
1302 {
1303         int res = SCST_EXEC_NOT_COMPLETED;
1304
1305         TRACE_ENTRY();
1306
1307         if (scst_cmd_atomic(cmd)) {
1308                 res = SCST_EXEC_NEED_THREAD;
1309                 goto out;
1310         }
1311
1312         scst_block_dev(cmd->dev, 1);
1313         /* Device will be unblocked in scst_done_cmd_check() */
1314
1315         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1316                 int rc = scst_set_pending_UA(cmd);
1317                 if (rc == 0) {
1318                         res = SCST_EXEC_COMPLETED;
1319                         cmd->completed = 1;
1320                         /* Report the result */
1321                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1322                         goto out;
1323                 }
1324         }
1325
1326 out:
1327         TRACE_EXIT_RES(res);
1328         return res;
1329 }
1330
1331 static inline void scst_report_reserved(struct scst_cmd *cmd)
1332 {
1333         TRACE_ENTRY();
1334
1335         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1336         cmd->completed = 1;
1337         /* Report the result */
1338         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1339
1340         TRACE_EXIT();
1341         return;
1342 }
1343
1344 static int scst_reserve_local(struct scst_cmd *cmd)
1345 {
1346         int res = SCST_EXEC_NOT_COMPLETED;
1347         struct scst_device *dev;
1348         struct scst_tgt_dev *tgt_dev_tmp;
1349
1350         TRACE_ENTRY();
1351
1352         if (scst_cmd_atomic(cmd)) {
1353                 res = SCST_EXEC_NEED_THREAD;
1354                 goto out;
1355         }
1356
1357         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1358                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1359                      "(lun=%Ld)", (uint64_t)cmd->lun);
1360                 scst_set_cmd_error(cmd,
1361                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1362                 cmd->completed = 1;
1363                 res = SCST_EXEC_COMPLETED;
1364                 goto out;
1365         }
1366
1367         dev = cmd->dev;
1368         scst_block_dev(dev, 1);
1369         /* Device will be unblocked in scst_done_cmd_check() */
1370
1371         spin_lock_bh(&dev->dev_lock);
1372
1373         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1374                 scst_report_reserved(cmd);
1375                 /* !! At this point cmd, sess & tgt_dev may already be freed !! */
1376                 res = SCST_EXEC_COMPLETED;
1377                 goto out_unlock;
1378         }
1379
1380         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1381                             dev_tgt_dev_list_entry) 
1382         {
1383                 if (cmd->tgt_dev != tgt_dev_tmp)
1384                         set_bit(SCST_TGT_DEV_RESERVED, 
1385                                 &tgt_dev_tmp->tgt_dev_flags);
1386         }
1387         dev->dev_reserved = 1;
1388
1389 out_unlock:
1390         spin_unlock_bh(&dev->dev_lock);
1391         
1392 out:
1393         TRACE_EXIT_RES(res);
1394         return res;
1395 }
1396
1397 static int scst_release_local(struct scst_cmd *cmd)
1398 {
1399         int res = SCST_EXEC_NOT_COMPLETED;
1400         struct scst_tgt_dev *tgt_dev_tmp;
1401         struct scst_device *dev;
1402
1403         TRACE_ENTRY();
1404
1405         dev = cmd->dev;
1406
1407         scst_block_dev(dev, 1);
1408         cmd->blocking = 1;
1409         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1410
1411         spin_lock_bh(&dev->dev_lock);
1412
1413         /* 
1414          * The device could have been RELEASED behind us if the RESERVING session
1415          * was closed (see scst_free_tgt_dev()), but this doesn't actually
1416          * matter, so take the lock and don't retest the DEV_RESERVED bits again
1417          */
1418         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1419                 res = SCST_EXEC_COMPLETED;
1420                 cmd->status = 0;
1421                 cmd->masked_status = 0;
1422                 cmd->msg_status = 0;
1423                 cmd->host_status = DID_OK;
1424                 cmd->driver_status = 0;
1425         } else {
1426                 list_for_each_entry(tgt_dev_tmp,
1427                                     &dev->dev_tgt_dev_list,
1428                                     dev_tgt_dev_list_entry) 
1429                 {
1430                         clear_bit(SCST_TGT_DEV_RESERVED, 
1431                                 &tgt_dev_tmp->tgt_dev_flags);
1432                 }
1433                 dev->dev_reserved = 0;
1434         }
1435
1436         spin_unlock_bh(&dev->dev_lock);
1437
1438         if (res == SCST_EXEC_COMPLETED) {
1439                 cmd->completed = 1;
1440                 /* Report the result */
1441                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1442         }
1443
1444         TRACE_EXIT_RES(res);
1445         return res;
1446 }
1447
1448 /* 
1449  * The result of cmd execution, if any, should be reported 
1450  * via scst_cmd_done_local() 
1451  */
1452 static int scst_pre_exec(struct scst_cmd *cmd)
1453 {
1454         int res = SCST_EXEC_NOT_COMPLETED, rc;
1455         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1456
1457         TRACE_ENTRY();
1458
1459         /* Reserve check before Unit Attention */
1460         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
1461             (cmd->cdb[0] != INQUIRY) &&
1462             (cmd->cdb[0] != REPORT_LUNS) &&
1463             (cmd->cdb[0] != RELEASE) &&
1464             (cmd->cdb[0] != RELEASE_10) &&
1465             (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1466             (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1467             (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
1468         {
1469                 scst_report_reserved(cmd);
1470                 res = SCST_EXEC_COMPLETED;
1471                 goto out;
1472         }
1473
1474         /* If we had an internal bus reset, set a reset Unit Attention error on the cmd */
1475         if ((cmd->dev->scsi_dev != NULL) &&
1476             unlikely(cmd->dev->scsi_dev->was_reset) &&
1477             scst_is_ua_command(cmd)) 
1478         {
1479                 struct scst_device *dev = cmd->dev;
1480                 int done = 0;
1481                 /* Prevent more than one cmd from being triggered by was_reset */
1482                 spin_lock_bh(&dev->dev_lock);
1483                 barrier(); /* to reread was_reset */
1484                 if (dev->scsi_dev->was_reset) {
1485                         TRACE(TRACE_MGMT, "was_reset is %d", 1);
1486                         scst_set_cmd_error(cmd,
1487                                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1488                         /* It looks like it is safe to clear was_reset here */
1489                         dev->scsi_dev->was_reset = 0;
1490                         smp_mb();
1491                         done = 1;
1492                 }
1493                 spin_unlock_bh(&dev->dev_lock);
1494
1495                 if (done)
1496                         goto out_done;
1497         }
1498
1499         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
1500             scst_is_ua_command(cmd)) 
1501         {
1502                 rc = scst_set_pending_UA(cmd);
1503                 if (rc == 0)
1504                         goto out_done;
1505         }
1506
1507         /* Check READ_ONLY device status */
1508         if (tgt_dev->acg_dev->rd_only_flag &&
1509             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1510              cmd->cdb[0] == WRITE_10 ||
1511              cmd->cdb[0] == WRITE_12 ||
1512              cmd->cdb[0] == WRITE_16 ||
1513              cmd->cdb[0] == WRITE_VERIFY ||
1514              cmd->cdb[0] == WRITE_VERIFY_12 ||
1515              cmd->cdb[0] == WRITE_VERIFY_16 ||
1516              (cmd->dev->handler->type == TYPE_TAPE &&
1517               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1518         {
1519                 scst_set_cmd_error(cmd,
1520                            SCST_LOAD_SENSE(scst_sense_data_protect));
1521                 goto out_done;
1522         }
1523 out:
1524         TRACE_EXIT_RES(res);
1525         return res;
1526
1527 out_done:
1528         res = SCST_EXEC_COMPLETED;
1529         cmd->completed = 1;
1530         /* Report the result */
1531         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1532         goto out;
1533 }
1534
1535 /* 
1536  * The result of cmd execution, if any, should be reported 
1537  * via scst_cmd_done_local() 
1538  */
1539 static inline int scst_local_exec(struct scst_cmd *cmd)
1540 {
1541         int res = SCST_EXEC_NOT_COMPLETED;
1542
1543         TRACE_ENTRY();
1544
1545         /*
1546          * When adding new commands here, don't forget to update
1547          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1548          */
1549
1550         switch (cmd->cdb[0]) {
1551         case MODE_SELECT:
1552         case MODE_SELECT_10:
1553         case LOG_SELECT:
1554                 res = scst_pre_select(cmd);
1555                 break;
1556         case RESERVE:
1557         case RESERVE_10:
1558                 res = scst_reserve_local(cmd);
1559                 break;
1560         case RELEASE:
1561         case RELEASE_10:
1562                 res = scst_release_local(cmd);
1563                 break;
1564         case REPORT_LUNS:
1565                 res = scst_report_luns_local(cmd);
1566                 break;
1567         }
1568
1569         TRACE_EXIT_RES(res);
1570         return res;
1571 }
1572
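     /*
      * Actually executes one cmd: scst_pre_exec() checks run first, then the
      * SCST-local handlers (scst_local_exec()), then the dev handler's exec()
      * callback, and finally the cmd is passed to the SCSI mid-level
      * (scst_do_req() on pre-2.6.18 kernels, scst_exec_req() otherwise).
      * Results are reported via scst_cmd_done_local().
      */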
1573 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1574 {
1575         int rc = SCST_EXEC_NOT_COMPLETED;
1576
1577         TRACE_ENTRY();
1578
1579         cmd->sent_to_midlev = 1;
1580         cmd->state = SCST_CMD_STATE_EXECUTING;
1581         cmd->scst_cmd_done = scst_cmd_done_local;
1582
1583         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1584         smp_mb__after_set_bit();
1585
1586         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1587                 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1588                 goto out_aborted;
1589         }
1590
1591         rc = scst_pre_exec(cmd);
1592         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1593         if (rc != SCST_EXEC_NOT_COMPLETED) {
1594                 if (rc == SCST_EXEC_COMPLETED)
1595                         goto out;
1596                 else if (rc == SCST_EXEC_NEED_THREAD)
1597                         goto out_clear;
1598                 else
1599                         goto out_rc_error;
1600         }
1601
1602         rc = scst_local_exec(cmd);
1603         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1604         if (rc != SCST_EXEC_NOT_COMPLETED) {
1605                 if (rc == SCST_EXEC_COMPLETED)
1606                         goto out;
1607                 else if (rc == SCST_EXEC_NEED_THREAD)
1608                         goto out_clear;
1609                 else
1610                         goto out_rc_error;
1611         }
1612
1613         if (cmd->dev->handler->exec) {
1614                 struct scst_device *dev = cmd->dev;
1615                 TRACE_DBG("Calling dev handler %s exec(%p)",
1616                       dev->handler->name, cmd);
1617                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1618                 cmd->scst_cmd_done = scst_cmd_done_local;
1619                 rc = dev->handler->exec(cmd);
1620                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1621                 TRACE_DBG("Dev handler %s exec() returned %d",
1622                       dev->handler->name, rc);
1623                 if (rc != SCST_EXEC_NOT_COMPLETED) {
1624                         if (rc == SCST_EXEC_COMPLETED)
1625                                 goto out;
1626                         else if (rc == SCST_EXEC_NEED_THREAD)
1627                                 goto out_clear;
1628                         else
1629                                 goto out_rc_error;
1630                 }
1631         }
1632
1633         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1634         
1635         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1636                 PRINT_ERROR_PR("Command for virtual device must be "
1637                         "processed by device handler (lun %Ld)!",
1638                         (uint64_t)cmd->lun);
1639                 goto out_error;
1640         }
1641
1642 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1643         if (scst_alloc_request(cmd) != 0) {
1644                 PRINT_INFO_PR("%s", "Unable to allocate request, "
1645                         "sending BUSY status");
1646                 goto out_busy;
1647         }
1648         
1649         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1650                     (void *)cmd->scsi_req->sr_buffer,
1651                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1652                     cmd->retries);
1653 #else
1654         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1655                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1656                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1657                         GFP_KERNEL);
1658         if (rc) {
1659                 PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1660                 goto out_error;
1661         }
1662 #endif
1663
1664         rc = SCST_EXEC_COMPLETED;
1665
1666 out:
1667         TRACE_EXIT();
1668         return rc;
1669
1670 out_clear:
1671         /* Restore the state */
1672         cmd->sent_to_midlev = 0;
1673         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1674         goto out;
1675
1676 out_rc_error:
1677         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1678                     "invalid code %d", cmd->dev->handler->name, rc);
1679         /* fall through */
1680
1681 out_error:
1682         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1683         cmd->completed = 1;
1684         cmd->state = SCST_CMD_STATE_DEV_DONE;
1685         rc = SCST_EXEC_COMPLETED;
1686         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1687         goto out;
1688
1689 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1690 out_busy:
1691         scst_set_busy(cmd);
1692         cmd->completed = 1;
1693         cmd->state = SCST_CMD_STATE_DEV_DONE;
1694         rc = SCST_EXEC_COMPLETED;
1695         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1696         goto out;
1697 #endif
1698
1699 out_aborted:
1700         rc = SCST_EXEC_COMPLETED;
1701         /* Report the result. The cmd is not completed */
1702         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1703         goto out;
1704 }
1705
1706 static int scst_send_to_midlev(struct scst_cmd *cmd)
1707 {
1708         int res, rc;
1709         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1710         struct scst_device *dev = cmd->dev;
1711         int expected_sn;
1712         int count;
1713         int atomic = scst_cmd_atomic(cmd);
1714
1715         TRACE_ENTRY();
1716
1717         res = SCST_CMD_STATE_RES_CONT_NEXT;
1718
1719         if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1720                 TRACE_DBG("Dev handler %s exec() can not be "
1721                       "called in atomic context, rescheduling to the thread",
1722                       dev->handler->name);
1723                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1724                 goto out;
1725         }
1726
1727         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1728                 goto out;
1729
1730         scst_inc_cmd_count(); /* protect dev & tgt_dev */
1731
1732         if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1733                 rc = scst_do_send_to_midlev(cmd);
1734                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1735                 if (rc == SCST_EXEC_NEED_THREAD) {
1736                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1737                               "thread context, rescheduling");
1738                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1739                         scst_dec_on_dev_cmd(cmd);
1740                         goto out_dec_cmd_count;
1741                 } else {
1742                         BUG_ON(rc != SCST_EXEC_COMPLETED);
1743                         goto out_unplug;
1744                 }
1745         }
1746
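             /*
              * SCSI serialization: cmds are executed in SN order per tgt_dev.
              * If this cmd's SN is not yet the expected one, park it on
              * deferred_cmd_list under sn_lock; scst_check_deferred_commands()
              * will pick it up once preceding cmds advance expected_sn.
              */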
1747         expected_sn = tgt_dev->expected_sn;
1748         if (cmd->sn != expected_sn) {
1749                 spin_lock_bh(&tgt_dev->sn_lock);
1750                 tgt_dev->def_cmd_count++;
1751                 smp_mb();
1752                 barrier(); /* to reread expected_sn */
1753                 expected_sn = tgt_dev->expected_sn;
1754                 if (cmd->sn != expected_sn) {
1755                         scst_dec_on_dev_cmd(cmd);
1756                         TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1757                               "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1758                         list_add_tail(&cmd->sn_cmd_list_entry,
1759                                       &tgt_dev->deferred_cmd_list);
1760                         spin_unlock_bh(&tgt_dev->sn_lock);
1761                         /* !! At this point cmd can be already freed !! */
1762                         goto out_dec_cmd_count;
1763                 } else {
1764                         TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1765                               "expected_sn %d, continuing", expected_sn);
1766                         tgt_dev->def_cmd_count--;
1767                         spin_unlock_bh(&tgt_dev->sn_lock);
1768                 }
1769         }
1770
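             /*
              * Execute this cmd, then keep draining deferred cmds that become
              * ready as expected_sn advances, until the deferred list is empty
              * or scst_inc_on_dev_cmd() fails (presumably because the device
              * got blocked in the meantime).
              */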
1771         count = 0;
1772         while (1) {
1773                 rc = scst_do_send_to_midlev(cmd);
1774                 if (rc == SCST_EXEC_NEED_THREAD) {
1775                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1776                               "thread context, rescheduling");
1777                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1778                         scst_dec_on_dev_cmd(cmd);
1779                         if (count != 0)
1780                                 goto out_unplug;
1781                         else
1782                                 goto out_dec_cmd_count;
1783                 }
1784                 BUG_ON(rc != SCST_EXEC_COMPLETED);
1785                 /* !! At this point cmd can be already freed !! */
1786                 count++;
1787                 expected_sn = __scst_inc_expected_sn(tgt_dev);
1788                 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1789                 if (cmd == NULL)
1790                         break;
1791                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1792                         break;
1793         }
1794
1795 out_unplug:
1796         if (dev->scsi_dev != NULL)
1797                 generic_unplug_device(dev->scsi_dev->request_queue);
1798
1799 out_dec_cmd_count:
1800         scst_dec_cmd_count();
1801         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1802
1803 out:
1804         TRACE_EXIT_HRES(res);
1805         return res;
1806 }
1807
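     /*
      * Allocates an internal (SCST-generated) cmd, e.g. for auto REQUEST
      * SENSE, inheriting the session, target, device and LUN of orig_cmd.
      * The cmd is queued HEAD OF QUEUE so it runs before other pending cmds,
      * and orig_cmd is remembered so the result can be copied back on
      * completion.
      */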
1808 static struct scst_cmd *scst_create_prepare_internal_cmd(
1809         struct scst_cmd *orig_cmd, int bufsize)
1810 {
1811         struct scst_cmd *res;
1812         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1813
1814         TRACE_ENTRY();
1815
1816         res = scst_alloc_cmd(gfp_mask);
1817         if (unlikely(res == NULL)) {
1818                 goto out;
1819         }
1820
1821         res->sess = orig_cmd->sess;
1822         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1823         res->atomic = scst_cmd_atomic(orig_cmd);
1824         res->internal = 1;
1825         res->tgtt = orig_cmd->tgtt;
1826         res->tgt = orig_cmd->tgt;
1827         res->dev = orig_cmd->dev;
1828         res->tgt_dev = orig_cmd->tgt_dev;
1829         res->lun = orig_cmd->lun;
1830         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1831         res->data_direction = SCST_DATA_UNKNOWN;
1832         res->orig_cmd = orig_cmd;
1833
1834         res->bufflen = bufsize;
1835         if (bufsize > 0) {
1836                 if (scst_alloc_space(res) != 0) {
1837                         PRINT_ERROR("Unable to create buffer (size %d) for "
1838                                 "internal cmd", bufsize);
1839                         goto out_free_res;
1840                 }
1841         }
1842 out:
1843         TRACE_EXIT_HRES((unsigned long)res);
1844         return res;
1845
1846 out_free_res:
1847         scst_destroy_cmd(res);
1848         res = NULL;
1849         goto out;
1850 }
1851
1852 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1853 {
1854         TRACE_ENTRY();
1855
1856         if (cmd->bufflen > 0)
1857                 scst_release_space(cmd);
1858         scst_destroy_cmd(cmd);
1859
1860         TRACE_EXIT();
1861         return;
1862 }
1863
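     /*
      * Auto-sense path: called from scst_done_cmd_check() when a cmd finished
      * with CHECK CONDITION but without valid sense data. An internal REQUEST
      * SENSE cmd is built and put on the active cmd list; its result is
      * copied back to orig_cmd in scst_complete_request_sense().
      */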
1864 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1865 {
1866         int res = SCST_CMD_STATE_RES_RESTART;
1867 #define sbuf_size 252
1868         static const unsigned char request_sense[6] =
1869             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1870         struct scst_cmd *rs_cmd;
1871
1872         TRACE_ENTRY();
1873
1874         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1875         if (rs_cmd == NULL)
1876                 goto out_error;
1877
1878         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1879         rs_cmd->cdb_len = sizeof(request_sense);
1880         rs_cmd->data_direction = SCST_DATA_READ;
1881
1882         spin_lock_irq(&scst_list_lock);
1883         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1884         spin_unlock_irq(&scst_list_lock);
1885
1886 out:
1887         TRACE_EXIT_RES(res);
1888         return res;
1889
1890 out_error:
1891         res = -1;
1892         goto out;
1893 #undef sbuf_size
1894 }
1895
1896 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1897 {
1898         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1899         uint8_t *buf;
1900         int len;
1901
1902         TRACE_ENTRY();
1903
1904         BUG_ON(orig_cmd == NULL);
1905
1906         len = scst_get_buf_first(cmd, &buf);
1907
1908         if ((cmd->status == 0) && (len > 0) && SCST_SENSE_VALID(buf) &&
1909             (!SCST_NO_SENSE(buf))) 
1910         {
1911                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1912                         buf, len);
1913                 memcpy(orig_cmd->sense_buffer, buf,
1914                         (sizeof(orig_cmd->sense_buffer) > len) ?
1915                                 len : sizeof(orig_cmd->sense_buffer));
1916         } else {
1917                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1918                         "REQUEST SENSE, returning HARDWARE ERROR");
1919                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1920         }
1921
1922         scst_put_buf(cmd, buf);
1923
1924         scst_free_internal_cmd(cmd);
1925
1926         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1927         return orig_cmd;
1928 }
1929
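     /*
      * Post-execution fixups common to all cmds: trigger auto REQUEST SENSE
      * if needed, force the Write Protect bit in MODE SENSE data for
      * read-only LUNs, clear the NormACA bit in standard INQUIRY data (ACA
      * is not supported), and, for RESERVE and MODE/LOG SELECT, update the
      * reservation/Unit Attention state and unblock the device that was
      * blocked when the cmd started. Returns non-zero if *pres was set and
      * the caller should return it immediately.
      */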
1930 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1931 {
1932         int res = 0, rc;
1933         unsigned char type;
1934
1935         TRACE_ENTRY();
1936
1937         if (cmd->cdb[0] == REQUEST_SENSE) {
1938                 if (cmd->internal)
1939                         cmd = scst_complete_request_sense(cmd);
1940         } else if (scst_check_auto_sense(cmd)) {
1941                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1942                             "without sense data (opcode 0x%x), issuing "
1943                             "REQUEST SENSE", cmd->cdb[0]);
1944                 rc = scst_prepare_request_sense(cmd);
1945                 if (rc > 0) {
1946                         *pres = rc;
1947                         res = 1;
1948                         goto out;
1949                 } else {
1950                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1951                                     "returning HARDWARE ERROR");
1952                         scst_set_cmd_error(cmd,
1953                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1954                 }
1955         }
1956
1957         type = cmd->dev->handler->type;
1958         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1959             cmd->tgt_dev->acg_dev->rd_only_flag &&
1960             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1961              type == TYPE_TAPE))
1962         {
1963                 int32_t length;
1964                 uint8_t *address;
1965
1966                 length = scst_get_buf_first(cmd, &address);
1967                 if (length <= 0)
1968                         goto out;
1969                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1970                         address[2] |= 0x80;   /* Write Protect*/
1971                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1972                         address[3] |= 0x80;   /* Write Protect*/
1973                 scst_put_buf(cmd, address);
1974         }
1975
1976         /* 
1977          * Check and clear NormACA option for the device, if necessary,
1978          * since we don't support ACA
1979          */
1980         if ((cmd->cdb[0] == INQUIRY) &&
1981             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1982             (cmd->resp_data_len > SCST_INQ_BYTE3))
1983         {
1984                 uint8_t *buffer;
1985                 int buflen;
1986
1987                 /* ToDo: all pages ?? */
1988                 buflen = scst_get_buf_first(cmd, &buffer);
1989                 if (buflen > 0) {
1990                         if (buflen > SCST_INQ_BYTE3) {
1991 #ifdef EXTRACHECKS
1992                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1993                                         PRINT_INFO_PR("NormACA set for device: "
1994                                             "lun=%Ld, type 0x%02x", 
1995                                             (uint64_t)cmd->lun, buffer[0]);
1996                                 }
1997 #endif
1998                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1999                         } else
2000                                 scst_set_cmd_error(cmd,
2001                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
2002
2003                         scst_put_buf(cmd, buffer);
2004                 }
2005         }
2006
2007         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
2008                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
2009                                                 &cmd->tgt_dev->tgt_dev_flags)) {
2010                         struct scst_tgt_dev *tgt_dev_tmp;
2011                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
2012                               (uint64_t)cmd->lun, cmd->masked_status);
2013                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
2014                                      sizeof(cmd->sense_buffer));
2015                         /* Clearing the reservation */
2016                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
2017                                             dev_tgt_dev_list_entry) {
2018                                 clear_bit(SCST_TGT_DEV_RESERVED, 
2019                                         &tgt_dev_tmp->tgt_dev_flags);
2020                         }
2021                         cmd->dev->dev_reserved = 0;
2022                 }
2023                 scst_unblock_dev(cmd->dev);
2024         }
2025         
2026         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
2027                      (cmd->cdb[0] == MODE_SELECT_10) ||
2028                      (cmd->cdb[0] == LOG_SELECT)))
2029         {
2030                 if (cmd->status == 0) {
2031                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2032                                 "setting the SELECT UA (lun=%Ld)", 
2033                                 (uint64_t)cmd->lun);
2034                         spin_lock_bh(&scst_temp_UA_lock);
2035                         if (cmd->cdb[0] == LOG_SELECT) {
2036                                 scst_set_sense(scst_temp_UA,
2037                                         sizeof(scst_temp_UA),
2038                                         UNIT_ATTENTION, 0x2a, 0x02);
2039                         } else {
2040                                 scst_set_sense(scst_temp_UA,
2041                                         sizeof(scst_temp_UA),
2042                                         UNIT_ATTENTION, 0x2a, 0x01);
2043                         }
2044                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2045                                 sizeof(scst_temp_UA), 1);
2046                         spin_unlock_bh(&scst_temp_UA_lock);
2047                 }
2048                 scst_unblock_dev(cmd->dev);
2049         }
2050
2051 out:
2052         TRACE_EXIT_RES(res);
2053         return res;
2054 }
2055
2056 static int scst_dev_done(struct scst_cmd *cmd)
2057 {
2058         int res = SCST_CMD_STATE_RES_CONT_SAME;
2059         int state;
2060         int atomic = scst_cmd_atomic(cmd);
2061
2062         TRACE_ENTRY();
2063
2064         if (atomic && !cmd->dev->handler->dev_done_atomic &&
2065             cmd->dev->handler->dev_done) 
2066         {
2067                 TRACE_DBG("Dev handler %s dev_done() can not be "
2068                       "called in atomic context, rescheduling to the thread",
2069                       cmd->dev->handler->name);
2070                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2071                 goto out;
2072         }
2073
2074         if (scst_done_cmd_check(cmd, &res))
2075                 goto out;
2076
2077         state = SCST_CMD_STATE_XMIT_RESP;
2078         if (likely(!scst_is_cmd_local(cmd)) && 
2079             likely(cmd->dev->handler->dev_done != NULL))
2080         {
2081                 int rc;
2082                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2083                       cmd->dev->handler->name, cmd);
2084                 rc = cmd->dev->handler->dev_done(cmd);
2085                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2086                       cmd->dev->handler->name, rc);
2087                 if (rc != SCST_CMD_STATE_DEFAULT)
2088                         state = rc;
2089         }
2090
2091         switch (state) {
2092         case SCST_CMD_STATE_REINIT:
2093                 cmd->state = state;
2094                 res = SCST_CMD_STATE_RES_RESTART;
2095                 break;
2096
2097         case SCST_CMD_STATE_DEV_PARSE:
2098         case SCST_CMD_STATE_PREPARE_SPACE:
2099         case SCST_CMD_STATE_RDY_TO_XFER:
2100         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2101         case SCST_CMD_STATE_DEV_DONE:
2102         case SCST_CMD_STATE_XMIT_RESP:
2103         case SCST_CMD_STATE_FINISHED:
2104                 cmd->state = state;
2105                 res = SCST_CMD_STATE_RES_CONT_SAME;
2106                 break;
2107
2108         case SCST_CMD_STATE_NEED_THREAD_CTX:
2109                 TRACE_DBG("Dev handler %s dev_done() requested "
2110                       "thread context, rescheduling",
2111                       cmd->dev->handler->name);
2112                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2113                 break;
2114
2115         default:
2116                 if (state >= 0) {
2117                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2118                                 "invalid cmd state %d", 
2119                                 cmd->dev->handler->name, state);
2120                 } else {
2121                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2122                                 "error %d", cmd->dev->handler->name, 
2123                                 state);
2124                 }
2125                 scst_set_cmd_error(cmd,
2126                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2127                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2128                 res = SCST_CMD_STATE_RES_CONT_SAME;
2129                 break;
2130         }
2131
2132 out:
2133         TRACE_EXIT_HRES(res);
2134         return res;
2135 }
2136
2137 static int scst_xmit_response(struct scst_cmd *cmd)
2138 {
2139         int res, rc;
2140         int atomic = scst_cmd_atomic(cmd);
2141
2142         TRACE_ENTRY();
2143
2144         /* 
2145          * Check here also in order to avoid unnecessary delays of other
2146          * commands.
2147          */
2148         if (unlikely(cmd->sent_to_midlev == 0) &&
2149             (cmd->tgt_dev != NULL))
2150         {
2151                 TRACE(TRACE_SCSI_SERIALIZING,
2152                       "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
2153                 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
2154                 cmd->sent_to_midlev = 1;
2155         }
2156
2157         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2158                 TRACE_DBG("%s", "xmit_response() can not be "
2159                       "called in atomic context, rescheduling to the thread");
2160                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2161                 goto out;
2162         }
2163
2164         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2165         smp_mb__after_set_bit();
2166
2167         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2168                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2169                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2170                                 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
2171                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2172                 }
2173         }
2174
2175         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2176                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
2177                         cmd, cmd->tag);
2178                 cmd->state = SCST_CMD_STATE_FINISHED;
2179                 res = SCST_CMD_STATE_RES_CONT_SAME;
2180                 goto out;
2181         }
2182
2183 #ifdef DEBUG_TM
2184         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2185                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2186                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2187                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2188                         goto out;
2189                 }
2190                 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2191                         cmd, cmd->tag);
2192                 schedule_timeout_uninterruptible(HZ);
2193         }
2194 #endif
2195
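        /*
         * finished_cmds is snapshotted before calling xmit_response() so that,
         * on SCST_TGT_RES_QUEUE_FULL, scst_queue_retry_cmd() can presumably
         * tell whether some cmd finished in the meantime: if it returns 0 the
         * retry has been queued, otherwise we loop and call xmit_response()
         * again right away.
         */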
2196         while (1) {
2197                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2198
2199                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2200                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2201
2202                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2203
2204 #if defined(DEBUG) || defined(TRACING)
2205                 if (cmd->sg) {
2206                         int i;
2207                         struct scatterlist *sg = cmd->sg;
2208                         TRACE(TRACE_SEND_BOT, 
2209                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2210                               cmd->sg_cnt, sg, (void*)sg[0].page);
2211                         for (i = 0; i < cmd->sg_cnt; ++i) {
2212                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2213                                     "Xmitting sg", page_address(sg[i].page),
2214                                     sg[i].length);
2215                         }
2216                 }
2217 #endif
2218
2219 #ifdef DEBUG_RETRY
2220                 if (((scst_random() % 100) == 77))
2221                         rc = SCST_TGT_RES_QUEUE_FULL;
2222                 else
2223 #endif
2224                         rc = cmd->tgtt->xmit_response(cmd);
2225                 TRACE_DBG("xmit_response() returned %d", rc);
2226
2227                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2228                         goto out;
2229
2230                 /* Restore the previous state */
2231                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2232
2233                 switch (rc) {
2234                 case SCST_TGT_RES_QUEUE_FULL:
2235                 {
2236                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2237                                 break;
2238                         else
2239                                 continue;
2240                 }
2241
2242                 case SCST_TGT_RES_NEED_THREAD_CTX:
2243                 {
2244                         TRACE_DBG("Target driver %s xmit_response() "
2245                               "requested thread context, rescheduling",
2246                               cmd->tgtt->name);
2247                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2248                         break;
2249                 }
2250
2251                 default:
2252                         goto out_error;
2253                 }
2254                 break;
2255         }
2256
2257 out:
2258         /* Caution: cmd can be already dead here */
2259         TRACE_EXIT_HRES(res);
2260         return res;
2261
2262 out_error:
2263         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2264                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2265                         "fatal error", cmd->tgtt->name);
2266         } else {
2267                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2268                         "invalid value %d", cmd->tgtt->name, rc);
2269         }
2270         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2271         cmd->state = SCST_CMD_STATE_FINISHED;
2272         res = SCST_CMD_STATE_RES_CONT_SAME;
2273         goto out;
2274 }
2275
2276 static int scst_finish_cmd(struct scst_cmd *cmd)
2277 {
2278         int res;
2279
2280         TRACE_ENTRY();
2281
2282         if (cmd->mem_checked) {
2283                 spin_lock_bh(&scst_cmd_mem_lock);
2284                 scst_cur_cmd_mem -= cmd->bufflen;
2285                 spin_unlock_bh(&scst_cmd_mem_lock);
2286         }
2287
2288         spin_lock_irq(&scst_list_lock);
2289
2290         TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2291         list_del(&cmd->cmd_list_entry);
2292
2293         if (cmd->mgmt_cmnd)
2294                 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2295
2296         if (likely(cmd->tgt_dev != NULL))
2297                 cmd->tgt_dev->cmd_count--;
2298
2299         cmd->sess->sess_cmd_count--;
2300
2301         list_del(&cmd->search_cmd_list_entry);
2302
2303         spin_unlock_irq(&scst_list_lock);
2304
2305         scst_free_cmd(cmd);
2306
2307         res = SCST_CMD_STATE_RES_CONT_NEXT;
2308
2309         TRACE_EXIT_HRES(res);
2310         return res;
2311 }
2312
2313 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2314 {
2315         TRACE_ENTRY();
2316
2317         BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2318
2319         cmd->state = SCST_CMD_STATE_FINISHED;
2320         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2321
2322         TRACE_EXIT();
2323         return;
2324 }
2325
2326 /*
2327  * Returns 0 on success, > 0 when we need to wait for unblock,
2328  * < 0 if there is no device (lun) or device type handler.
2329  * Called under scst_list_lock and IRQs disabled
2330  */
2331 static int scst_translate_lun(struct scst_cmd *cmd)
2332 {
2333         struct scst_tgt_dev *tgt_dev = NULL;
2334         int res = 0;
2335
2336         TRACE_ENTRY();
2337
2338         scst_inc_cmd_count();   
2339
2340         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2341                 res = -1;
2342                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2343                       (uint64_t)cmd->lun);
2344                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2345                                     sess_tgt_dev_list_entry) 
2346                 {
2347                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2348                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2349
2350                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2351                                         PRINT_INFO_PR("Dev handler for device "
2352                                           "%Ld is NULL, the device will not be "
2353                                           "visible remotely", (uint64_t)cmd->lun);
2354                                         break;
2355                                 }
2356                                 
2357                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2358                                         cmd->tgt_dev_saved->cmd_count--;
2359                                         TRACE(TRACE_SCSI_SERIALIZING,
2360                                               "SCST_CMD_STATE_REINIT: "
2361                                               "incrementing expected_sn on tgt_dev_saved %p",
2362                                               cmd->tgt_dev_saved);
2363                                         scst_inc_expected_sn_unblock(
2364                                                 cmd->tgt_dev_saved, cmd, 1);
2365                                 }
2366                                 cmd->tgt_dev = tgt_dev;
2367                                 tgt_dev->cmd_count++;
2368                                 cmd->dev = tgt_dev->acg_dev->dev;
2369
2370                                 /* ToDo: cmd->queue_type */
2371
2372                                 /* scst_list_lock is enough to protect that */
2373                                 cmd->sn = tgt_dev->next_sn;
2374                                 tgt_dev->next_sn++;
2375
2376                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2377                                         "cmd->sn: %d", cmd->sn);
2378
2379                                 res = 0;
2380                                 break;
2381                         }
2382                 }
2383                 if (res != 0) {
2384                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2385                                 "nonexistent LU?", (uint64_t)cmd->lun);
2386                         scst_dec_cmd_count();
2387                 }
2388         } else {
2389                 if ( !cmd->sess->waiting) {
2390                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2391                               cmd->sess);
2392                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2393                                       &scst_dev_wait_sess_list);
2394                         cmd->sess->waiting = 1;
2395                 }
2396                 scst_dec_cmd_count();
2397                 res = 1;
2398         }
2399
2400         TRACE_EXIT_RES(res);
2401         return res;
2402 }
2403
2404 /* Called under scst_list_lock and IRQs disabled */
2405 static int scst_process_init_cmd(struct scst_cmd *cmd)
2406 {
2407         int res = 0;
2408
2409         TRACE_ENTRY();
2410
2411         res = scst_translate_lun(cmd);
2412         if (likely(res == 0)) {
2413                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2414                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
2415                         TRACE(TRACE_RETRY, "Too many pending commands in "
2416                                 "session, returning BUSY to initiator \"%s\"",
2417                                 (cmd->sess->initiator_name[0] == '\0') ?
2418                                   "Anonymous" : cmd->sess->initiator_name);
2419                         scst_set_busy(cmd);
2420                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2421                 }
2422                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2423                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2424         } else if (res < 0) {
2425                 TRACE_DBG("Finishing cmd %p", cmd);
2426                 scst_set_cmd_error(cmd,
2427                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2428                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2429                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2430                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2431         }
2432
2433         TRACE_EXIT_RES(res);
2434         return res;
2435 }
2436
2437 /*
2438  * Called under scst_list_lock and IRQs disabled.
2439  * We don't drop the lock anywhere inside, because command execution
2440  * has to be serialized, i.e. commands must be executed in the order
2441  * of their arrival, and we set this order inside scst_translate_lun().
2442  */
2443 static int scst_do_job_init(struct list_head *init_cmd_list)
2444 {
2445         int res = 1;
2446
2447         TRACE_ENTRY();
2448
2449         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2450                 while (!list_empty(init_cmd_list)) {
2451                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2452                                                           typeof(*cmd),
2453                                                           cmd_list_entry);
2454                         res = scst_process_init_cmd(cmd);
2455                         if (res > 0)
2456                                 break;
2457                 }
2458         }
2459
2460         TRACE_EXIT_RES(res);
2461         return res;
2462 }
2463
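     /*
      * The cmd state machine: drives a cmd through DEV_PARSE -> PREPARE_SPACE
      * -> RDY_TO_XFER -> SEND_TO_MIDLEV -> DEV_DONE -> XMIT_RESP -> FINISHED,
      * until a step asks to continue with the next cmd, to be rescheduled to
      * a thread, or to restart the cmd from the init list.
      */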
2464 /* Called with no locks held */
2465 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2466         int left_locked)
2467 {
2468         int res;
2469
2470         TRACE_ENTRY();
2471
2472         BUG_ON(in_irq());
2473
2474         cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2475                         SCST_CONTEXT_DIRECT_ATOMIC);
2476         cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2477
2478         do {
2479                 switch (cmd->state) {
2480                 case SCST_CMD_STATE_DEV_PARSE:
2481                         res = scst_parse_cmd(cmd);
2482                         break;
2483
2484                 case SCST_CMD_STATE_PREPARE_SPACE:
2485                         res = scst_prepare_space(cmd);
2486                         break;
2487
2488                 case SCST_CMD_STATE_RDY_TO_XFER:
2489                         res = scst_rdy_to_xfer(cmd);
2490                         break;
2491
2492                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2493                         res = scst_send_to_midlev(cmd);
2494                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2495                         break;
2496
2497                 case SCST_CMD_STATE_DEV_DONE:
2498                         res = scst_dev_done(cmd);
2499                         break;
2500
2501                 case SCST_CMD_STATE_XMIT_RESP:
2502                         res = scst_xmit_response(cmd);
2503                         break;
2504
2505                 case SCST_CMD_STATE_FINISHED:
2506                         res = scst_finish_cmd(cmd);
2507                         break;
2508
2509                 default:
2510                         PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2511                                cmd, cmd->state);
2512                         BUG();
2513                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2514                         break;
2515                 }
2516         } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2517
2518         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2519                 if (left_locked)
2520                         spin_lock_irq(&scst_list_lock);
2521         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2522                 spin_lock_irq(&scst_list_lock);
2523
2524                 switch (cmd->state) {
2525                 case SCST_CMD_STATE_DEV_PARSE:
2526                 case SCST_CMD_STATE_PREPARE_SPACE:
2527                 case SCST_CMD_STATE_RDY_TO_XFER:
2528                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2529                 case SCST_CMD_STATE_DEV_DONE:
2530                 case SCST_CMD_STATE_XMIT_RESP:
2531                 case SCST_CMD_STATE_FINISHED:
2532                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2533                         list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2534                         break;
2535 #ifdef EXTRACHECKS
2536                 /* invalid cmd states at this point */
2537                 case SCST_CMD_STATE_DEFAULT:
2538                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2539                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2540                                 "useful list (left on scst cmd list)", cmd, 
2541                                 cmd->state);
2542                         spin_unlock_irq(&scst_list_lock);
2543                         BUG();
2544                         spin_lock_irq(&scst_list_lock);
2545                         break;
2546 #endif
2547                 default:
2548                         break;
2549                 }
2550                 cmd->non_atomic_only = 1;
2551                 if (!left_locked)
2552                         spin_unlock_irq(&scst_list_lock);
2553                 wake_up(&scst_list_waitQ);
2554         } else if (res == SCST_CMD_STATE_RES_RESTART) {
2555                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2556                         spin_lock_irq(&scst_list_lock);
2557                         TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2558                         list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2559                         if (!left_locked)
2560                                 spin_unlock_irq(&scst_list_lock);
2561                 } else
2562                         BUG();
2563         } else
2564                 BUG();
2565
2566         TRACE_EXIT_RES(res);
2567         return res;
2568 }
2569
2570 /* Called under scst_list_lock and IRQs disabled */
2571 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2572 {
2573         int res;
2574         struct scst_cmd *cmd;
2575         int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2576                         SCST_CONTEXT_DIRECT_ATOMIC);
2577
2578         TRACE_ENTRY();
2579
2580 #ifdef EXTRACHECKS
2581         {
2582                 int c = (context & ~SCST_PROCESSIBLE_ENV);
2583                 WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) && 
2584                         (c != SCST_CONTEXT_DIRECT));
2585         }
2586 #endif
2587
2588         tm_dbg_check_released_cmds();
2589
2590 restart:
2591         list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2592                 if (atomic && cmd->non_atomic_only) {
2593                         TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2594                         continue;
2595                 }
2596                 if (tm_dbg_check_cmd(cmd) != 0)
2597                         goto restart;
2598                 res = scst_process_active_cmd(cmd, context, NULL, 1);
2599                 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2600                         goto restart;
2601                 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2602                         goto restart;
2603                 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2604                         break;
2605                 } else
2606                         BUG();
2607         }
2608
2609         TRACE_EXIT();
2610         return;
2611 }
2612
2613 static inline int test_cmd_lists(void)
2614 {
2615         int res = !list_empty(&scst_active_cmd_list) ||
2616             (!list_empty(&scst_init_cmd_list) &&
2617              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2618             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2619             unlikely(scst_shut_threads_count > 0) ||
2620             tm_dbg_is_release();
2621         return res;
2622 }
2623
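     /*
      * Main worker thread loop: sleeps on scst_list_waitQ and, when woken,
      * processes the init and active cmd lists in thread (non-atomic) context.
      * Exits on shutdown, or when asked to reduce the number of threads via
      * scst_shut_threads_count.
      */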
2624 int scst_cmd_thread(void *arg)
2625 {
2626         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2627         int n;
2628
2629         TRACE_ENTRY();
2630
2631         spin_lock(&lock);
2632         n = scst_thread_num++;
2633         spin_unlock(&lock);
2634         daemonize("scsi_tgt%d", n);
2635         recalc_sigpending();
2636         set_user_nice(current, 10);
2637         current->flags |= PF_NOFREEZE;
2638
2639         spin_lock_irq(&scst_list_lock);
2640         while (1) {
2641                 wait_queue_t wait;
2642                 init_waitqueue_entry(&wait, current);
2643
2644                 if (!test_cmd_lists()) {
2645                         add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2646                         for (;;) {
2647                                 set_current_state(TASK_INTERRUPTIBLE);
2648                                 if (test_cmd_lists())
2649                                         break;
2650                                 spin_unlock_irq(&scst_list_lock);
2651                                 schedule();
2652                                 spin_lock_irq(&scst_list_lock);
2653                         }
2654                         set_current_state(TASK_RUNNING);
2655                         remove_wait_queue(&scst_list_waitQ, &wait);
2656                 }
2657
2658                 scst_do_job_init(&scst_init_cmd_list);
2659                 scst_do_job_active(&scst_active_cmd_list,
2660                                    SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
2661
2662                 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2663                     list_empty(&scst_cmd_list) &&
2664                     list_empty(&scst_active_cmd_list) &&
2665                     list_empty(&scst_init_cmd_list)) {
2666                         break;
2667                 }
2668                 
2669                 if (unlikely(scst_shut_threads_count > 0)) {
2670                         scst_shut_threads_count--;
2671                         break;
2672                 }
2673         }
2674         spin_unlock_irq(&scst_list_lock);
2675
2676         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2677                 smp_mb__after_atomic_dec();
2678                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2679                 up(scst_shutdown_mutex);
2680         }
2681
2682         TRACE_EXIT();
2683         return 0;
2684 }
2685
2686 void scst_cmd_tasklet(long p)
2687 {
2688         TRACE_ENTRY();
2689
2690         spin_lock_irq(&scst_list_lock);
2691
2692         scst_do_job_init(&scst_init_cmd_list);
2693         scst_do_job_active(&scst_active_cmd_list, 
2694                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2695
2696         spin_unlock_irq(&scst_list_lock);
2697
2698         TRACE_EXIT();
2699         return;
2700 }
2701
2702 /*
2703  * Returns 0 on success, < 0 if there is no such LUN in the session, or
2704  * > 0 if SCST_FLAG_SUSPENDED is set.
2705  */
2706 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2707 {
2708         struct scst_tgt_dev *tgt_dev = NULL;
2709         int res = -1;
2710
2711         TRACE_ENTRY();
2712
2713         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2714               (uint64_t)mcmd->lun);
2715
2716         spin_lock_irq(&scst_list_lock);
2717         scst_inc_cmd_count();   
2718         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2719                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2720                                     sess_tgt_dev_list_entry) 
2721                 {
2722                         if (tgt_dev->acg_dev->lun == mcmd->lun) {
2723                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2724                                 mcmd->mcmd_tgt_dev = tgt_dev;
2725                                 res = 0;
2726                                 break;
2727                         }
2728                 }
2729                 if (mcmd->mcmd_tgt_dev == NULL)
2730                         scst_dec_cmd_count();
2731         } else {
2732                 if ( !mcmd->sess->waiting) {
2733                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2734                               mcmd->sess);
2735                         list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2736                                       &scst_dev_wait_sess_list);
2737                         mcmd->sess->waiting = 1;
2738                 }
2739                 scst_dec_cmd_count();
2740                 res = 1;
2741         }
2742         spin_unlock_irq(&scst_list_lock);
2743
2744         TRACE_EXIT_HRES(res);
2745         return res;
2746 }
2747
2748 /* Called under scst_list_lock and IRQ off */
2749 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2750         struct scst_mgmt_cmd *mcmd)
2751 {
2752         TRACE_ENTRY();
2753
2754         TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2755                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2756                 mcmd->cmd_wait_count);
2757
2758         cmd->mgmt_cmnd = NULL;
2759
2760         if (cmd->completed)
2761                 mcmd->completed_cmd_count++;
2762
2763         mcmd->cmd_wait_count--;
2764         if (mcmd->cmd_wait_count > 0) {
2765                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2766                         mcmd->cmd_wait_count);
2767                 goto out;
2768         }
2769
2770         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2771
2772         if (mcmd->completed) {
2773                 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2774                         mcmd);
2775                 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2776                         &scst_active_mgmt_cmd_list);
2777         }
2778
2779         wake_up(&scst_mgmt_cmd_list_waitQ);
2780
2781 out:
2782         TRACE_EXIT();
2783         return;
2784 }
2785
2786 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2787         struct scst_tgt_dev *tgt_dev, int set_status)
2788 {
2789         int res = SCST_DEV_TM_NOT_COMPLETED;
2790         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2791                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2792                       tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2793                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2794                         tgt_dev);
2795                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2796                       tgt_dev->acg_dev->dev->handler->name, res);
2797                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2798                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2799                                                 SCST_MGMT_STATUS_SUCCESS :
2800                                                 SCST_MGMT_STATUS_FAILED;
2801                 }
2802         }
2803         return res;
2804 }
2805
2806 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2807 {
2808         switch(mgmt_fn) {
2809                 case SCST_ABORT_TASK:
2810                 case SCST_ABORT_TASK_SET:
2811                 case SCST_CLEAR_TASK_SET:
2812                         return 1;
2813                 default:
2814                         return 0;
2815         }
2816 }
2817
2818 /*
2819  * Called under scst_list_lock and IRQ off (to protect cmd
2820  * from being destroyed). If the cmd is already being executed or
2821  * xmitted, the response to mcmd is deferred until the cmd finishes.
2822  */
2823 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2824         int other_ini, int call_dev_task_mgmt_fn)
2825 {
2826         TRACE_ENTRY();
2827
2828         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2829
2830         if (other_ini) {
2831                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2832                 smp_mb__after_set_bit();
2833         }
2834         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2835         smp_mb__after_set_bit();
2836
2837         if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2838                  scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2839
2840         if (mcmd) {
2841                 int defer;
2842                 if (cmd->tgtt->tm_sync_reply)
2843                         defer = 1;
2844                 else {
2845                         if (scst_is_strict_mgmt_fn(mcmd->fn))
2846                                 defer = test_bit(SCST_CMD_EXECUTING,
2847                                         &cmd->cmd_flags);
2848                         else
2849                                 defer = test_bit(SCST_CMD_XMITTING,
2850                                         &cmd->cmd_flags);
2851                 }
2852
2853                 if (defer) {
2854                         /*
2855                          * Delay the response until the command's finish in
2856                          * order to guarantee that "no further responses from
2857                          * the task are sent to the SCSI initiator port" after
2858                          * response from the TM function is sent (SAM)
2859                          */
2860                         TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2861                                 "xmitted (state %d), deferring ABORT...", cmd,
2862                                 cmd->tag, cmd->state);
2863 #ifdef EXTRACHECKS
2864                         if (cmd->mgmt_cmnd) {
2865                                 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2866                                         "has non-NULL mgmt_cmnd %p!!! Current "
2867                                         "mcmd %p\n", cmd, cmd->tag, cmd->state,
2868                                         cmd->mgmt_cmnd, mcmd);
2869                         }
2870 #endif
2871                         BUG_ON(cmd->mgmt_cmnd);
2872                         mcmd->cmd_wait_count++;
2873                         cmd->mgmt_cmnd = mcmd;
2874                 }
2875         }
2876
2877         tm_dbg_release_cmd(cmd);
2878
2879         TRACE_EXIT();
2880         return;
2881 }
2882
2883 /* Called under scst_list_lock and IRQ off */
2884 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2885 {
2886         int res;
2887         if (mcmd->cmd_wait_count != 0) {
2888                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2889                         "wait", mcmd->cmd_wait_count);
2890                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2891                 res = -1;
2892         } else {
2893                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2894                 res = 0;
2895         }
2896         mcmd->completed = 1;
2897         return res;
2898 }
2899
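     /*
      * Moves all aborted cmds from the per-device blocked_cmd_list to the
      * active cmd list so they can be finished, waking the cmd threads if
      * anything was moved. Takes scst_mutex itself unless scst_mutex_held.
      */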
2900 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2901 {
2902         struct scst_device *dev;
2903         int wake = 0;
2904
2905         TRACE_ENTRY();
2906
2907         if (!scst_mutex_held)
2908                 down(&scst_mutex);
2909
2910         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2911                 struct scst_cmd *cmd, *tcmd;
2912                 spin_lock_bh(&dev->dev_lock);
2913                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2914                                         blocked_cmd_list_entry) {
2915                         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2916                                 list_del(&cmd->blocked_cmd_list_entry);
2917                                 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2918                                         "to active cmd list", cmd);
2919                                 spin_lock_irq(&scst_list_lock);
2920                                 list_move_tail(&cmd->cmd_list_entry,
2921                                         &scst_active_cmd_list);
2922                                 spin_unlock_irq(&scst_list_lock);
2923                                 wake = 1;
2924                         }
2925                 }
2926                 spin_unlock_bh(&dev->dev_lock);
2927         }
2928
2929         if (!scst_mutex_held)
2930                 up(&scst_mutex);
2931
2932         if (wake)
2933                 wake_up(&scst_list_waitQ);
2934
2935         TRACE_EXIT();
2936         return;
2937 }
2938
2939 /* Aborts all cmds of the given tgt_dev found on the sess's search cmd list */
2940 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2941         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2942 {
2943         struct scst_cmd *cmd;
2944         struct scst_session *sess = tgt_dev->sess;
2945
2946         TRACE_ENTRY();
2947
2948         spin_lock_irq(&scst_list_lock);
2949
2950         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2951         list_for_each_entry(cmd, &sess->search_cmd_list, 
2952                         search_cmd_list_entry) {
2953                 if ((cmd->tgt_dev == NULL) && 
2954                     (cmd->lun != tgt_dev->acg_dev->lun))
2955                         continue; /* not yet assigned, different LUN */
2956                 if ((cmd->tgt_dev != NULL) && (cmd->tgt_dev != tgt_dev))
2957                         continue; /* assigned to a different tgt_dev */
2958                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2959         }
2960         spin_unlock_irq(&scst_list_lock);
2961
2962         scst_unblock_aborted_cmds(scst_mutex_held);
2963
2964         TRACE_EXIT();
2965         return;
2966 }
2967
2968 /* Returns 0 if the command processing should be continued, <0 otherwise */
2969 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2970 {
2971         int res;
2972         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2973         struct scst_device *dev = tgt_dev->acg_dev->dev;
2974
2975         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2976                 tgt_dev->acg_dev->lun, mcmd);
2977
2978         spin_lock_bh(&dev->dev_lock);
2979         __scst_block_dev(dev);
2980         spin_unlock_bh(&dev->dev_lock);
2981
2982         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2983         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2984
2985         res = scst_set_mcmd_next_state(mcmd);
2986
2987         TRACE_EXIT_RES(res);
2988         return res;
2989 }
2990
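     /*
      * If another TM function is already being processed (SCST_FLAG_TM_ACTIVE),
      * moves mcmd to the delayed mgmt cmd list and returns -1; otherwise sets
      * the flag and returns 0.
      */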
2991 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2992 {
2993         /*
2994          * No need for special protection for SCST_FLAG_TM_ACTIVE, since
2995          * we can only be called from a single thread.
2996          */
2997         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2998                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2999                         mcmd);
3000                 if (!locked)
3001                         spin_lock_irq(&scst_list_lock);
3002                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
3003                         &scst_delayed_mgmt_cmd_list);
3004                 if (!locked)
3005                         spin_unlock_irq(&scst_list_lock);
3006                 return -1;
3007         } else {
3008                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3009                 return 0;
3010         }
3011 }
3012
3013 /* Returns 0 if the command processing should be continued, 
3014  * >0 if it should be requeued, <0 otherwise */
3015 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3016 {
3017         int res = 0;
3018
3019         TRACE_ENTRY();
3020
3021         res = scst_check_delay_mgmt_cmd(mcmd, 1);
3022         if (res != 0)
3023                 goto out;
3024
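             /* ABORT TASK is resolved by its tag, all other functions by LUN */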
3025         if (mcmd->fn == SCST_ABORT_TASK) {
3026                 struct scst_session *sess = mcmd->sess;
3027                 struct scst_cmd *cmd;
3028
3029                 spin_lock_irq(&scst_list_lock);
3030                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3031                 if (cmd == NULL) {
3032                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3033                                 "tag %d not found", mcmd->tag);
3034                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3035                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3036                 } else {
3037                         TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
3038                                 "aborting it", cmd, mcmd->tag, cmd->sn);
3039                         mcmd->cmd_to_abort = cmd;
3040                         scst_abort_cmd(cmd, mcmd, 0, 1);
3041                         res = scst_set_mcmd_next_state(mcmd);
3042                         mcmd->cmd_to_abort = NULL; /* just in case */
3043                 }
3044                 spin_unlock_irq(&scst_list_lock);
3045         } else {
3046                 int rc;
3047                 rc = scst_mgmt_translate_lun(mcmd);
3048                 if (rc < 0) {
3049                         PRINT_ERROR_PR("Corresponding device for lun %Ld not "
3050                                 "found", (uint64_t)mcmd->lun);
3051                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3052                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3053                 } else if (rc == 0)
3054                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
3055                 else
3056                         res = rc;
3057         }
3058
3059 out:
3060         TRACE_EXIT_RES(res);
3061         return res;
3062 }
3063
3064 /* Returns 0 if the command processing should be continued, <0 otherwise */
3065 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3066 {
3067         int res, rc;
3068         struct scst_device *dev, *d;
3069         struct scst_tgt_dev *tgt_dev;
3070         int cont, c;
3071         LIST_HEAD(host_devs);
3072
3073         TRACE_ENTRY();
3074
3075         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3076                 mcmd, mcmd->sess->sess_cmd_count);
3077
3078         down(&scst_mutex);
3079
3080         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3081                 int found = 0;
3082
3083                 spin_lock_bh(&dev->dev_lock);
3084                 __scst_block_dev(dev);
3085                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3086                 spin_unlock_bh(&dev->dev_lock);
3087
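                     /*
                      * cont: the dev has at least one tgt_dev; c: at least one
                      * dev handler left the reset to the SCST core
                      * (SCST_DEV_TM_NOT_COMPLETED).
                      */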
3088                 cont = 0;
3089                 c = 0;
3090                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3091                         dev_tgt_dev_list_entry) 
3092                 {
3093                         cont = 1;
3094                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3095                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3096                                 c = 1;
3097                         else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3098                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3099                 }
3100                 if (cont && !c)
3101                         continue;
3102                 
3103                 if (dev->scsi_dev == NULL)
3104                         continue;
3105
3106                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3107                         if (dev->scsi_dev->host->host_no ==
3108                                     d->scsi_dev->host->host_no) 
3109                         {
3110                                 found = 1;
3111                                 break;
3112                         }
3113                 }
3114                 if (!found)
3115                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3116         }
3117
3118         /*
3119          * We assume here that for all commands already sent to the devices,
3120          * the completion callbacks will be called on/after scsi_reset_provider().
3121          */
3122
3123         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3124                 /* dev->scsi_dev must be non-NULL here */
3125                 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3126                       dev->scsi_dev->host->host_no);
3127                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3128                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3129                       dev->scsi_dev->host->host_no,
3130                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3131                 if (rc != SUCCESS) {
3132                         /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3133                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3134                 }
3135         }
3136
3137         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3138                 if (dev->scsi_dev != NULL)
3139                         dev->scsi_dev->was_reset = 0;
3140         }
3141
3142         up(&scst_mutex);
3143
3144         spin_lock_irq(&scst_list_lock);
3145         tm_dbg_task_mgmt("TARGET RESET");
3146         res = scst_set_mcmd_next_state(mcmd);
3147         spin_unlock_irq(&scst_list_lock);
3148
3149         TRACE_EXIT_RES(res);
3150         return res;
3151 }
3152
3153 /* Returns 0 if the command processing should be continued, <0 otherwise */
3154 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3155 {
3156         int res, rc;
3157         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3158         struct scst_device *dev = tgt_dev->acg_dev->dev;
3159
3160         TRACE_ENTRY();
3161
3162         TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3163                 mcmd);
3164
3165         spin_lock_bh(&dev->dev_lock);
3166         __scst_block_dev(dev);
3167         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3168         spin_unlock_bh(&dev->dev_lock);
3169
3170         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3171         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3172                 goto out_tm_dbg;
3173
3174         if (dev->scsi_dev != NULL) {
3175                 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3176                       dev->scsi_dev->host->host_no);
3177                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3178                 if (rc != SUCCESS)
3179                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3180                 dev->scsi_dev->was_reset = 0;
3181         }
3182
3183 out_tm_dbg:
3184         spin_lock_irq(&scst_list_lock);
3185         tm_dbg_task_mgmt("LUN RESET");
3186         res = scst_set_mcmd_next_state(mcmd);
3187         spin_unlock_irq(&scst_list_lock);
3188
3189         TRACE_EXIT_RES(res);
3190         return res;
3191 }
3192
3193 /* Returns 0 if the command processing should be continued, <0 otherwise */
3194 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3195         int nexus_loss)
3196 {
3197         int res;
3198         struct scst_session *sess = mcmd->sess;
3199         struct scst_tgt_dev *tgt_dev;
3200
3201         TRACE_ENTRY();
3202
3203         if (nexus_loss) {
3204                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3205                         mcmd);
3206         } else {
3207                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3208                         mcmd);
3209         }
3210
3211         down(&scst_mutex);
3212         list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3213                 sess_tgt_dev_list_entry) 
3214         {
3215                 struct scst_device *dev = tgt_dev->acg_dev->dev;
3216                 int rc;
3217
3218                 spin_lock_bh(&dev->dev_lock);
3219                 __scst_block_dev(dev);
3220                 spin_unlock_bh(&dev->dev_lock);
3221
3222                 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3223                 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3224                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3225
3226                 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3227                 if (nexus_loss)
3228                         scst_reset_tgt_dev(tgt_dev, 1);
3229         }
3230         up(&scst_mutex);
3231
3232         spin_lock_irq(&scst_list_lock);
3233         res = scst_set_mcmd_next_state(mcmd);
3234         spin_unlock_irq(&scst_list_lock);
3235
3236         TRACE_EXIT_RES(res);
3237         return res;
3238 }
3239
3240 /* Returns 0 if the command processing should be continued, <0 otherwise */
3241 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3242         int nexus_loss)
3243 {
3244         int res;
3245         struct scst_tgt *tgt = mcmd->sess->tgt;
3246         struct scst_session *sess;
3247         struct scst_device *dev;
3248         struct scst_tgt_dev *tgt_dev;
3249
3250         TRACE_ENTRY();
3251
3252         if (nexus_loss) {
3253                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3254                         mcmd);
3255         } else {
3256                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3257                         mcmd);
3258         }
3259
3260         down(&scst_mutex);
3261
3262         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3263                 spin_lock_bh(&dev->dev_lock);
3264                 __scst_block_dev(dev);
3265                 spin_unlock_bh(&dev->dev_lock);
3266         }
3267
3268         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3269                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3270                         sess_tgt_dev_list_entry) 
3271                 {
3272                         int rc;
3273
3274                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3275                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3276                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3277
3278                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3279                         if (nexus_loss)
3280                                 scst_reset_tgt_dev(tgt_dev, 1);
3281                 }
3282         }
3283
3284         up(&scst_mutex);
3285
3286         spin_lock_irq(&scst_list_lock);
3287         res = scst_set_mcmd_next_state(mcmd);
3288         spin_unlock_irq(&scst_list_lock);
3289
3290         TRACE_EXIT_RES(res);
3291         return res;
3292 }
3293
3294 /* Returns 0 if the command processing should be continued, <0 otherwise */
3295 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3296 {
3297         int res = 0;
3298
3299         TRACE_ENTRY();
3300
3301         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3302
3303         switch (mcmd->fn) {
3304         case SCST_ABORT_TASK_SET:
3305         case SCST_CLEAR_TASK_SET:
3306                 res = scst_abort_task_set(mcmd);
3307                 break;
3308
3309         case SCST_LUN_RESET:
3310                 res = scst_lun_reset(mcmd);
3311                 break;
3312
3313         case SCST_TARGET_RESET:
3314                 res = scst_target_reset(mcmd);
3315                 break;
3316
3317         case SCST_ABORT_ALL_TASKS_SESS:
3318                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3319                 break;
3320
3321         case SCST_NEXUS_LOSS_SESS:
3322                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3323                 break;
3324
3325         case SCST_ABORT_ALL_TASKS:
3326                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3327                 break;
3328
3329         case SCST_NEXUS_LOSS:
3330                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3331                 break;
3332
3333         case SCST_CLEAR_ACA:
3334                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3335                 /* Nothing to do (yet) */
3336                 break;
3337
3338         default:
3339                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3340                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3341                 break;
3342         }
3343
3344         TRACE_EXIT_RES(res);
3345         return res;
3346 }
3347
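     /*
      * Completes a mgmt cmd: reactivates one delayed mgmt cmd (if any), reports
      * the result to the target driver via task_mgmt_fn_done() and unblocks the
      * devices that were blocked when the TM function started.
      */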
3348 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3349 {
3350         struct scst_device *dev;
3351         struct scst_tgt_dev *tgt_dev;
3352
3353         TRACE_ENTRY();
3354
3355         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3356         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3357                 struct scst_mgmt_cmd *m;
3358                 spin_lock_irq(&scst_list_lock);
3359                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3360                                 mgmt_cmd_list_entry);
3361                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3362                         "cmd list", m);
3363                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3364                 spin_unlock_irq(&scst_list_lock);
3365         }
3366
3367         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3368         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3369                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3370
3371         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3372                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3373                       mcmd->sess->tgt->tgtt->name);
3374                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3375                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn_done() returned",
3376                       mcmd->sess->tgt->tgtt->name);
3377         }
3378
3379         switch (mcmd->fn) {
3380         case SCST_ABORT_TASK_SET:
3381         case SCST_CLEAR_TASK_SET:
3382         case SCST_LUN_RESET:
3383                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3384                 break;
3385
3386         case SCST_TARGET_RESET:
3387         case SCST_ABORT_ALL_TASKS:
3388         case SCST_NEXUS_LOSS:
3389                 down(&scst_mutex);
3390                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3391                         scst_unblock_dev(dev);
3392                 }
3393                 up(&scst_mutex);
3394                 break;
3395
3396         case SCST_NEXUS_LOSS_SESS:
3397         case SCST_ABORT_ALL_TASKS_SESS:
3398                 down(&scst_mutex);
3399                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3400                                 sess_tgt_dev_list_entry) {
3401                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3402                 }
3403                 up(&scst_mutex);
3404                 break;
3405
3406         case SCST_CLEAR_ACA:
3407         default:
3408                 break;
3409         }
3410
3411         mcmd->tgt_priv = NULL;
3412
3413         TRACE_EXIT();
3414         return;
3415 }
3416
3417 /* Returns >0 if the mgmt cmd should be requeued */
3418 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3419 {
3420         int res = 0;
3421
3422         TRACE_ENTRY();
3423
3424         TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3425
3426         while (1) {
3427                 switch (mcmd->state) {
3428                 case SCST_MGMT_CMD_STATE_INIT:
3429                         res = scst_mgmt_cmd_init(mcmd);
3430                         if (res)
3431                                 goto out;
3432                         break;
3433
3434                 case SCST_MGMT_CMD_STATE_READY:
3435                         if (scst_mgmt_cmd_exec(mcmd))
3436                                 goto out;
3437                         break;
3438
3439                 case SCST_MGMT_CMD_STATE_DONE:
3440                         scst_mgmt_cmd_send_done(mcmd);
3441                         break;
3442
3443                 case SCST_MGMT_CMD_STATE_FINISHED:
3444                         goto out_free;
3445
3446 #ifdef EXTRACHECKS
3447                 case SCST_MGMT_CMD_STATE_EXECUTING:
3448                         BUG();
3449 #endif
3450
3451                 default:
3452                         PRINT_ERROR_PR("Unknown state %d of management command",
3453                                     mcmd->state);
3454                         res = -1;
3455                         goto out_free;
3456                 }
3457         }
3458
3459 out:
3460         TRACE_EXIT_RES(res);
3461         return res;
3462
3463 out_free:
3464         scst_free_mgmt_cmd(mcmd, 1);
3465         goto out;
3466 }
3467
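     /*
      * Wakeup condition for scst_mgmt_cmd_thread(): work is available on the
      * active mgmt cmd list (and activity is not suspended), or shutdown was
      * requested.
      */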
3468 static inline int test_mgmt_cmd_list(void)
3469 {
3470         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3471                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3472                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3473         return res;
3474 }
3475
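     /*
      * Kernel thread processing mgmt cmds from scst_active_mgmt_cmd_list.
      * Exits when SCST_FLAG_SHUTDOWN is set and the list is empty; the last
      * exiting SCST thread releases scst_shutdown_mutex.
      */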
3476 int scst_mgmt_cmd_thread(void *arg)
3477 {
3478         struct scst_mgmt_cmd *mcmd;
3479
3480         TRACE_ENTRY();
3481
3482         daemonize("scsi_tgt_mc");
3483         recalc_sigpending();
3484         current->flags |= PF_NOFREEZE;
3485
3486         spin_lock_irq(&scst_list_lock);
3487         while (1) {
3488                 wait_queue_t wait;
3489                 init_waitqueue_entry(&wait, current);
3490
3491                 if (!test_mgmt_cmd_list()) {
3492                         add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3493                                                  &wait);
3494                         for (;;) {
3495                                 set_current_state(TASK_INTERRUPTIBLE);
3496                                 if (test_mgmt_cmd_list())
3497                                         break;
3498                                 spin_unlock_irq(&scst_list_lock);
3499                                 schedule();
3500                                 spin_lock_irq(&scst_list_lock);
3501                         }
3502                         set_current_state(TASK_RUNNING);
3503                         remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3504                 }
3505
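                     /*
                      * Process queued mgmt cmds; a cmd whose processing
                      * returned > 0 is put back at the head of the active
                      * list to be retried.
                      */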
3506                 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3507                        !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3508                 {
3509                         int rc;
3510                         mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3511                                           typeof(*mcmd), mgmt_cmd_list_entry);
3512                         TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3513                               mcmd);
3514                         list_move_tail(&mcmd->mgmt_cmd_list_entry,
3515                                        &scst_mgmt_cmd_list);
3516                         spin_unlock_irq(&scst_list_lock);
3517                         rc = scst_process_mgmt_cmd(mcmd);
3518                         spin_lock_irq(&scst_list_lock);
3519                         if (rc > 0) {
3520                                 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3521                                         "of active mgmt cmd list", mcmd);
3522                                 list_move(&mcmd->mgmt_cmd_list_entry,
3523                                        &scst_active_mgmt_cmd_list);
3524                         }
3525                 }
3526
3527                 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3528                     list_empty(&scst_active_mgmt_cmd_list)) 
3529                 {
3530                         break;
3531                 }
3532         }
3533         spin_unlock_irq(&scst_list_lock);
3534
3535         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3536                 smp_mb__after_atomic_dec();
3537                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3538                 up(scst_shutdown_mutex);
3539         }
3540
3541         TRACE_EXIT();
3542         return 0;
3543 }
3544
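     /*
      * Allocates and minimally initializes a new mgmt cmd on behalf of the
      * target driver. Returns NULL on allocation failure or if the driver
      * did not provide task_mgmt_fn_done().
      */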
3545 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3546         *sess, int fn, int atomic, void *tgt_priv)
3547 {
3548         struct scst_mgmt_cmd *mcmd = NULL;
3549
3550         TRACE_ENTRY();
3551
3552         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3553                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3554                             "(target %s)", sess->tgt->tgtt->name);
3555                 goto out;
3556         }
3557
3558         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3559         if (mcmd == NULL)
3560                 goto out;
3561
3562         mcmd->sess = sess;
3563         mcmd->fn = fn;
3564         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3565         mcmd->tgt_priv = tgt_priv;
3566
3567 out:
3568         TRACE_EXIT();
3569         return mcmd;
3570 }
3571
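     /*
      * Charges the new mgmt cmd to the session and queues it on the active
      * mgmt cmd list, waking the mgmt thread. Cmds received while the session
      * is still initializing are deferred; returns -1 if session init failed.
      */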
3572 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3573         struct scst_mgmt_cmd *mcmd)
3574 {
3575         unsigned long flags;
3576         int res = 0;
3577
3578         TRACE_ENTRY();
3579
3580         scst_sess_get(sess);
3581
3582         spin_lock_irqsave(&scst_list_lock, flags);
3583
3584         sess->sess_cmd_count++;
3585
3586 #ifdef EXTRACHECKS
3587         if (unlikely(sess->shutting_down)) {
3588                 PRINT_ERROR_PR("%s",
3589                         "New mgmt cmd while shutting down the session");
3590                 BUG();
3591         }
3592 #endif
3593
3594         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3595                 switch(sess->init_phase) {
3596                 case SCST_SESS_IPH_INITING:
3597                         TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
3598                                 mcmd);
3599                         list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3600                                 &sess->init_deferred_mcmd_list);
3601                         goto out_unlock;
3602                 case SCST_SESS_IPH_SUCCESS:
3603                         break;
3604                 case SCST_SESS_IPH_FAILED:
3605                         res = -1;
3606                         goto out_unlock;
3607                 default:
3608                         BUG();
3609                 }
3610         }
3611
3612         TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3613         list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3614
3615         spin_unlock_irqrestore(&scst_list_lock, flags);
3616
3617         wake_up(&scst_mgmt_cmd_list_waitQ);
3618
3619 out:
3620         TRACE_EXIT();
3621         return res;
3622
3623 out_unlock:
3624         spin_unlock_irqrestore(&scst_list_lock, flags);
3625         goto out;
3626 }
3627
3628 /* 
3629  * Must not be called in parallel with scst_unregister_session() for the 
3630  * same sess