3701f50f406767b4748d8975f662e9905615d7a6
[mirror/scst/.git] / scst / src / scst_targ.c
1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
/*
 * Move cmd to the global cmd list and process it in the given context.
 *
 * scst_list_lock assumed to be held on entry; it is ALWAYS dropped here
 * (via *pflags when the caller used spin_lock_irqsave(), otherwise via
 * spin_unlock_irq()) before __scst_process_active_cmd() runs, so the
 * caller must not touch the lists afterwards without re-taking the lock.
 */
static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
        unsigned long *pflags, int left_locked)
{
        int res;

        TRACE_ENTRY();

        TRACE_DBG("Moving cmd %p to cmd list", cmd);
        list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);

        /* This is an inline func., so unneeded code will be optimized out */
        if (pflags)
                spin_unlock_irqrestore(&scst_list_lock, *pflags);
        else
                spin_unlock_irq(&scst_list_lock);

        res = __scst_process_active_cmd(cmd, context, left_locked);

        TRACE_EXIT_RES(res);
        return res;
}
63
/*
 * Schedule SCST's per-CPU processing tasklet on the current CPU.
 * Used when a target driver calls into SCST from IRQ context, where
 * direct processing is not allowed.
 */
static inline void scst_schedule_tasklet(void)
{
        struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];

#if 0 /* Looks like #else is better for performance */
        if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
                tasklet_schedule(t);
        else {
                /* 
                 * We suppose that other CPU(s) are rather idle, so we
                 * ask one of them to help
                 */
                TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
                        "instead", smp_processor_id());
                wake_up(&scst_list_waitQ);
        }
#else
        /* Unconditional schedule; tasklet_schedule() is a no-op if already
         * pending on this CPU */
        tasklet_schedule(t);
#endif
}
84
85 /* 
86  * Must not been called in parallel with scst_unregister_session() for the 
87  * same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both wrong lun and CDB defer the error reporting for
115          * scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
132
/*
 * scst_cmd_init_done() - target driver's notification that command
 * initialization (tag, LUN, CDB, expected transfer data) is complete.
 *
 * Accounts the cmd in its session, reports the errors deferred by
 * scst_rx_cmd() (bad LUN, bad/oversized CDB), and routes the cmd either to
 * the init list or directly to response transmission, processed according
 * to pref_context (direct call, SCST thread, or tasklet).
 *
 * Takes and releases scst_list_lock; must be callable from IRQ context
 * only with SCST_CONTEXT_TASKLET or SCST_CONTEXT_THREAD.
 */
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
{
        int res = 0;
        unsigned long flags = 0;
        struct scst_session *sess = cmd->sess;

        TRACE_ENTRY();

        TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
        TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
                (uint64_t)cmd->lun, cmd->cdb_len);
        TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Recieving CDB",
                cmd->cdb, cmd->cdb_len);

        /* Direct processing may sleep or take non-IRQ-safe locks, so demote
         * it to tasklet context when called from an interrupt handler */
        if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
                         (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
        {
                PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
                        "SCST_CONTEXT_TASKLET instead\n", pref_context,
                        cmd->tgtt->name);
                pref_context = SCST_CONTEXT_TASKLET;
        }

        spin_lock_irqsave(&scst_list_lock, flags);

        /* Let's make it here, this will save us a lock or atomic */
        sess->sess_cmd_count++;

        list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);

        /* Session not fully initialized yet: defer or fail the cmd */
        if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
                switch(sess->init_phase) {
                case SCST_SESS_IPH_SUCCESS:
                        /* Init just succeeded; proceed normally */
                        break;
                case SCST_SESS_IPH_INITING:
                        TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
                        list_add_tail(&cmd->cmd_list_entry, 
                                &sess->init_deferred_cmd_list);
                        goto out_unlock_flags;
                case SCST_SESS_IPH_FAILED:
                        /* Session init failed: answer BUSY immediately */
                        scst_set_busy(cmd);
                        cmd->state = SCST_CMD_STATE_XMIT_RESP;
                        TRACE_DBG("Adding cmd %p to active cmd list", cmd);
                        list_add_tail(&cmd->cmd_list_entry, 
                                &scst_active_cmd_list);
                        goto active;
                default:
                        BUG();
                }
        }

        /* Deferred error from scst_rx_cmd(): LUN could not be unpacked */
        if (unlikely(cmd->lun == (lun_t)-1)) {
                PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_lun_not_supported));
                cmd->state = SCST_CMD_STATE_XMIT_RESP;
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                goto active;
        }

        /* Deferred error from scst_rx_cmd(): CDB missing or oversized */
        if (unlikely(cmd->cdb_len == 0)) {
                PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_invalid_opcode));
                cmd->state = SCST_CMD_STATE_XMIT_RESP;
                TRACE_DBG("Adding cmd %p to active cmd list", cmd);
                list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                goto active;
        }

        cmd->state = SCST_CMD_STATE_INIT;

        TRACE_DBG("Moving cmd %p to init cmd list", cmd);
        list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);

        switch (pref_context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                /* scst_do_job_init() releases scst_list_lock when res > 0?
                 * NOTE(review): > 0 is treated as "lock still held" here —
                 * confirm against scst_do_job_init()'s contract */
                res = scst_do_job_init(&scst_init_cmd_list);
                if (res > 0)
                        goto out_unlock_flags;
                break;

        case SCST_CONTEXT_THREAD:
                goto out_thread_unlock_flags;

        case SCST_CONTEXT_TASKLET:
                scst_schedule_tasklet();
                goto out_unlock_flags;

        default:
                PRINT_ERROR_PR("Context %x is undefined, using thread one",
                            pref_context);
                goto out_thread_unlock_flags;
        }

active:
        /* cmd is already on scst_active_cmd_list; dispatch its processing */
        switch (pref_context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                /* Drops scst_list_lock (via &flags) inside */
                scst_process_active_cmd(cmd, pref_context, &flags, 0);
                break;

        case SCST_CONTEXT_THREAD:
                goto out_thread_unlock_flags;

        case SCST_CONTEXT_TASKLET:
                scst_schedule_tasklet();
                goto out_unlock_flags;

        default:
                PRINT_ERROR_PR("Context %x is undefined, using thread one",
                            pref_context);
                goto out_thread_unlock_flags;
        }

out:
        TRACE_EXIT();
        return;

out_unlock_flags:
        spin_unlock_irqrestore(&scst_list_lock, flags);
        goto out;

out_thread_unlock_flags:
        /* Mark the cmd so only a (sleepable) SCST thread picks it up */
        cmd->non_atomic_only = 1;
        spin_unlock_irqrestore(&scst_list_lock, flags);
        wake_up(&scst_list_waitQ);
        goto out;
}
264
/*
 * scst_parse_cmd() - the PARSE state of cmd processing.
 *
 * Determines the cmd's data direction, CDB length and transfer length from
 * the internal opcode table (falling back to initiator-supplied expected
 * values for unknown opcodes), rejects unsupported NACA/LINK control-byte
 * bits, then lets the device handler's parse() callback adjust or override
 * the result and choose the next processing state.
 *
 * Returns one of SCST_CMD_STATE_RES_* codes telling the state machine how
 * to continue (same cmd, restart, or reschedule to a thread).
 */
static int scst_parse_cmd(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_RES_CONT_SAME;
        int state;
        struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
        struct scst_device *dev = cmd->dev;
        struct scst_info_cdb cdb_info;
        int atomic = scst_cmd_atomic(cmd);
        int set_dir = 1;

        TRACE_ENTRY();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_DBG("ABORTED set, returning ABORTED "
                        "for cmd %p", cmd);
                goto out_xmit;
        }

        /* Handler's parse() may sleep; defer to a thread if we can't */
        if (atomic && !dev->handler->parse_atomic) {
                TRACE_DBG("Dev handler %s parse() can not be "
                      "called in atomic context, rescheduling to the thread",
                      dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        /*
         * Expected transfer data supplied by the SCSI transport via the
         * target driver are untrusted, so we prefer to fetch them from CDB.
         * Additionally, not all transports support supplying the expected
         * transfer data.
         */

        if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
                        &cdb_info) != 0)) 
        {
                /* Rate-limit the "unknown opcode" hint to 10 messages total */
                static int t;
                if (t < 10) {
                        t++;
                        PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
                                "Should you update scst_scsi_op_table?",
                                cmd->cdb[0], dev->handler->name);
                }
                if (scst_cmd_is_expected_set(cmd)) {
                        TRACE(TRACE_MINOR, "Using initiator supplied values: "
                                "direction %d, transfer_len %d",
                                cmd->expected_data_direction,
                                cmd->expected_transfer_len);
                        cmd->data_direction = cmd->expected_data_direction;
                        cmd->bufflen = cmd->expected_transfer_len;
                        /* Restore (most probably) lost CDB length */
                        cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
                        if (cmd->cdb_len == -1) {
                                PRINT_ERROR_PR("Unable to get CDB length for "
                                        "opcode 0x%02x. Returning INVALID "
                                        "OPCODE", cmd->cdb[0]);
                                scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
                                goto out_xmit;
                        }
                }
                else {
                        PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
                             "target %s not supplied expected values. "
                             "Returning INVALID OPCODE.", cmd->cdb[0], 
                             dev->handler->name, cmd->tgtt->name);
                        scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
                        goto out_xmit;
                }
        } else {
                TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
                        "set %s), transfer_len=%d (expected len %d), flags=%d",
                        cdb_info.op_name, cdb_info.direction,
                        cmd->expected_data_direction,
                        scst_cmd_is_expected_set(cmd) ? "yes" : "no",
                        cdb_info.transfer_len, cmd->expected_transfer_len,
                        cdb_info.flags);

                /* Restore (most probably) lost CDB length */
                cmd->cdb_len = cdb_info.cdb_len;

                cmd->data_direction = cdb_info.direction;
                if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
                        cmd->bufflen = cdb_info.transfer_len;
                /* else cmd->bufflen remains 0, as it was initialized */
        }

        /* NACA in the CDB control byte (last CDB byte) is not supported */
        if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
                PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
                            "(opcode 0x%02x)", cmd->cdb[0]);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
                goto out_xmit;
        }

        /* Linked commands (LINK bit in the control byte) are not supported */
        if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
                PRINT_ERROR_PR("Linked commands are not supported "
                            "(opcode 0x%02x)", cmd->cdb[0]);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
                goto out_xmit;
        }

        if (likely(!scst_is_cmd_local(cmd))) {
                /* Let the device handler refine/override the parse result */
                TRACE_DBG("Calling dev handler %s parse(%p)",
                      dev->handler->name, cmd);
                TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
                state = dev->handler->parse(cmd, &cdb_info);
                TRACE_DBG("Dev handler %s parse() returned %d",
                        dev->handler->name, state);

                /* -1 means "same as buffer length" */
                if (cmd->data_len == -1)
                        cmd->data_len = cmd->bufflen;

                if (state == SCST_CMD_STATE_DEFAULT)
                        state = SCST_CMD_STATE_PREPARE_SPACE;
        }
        else
                state = SCST_CMD_STATE_PREPARE_SPACE;

#ifdef EXTRACHECKS
        /* Sanity-check that the handler left the cmd in a coherent state */
        if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
                if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
                        (state != SCST_CMD_STATE_DEV_PARSE)) ||
                    ((cmd->bufflen != 0) && 
                        (cmd->data_direction == SCST_DATA_NONE)) ||
                    ((cmd->bufflen == 0) && 
                        (cmd->data_direction != SCST_DATA_NONE)) ||
                    ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
                        (state > SCST_CMD_STATE_PREPARE_SPACE))) 
                {
                        PRINT_ERROR_PR("Dev handler %s parse() returned "
                                       "invalid cmd data_direction %d, "
                                       "bufflen %zd or state %d (opcode 0x%x)",
                                       dev->handler->name, 
                                       cmd->data_direction, cmd->bufflen,
                                       state, cmd->cdb[0]);
                        goto out_error;
                }
        }
#endif

        switch (state) {
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_SEND_TO_MIDLEV:
        case SCST_CMD_STATE_DEV_DONE:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
                cmd->state = state;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                break;

        case SCST_CMD_STATE_REINIT:
                /* Handler redirected the cmd; remember the original tgt_dev */
                cmd->tgt_dev_saved = tgt_dev_saved;
                cmd->state = state;
                res = SCST_CMD_STATE_RES_RESTART;
                set_dir = 0;
                break;

        case SCST_CMD_STATE_NEED_THREAD_CTX:
                TRACE_DBG("Dev handler %s parse() requested thread "
                      "context, rescheduling", dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                set_dir = 0;
                break;

        default:
                if (state >= 0) {
                        PRINT_ERROR_PR("Dev handler %s parse() returned "
                             "invalid cmd state %d (opcode %d)", 
                             dev->handler->name, state, cmd->cdb[0]);
                } else {
                        PRINT_ERROR_PR("Dev handler %s parse() returned "
                                "error %d (opcode %d)", dev->handler->name, 
                                state, cmd->cdb[0]);
                }
                goto out_error;
        }

        /* Default the response data length, unless deferred above */
        if ((cmd->resp_data_len == -1) && set_dir) {
                if (cmd->data_direction == SCST_DATA_READ)
                        cmd->resp_data_len = cmd->bufflen;
                else
                         cmd->resp_data_len = 0;
        }
        
out:
        TRACE_EXIT_HRES(res);
        return res;

out_error:
        /* dev_done() will be called as part of the regular cmd's finish */
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;

out_xmit:
        cmd->state = SCST_CMD_STATE_XMIT_RESP;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
470
471 void scst_cmd_mem_work_fn(void *p)
472 {
473         TRACE_ENTRY();
474
475         spin_lock_bh(&scst_cmd_mem_lock);
476
477         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
478         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
479                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
480                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
481         } else {
482                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
483                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
484         }
485         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
486
487         spin_unlock_bh(&scst_cmd_mem_lock);
488
489         TRACE_EXIT();
490         return;
491 }
492
493 int scst_check_mem(struct scst_cmd *cmd)
494 {
495         int res = 0;
496
497         TRACE_ENTRY();
498
499         if (cmd->mem_checked)
500                 goto out;
501
502         spin_lock_bh(&scst_cmd_mem_lock);
503
504         scst_cur_cmd_mem += cmd->bufflen;
505         cmd->mem_checked = 1;
506         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
507                 goto out_unlock;
508
509         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
510                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
511                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
512                 (cmd->sess->initiator_name[0] == '\0') ?
513                   "Anonymous" : cmd->sess->initiator_name,
514                 scst_cur_max_cmd_mem >> 10);
515
516         scst_cur_cmd_mem -= cmd->bufflen;
517         cmd->mem_checked = 0;
518         scst_set_busy(cmd);
519         cmd->state = SCST_CMD_STATE_XMIT_RESP;
520         res = 1;
521
522 out_unlock:
523         spin_unlock_bh(&scst_cmd_mem_lock);
524
525 out:
526         TRACE_EXIT_RES(res);
527         return res;
528 }
529
/*
 * Lower the current command memory limit after an allocation failure,
 * down to 3/4 of what is actually in use (but no less than 16 MB), and
 * (re)schedule scst_cmd_mem_work to gradually raise it back.
 *
 * NOTE(review): the SCHEDULED flag is cleared above, then tested and set
 * below without test_and_set_bit(); another CPU setting the flag in
 * between appears possible — confirm whether callers serialize this.
 */
static void scst_low_cur_max_cmd_mem(void)
{
        TRACE_ENTRY();

        /* Stop a pending grow-back work item before lowering the limit */
        if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
                cancel_delayed_work(&scst_cmd_mem_work);
                flush_scheduled_work();
                clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
        }

        spin_lock_bh(&scst_cmd_mem_lock);

        /* New limit = 3/4 of currently allocated command memory */
        scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
                                (scst_cur_cmd_mem >> 2);
        if (scst_cur_max_cmd_mem < 16*1024*1024)
                scst_cur_max_cmd_mem = 16*1024*1024;

        if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
                TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
                schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
                set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
        }

        spin_unlock_bh(&scst_cmd_mem_lock);

        TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);

        TRACE_EXIT();
        return;
}
560
561 static int scst_prepare_space(struct scst_cmd *cmd)
562 {
563         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
564
565         TRACE_ENTRY();
566
567         if (cmd->data_direction == SCST_DATA_NONE) {
568                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
569                 goto out;
570         }
571
572         r = scst_check_mem(cmd);
573         if (unlikely(r != 0))
574                 goto out;
575
576         if (cmd->data_buf_tgt_alloc) {
577                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
578                 r = cmd->tgtt->alloc_data_buf(cmd);
579                 cmd->data_buf_alloced = (r == 0);
580         } else
581                 r = scst_alloc_space(cmd);
582
583         if (r != 0) {
584                 if (scst_cmd_atomic(cmd)) {
585                         TRACE_MEM("%s", "Atomic memory allocation failed, "
586                               "rescheduling to the thread");
587                         res = SCST_CMD_STATE_RES_NEED_THREAD;
588                         goto out;
589                 } else
590                         goto out_no_space;
591         }
592
593         switch (cmd->data_direction) {
594         case SCST_DATA_WRITE:
595                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
596                 break;
597
598         default:
599                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
600                 break;
601         }
602
603 out:
604         TRACE_EXIT_HRES(res);
605         return res;
606
607 out_no_space:
608         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
609                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
610         scst_low_cur_max_cmd_mem();
611         scst_set_busy(cmd);
612         cmd->state = SCST_CMD_STATE_DEV_DONE;
613         res = SCST_CMD_STATE_RES_CONT_SAME;
614         goto out;
615 }
616
/*
 * Queue cmd on the target's retry list after the target driver returned
 * QUEUE FULL.  Returns 0 if queued (the retry timer will resubmit it),
 * or -1 if at least one cmd finished since the caller sampled
 * finished_cmds, meaning the caller should retry immediately itself.
 *
 * No locks held on entry; takes tgt->tgt_lock and, nested inside it,
 * scst_list_lock.
 */
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
        struct scst_tgt *tgt = cmd->sess->tgt;
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_cmds++;
        /* Make the increment visible before re-checking finished_cmds */
        smp_mb();
        TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
              tgt->retry_cmds);
        if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
                /* At least one cmd finished, so try again */
                tgt->retry_cmds--;
                TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
                      "(finished_cmds=%d, tgt->finished_cmds=%d, "
                      "retry_cmds=%d)", finished_cmds,
                      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
                res = -1;
                goto out_unlock_tgt;
        }

        TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
        /* IRQ already off */
        spin_lock(&scst_list_lock);
        list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
        spin_unlock(&scst_list_lock);

        /* Arm the retry timer once; it stays active until retries drain */
        if (!tgt->retry_timer_active) {
                tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
                add_timer(&tgt->retry_timer);
                tgt->retry_timer_active = 1;
        }

out_unlock_tgt:
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        TRACE_EXIT_RES(res);
        return res;
}
660
/*
 * The RDY_TO_XFER state: ask the target driver to start fetching WRITE
 * data from the initiator via its rdy_to_xfer() callback.
 *
 * Handles QUEUE FULL from the driver by parking the cmd on the retry list
 * (or looping for an immediate retry if other cmds finished meanwhile),
 * and reschedules to a thread when the driver needs process context.
 * Returns an SCST_CMD_STATE_RES_* continuation code.
 */
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
        int res, rc;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
        {
                TRACE_DBG("ABORTED set, returning ABORTED for "
                        "cmd %p", cmd);
                goto out_dev_done;
        }

        /* The driver's rdy_to_xfer() may sleep; defer if we can't */
        if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
                TRACE_DBG("%s", "rdy_to_xfer() can not be "
                      "called in atomic context, rescheduling to the thread");
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        while (1) {
                /* Sampled before the call so scst_queue_retry_cmd() can
                 * detect cmds that finished during rdy_to_xfer() */
                int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

                /* Set the success state up front: on SUCCESS the driver may
                 * complete the data transfer (and re-enter SCST) before
                 * rdy_to_xfer() even returns */
                res = SCST_CMD_STATE_RES_CONT_NEXT;
                cmd->state = SCST_CMD_STATE_DATA_WAIT;

                TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef DEBUG_RETRY
                if (((scst_random() % 100) == 75))
                        rc = SCST_TGT_RES_QUEUE_FULL;
                else
#endif
                        rc = cmd->tgtt->rdy_to_xfer(cmd);
                TRACE_DBG("rdy_to_xfer() returned %d", rc);

                if (likely(rc == SCST_TGT_RES_SUCCESS))
                        goto out;

                /* Restore the previous state */
                cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

                switch (rc) {
                case SCST_TGT_RES_QUEUE_FULL:
                {
                        /* 0: queued for timed retry; nonzero: retry now */
                        if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
                                break;
                        else
                                continue;
                }

                case SCST_TGT_RES_NEED_THREAD_CTX:
                {
                        TRACE_DBG("Target driver %s "
                              "rdy_to_xfer() requested thread "
                              "context, rescheduling", cmd->tgtt->name);
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        break;
                }

                default:
                        goto out_error_rc;
                }
                break;
        }

out:
        TRACE_EXIT_HRES(res);
        return res;

out_error_rc:
        if (rc == SCST_TGT_RES_FATAL_ERROR) {
                PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
                     "fatal error", cmd->tgtt->name);
        } else {
                PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
                            "value %d", cmd->tgtt->name, rc);
        }
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
746
/*
 * scst_rx_data() - target driver's notification that the WRITE data
 * transfer started by rdy_to_xfer() has completed (or failed).
 *
 * Sets the cmd's next state from @status (SCST_RX_STATUS_*), then resumes
 * processing according to pref_context: directly, via an SCST thread, or
 * via the tasklet.  Callable from IRQ context only with
 * SCST_CONTEXT_TASKLET or SCST_CONTEXT_THREAD.
 */
void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
{
        unsigned long flags;

        TRACE_ENTRY();

        TRACE_DBG("Preferred context: %d", pref_context);
        TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
        cmd->non_atomic_only = 0;

        /* Direct processing is forbidden in IRQ; demote to tasklet */
        if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
                         (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
        {
                PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
                        "SCST_CONTEXT_TASKLET instead\n", pref_context,
                        cmd->tgtt->name);
                pref_context = SCST_CONTEXT_TASKLET;
        }

        switch (status) {
        case SCST_RX_STATUS_SUCCESS:
                cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
                break;

        case SCST_RX_STATUS_ERROR_SENSE_SET:
                /* Driver already filled in the sense buffer */
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_RX_STATUS_ERROR_FATAL:
                /* Connection-level failure: suppress the response */
                set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
                /* go through */
        case SCST_RX_STATUS_ERROR:
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_hardw_error));
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;

        default:
                PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
                            status);
                break;
        }

        switch (pref_context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                scst_check_retries(cmd->tgt, 0);
                /* left_locked == 0: scst_list_lock is not held here */
                __scst_process_active_cmd(cmd, pref_context, 0);
                break;

        default:
                PRINT_ERROR_PR("Context %x is undefined, using thread one",
                            pref_context);
                /* go through */
        case SCST_CONTEXT_THREAD:
                spin_lock_irqsave(&scst_list_lock, flags);
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                /* Only a sleepable SCST thread may pick this cmd up */
                cmd->non_atomic_only = 1;
                spin_unlock_irqrestore(&scst_list_lock, flags);
                scst_check_retries(cmd->tgt, 1);
                wake_up(&scst_list_waitQ);
                break;

        case SCST_CONTEXT_TASKLET:
                spin_lock_irqsave(&scst_list_lock, flags);
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                spin_unlock_irqrestore(&scst_list_lock, flags);
                scst_schedule_tasklet();
                scst_check_retries(cmd->tgt, 0);
                break;
        }

        TRACE_EXIT();
        return;
}
824
/*
 * Examines the completion of cmd for sense data and Unit Attention (UA)
 * conditions, and handles the "double reset UA" window: if the device may
 * deliver a second reset UA right after the first one
 * (dev->dev_double_ua_possible), the command hitting the second UA is
 * silently retried instead of returning the UA to the initiator.
 *
 * cmd        - the completed command
 * req        - mid-level request the command was executed with, or NULL
 *              when it was executed internally (sense is then taken from
 *              cmd->sense_buffer directly)
 * next_state - in/out next command state; set to
 *              SCST_CMD_STATE_SEND_TO_MIDLEV only for the retry case
 *
 * No locks supposed to be held on entry; dev->dev_lock is taken here
 * while the double-UA window is open.
 */
static void scst_check_sense(struct scst_cmd *cmd, struct scsi_request *req,
                             int *next_state)
{
        int sense_valid;
        struct scst_device *dev = cmd->dev;
        int dbl_ua_possible, ua_sent = 0;

        TRACE_ENTRY();

        /* If we had a internal bus reset behind us, set the command error UA */
        if ((dev->scsi_dev != NULL) &&
            unlikely(cmd->host_status == DID_RESET) &&
            scst_is_ua_command(cmd))
        {
                TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
                      dev->scsi_dev->was_reset, cmd->host_status);
                scst_set_cmd_error(cmd,
                   SCST_LOAD_SENSE(scst_sense_reset_UA));
                /* just in case */
                cmd->ua_ignore = 0;
                /* It looks like it is safe to clear was_reset here */
                dev->scsi_dev->was_reset = 0;
                smp_mb();
        }

        /* Pick up the sense: from the request if there is one, else local */
        if (req != NULL) {
                sense_valid = SCST_SENSE_VALID(req->sr_sense_buffer);
                if (sense_valid) {
                        memcpy(cmd->sense_buffer, req->sr_sense_buffer,
                               sizeof(cmd->sense_buffer));
                }
        } else
                sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);

        dbl_ua_possible = dev->dev_double_ua_possible;
        TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
        if (unlikely(dbl_ua_possible)) {
                /*
                 * While the double-UA window is open, dev_lock is kept held
                 * until the end of this function (or until out_unlock).
                 */
                spin_lock_bh(&dev->dev_lock);
                barrier(); /* to reread dev_double_ua_possible */
                dbl_ua_possible = dev->dev_double_ua_possible;
                if (dbl_ua_possible)
                        ua_sent = dev->dev_reset_ua_sent;
                else
                        spin_unlock_bh(&dev->dev_lock);
        }

        if (sense_valid) {
                TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
                             sizeof(cmd->sense_buffer));
                /* Check Unit Attention Sense Key */
                if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
                        if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
                                if (dbl_ua_possible) 
                                {
                                        if (ua_sent) {
                                                /*
                                                 * Second reset UA in a row:
                                                 * wipe the status/sense and
                                                 * retry the command
                                                 * transparently.
                                                 */
                                                TRACE(TRACE_MGMT, "%s", 
                                                        "Double UA detected");
                                                /* Do retry */
                                                TRACE(TRACE_MGMT, "Retrying cmd %p "
                                                        "(tag %d)", cmd, cmd->tag);
                                                cmd->status = 0;
                                                cmd->masked_status = 0;
                                                cmd->msg_status = 0;
                                                cmd->host_status = DID_OK;
                                                cmd->driver_status = 0;
                                                memset(cmd->sense_buffer, 0,
                                                        sizeof(cmd->sense_buffer));
                                                cmd->retry = 1;
                                                *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
                                                /* 
                                                 * Dev is still blocked by this cmd, so
                                                 * it's OK to clear SCST_DEV_SERIALIZED
                                                 * here.
                                                 */
                                                dev->dev_double_ua_possible = 0;
                                                dev->dev_serialized = 0;
                                                dev->dev_reset_ua_sent = 0;
                                                goto out_unlock;
                                        } else
                                                dev->dev_reset_ua_sent = 1;
                                }
                        }
                        /* Deliver the UA to the other tgt_devs of the device */
                        if (cmd->ua_ignore == 0) {
                                if (unlikely(dbl_ua_possible)) {
                                        /* dev_lock is held on this path */
                                        __scst_process_UA(dev, cmd,
                                                cmd->sense_buffer,
                                                sizeof(cmd->sense_buffer), 0);
                                } else {
                                        scst_process_UA(dev, cmd,
                                                cmd->sense_buffer,
                                                sizeof(cmd->sense_buffer), 0);
                                }
                        }
                }
        }

        if (unlikely(dbl_ua_possible)) {
                /*
                 * A UA-sensitive command completing after the reset UA was
                 * delivered closes the double-UA window.
                 */
                if (ua_sent && scst_is_ua_command(cmd)) {
                        TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
                        dev->dev_double_ua_possible = 0;
                        dev->dev_serialized = 0;
                        dev->dev_reset_ua_sent = 0;
                }
                spin_unlock_bh(&dev->dev_lock);
        }

out:
        TRACE_EXIT();
        return;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        goto out;
}
940
941 static int scst_check_auto_sense(struct scst_cmd *cmd)
942 {
943         int res = 0;
944
945         TRACE_ENTRY();
946
947         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
948             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
949              SCST_NO_SENSE(cmd->sense_buffer)))
950         {
951                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
952                       "cmd->status=%x, cmd->masked_status=%x, "
953                       "cmd->msg_status=%x, cmd->host_status=%x, "
954                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
955                       cmd->msg_status, cmd->host_status, cmd->driver_status);
956                 res = 1;
957         } else if (unlikely(cmd->host_status)) {
958                 if ((cmd->host_status == DID_REQUEUE) ||
959                     (cmd->host_status == DID_IMM_RETRY) ||
960                     (cmd->host_status == DID_SOFT_ERROR)) {
961                         scst_set_busy(cmd);
962                 } else {
963                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
964                                 "received, returning HARDWARE ERROR instead",
965                                 cmd->host_status);
966                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
967                 }
968         }
969
970         TRACE_EXIT_RES(res);
971         return res;
972 }
973
974 static void scst_do_cmd_done(struct scst_cmd *cmd,
975         struct scsi_request *req, int *next_state)
976 {
977         TRACE_ENTRY();
978
979         cmd->status = req->sr_result & 0xff;
980         cmd->masked_status = status_byte(req->sr_result);
981         cmd->msg_status = msg_byte(req->sr_result);
982         cmd->host_status = host_byte(req->sr_result);
983         cmd->driver_status = driver_byte(req->sr_result);
984         TRACE(TRACE_SCSI, "req->sr_result=%x, cmd->status=%x, "
985               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
986               "cmd->driver_status=%x", req->sr_result, cmd->status,
987               cmd->masked_status, cmd->msg_status, cmd->host_status,
988               cmd->driver_status);
989
990         scst_check_sense(cmd, req, next_state);
991
992         cmd->bufflen = req->sr_bufflen; //??
993
994         /* Clear out request structure */
995         req->sr_use_sg = 0;
996         req->sr_sglist_len = 0;
997         req->sr_bufflen = 0;
998         req->sr_buffer = NULL;
999         req->sr_underflow = 0;
1000         req->sr_request->rq_disk = NULL; /* disown request blk */ ;
1001
1002         TRACE_EXIT();
1003         return;
1004 }
1005
1006 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1007                                             struct scsi_request **req)
1008 {
1009         struct scst_cmd *cmd = NULL;
1010
1011         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1012                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1013
1014         if (cmd == NULL) {
1015                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1016                 if (*req)
1017                         scsi_release_request(*req);
1018         }
1019
1020         return cmd;
1021 }
1022
1023 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1024 {
1025         struct scsi_request *req = NULL;
1026         struct scst_cmd *cmd;
1027         int next_state;
1028         unsigned char type;
1029
1030         TRACE_ENTRY();
1031
1032         WARN_ON(in_irq());
1033
1034         /*
1035          * We don't use scsi_cmd->resid, because:
1036          * 1. Many low level initiator drivers don't use (set) this field
1037          * 2. We determine the command's buffer size directly from CDB, 
1038          *    so scsi_cmd->resid is not relevant for us, and target drivers 
1039          *    should know the residual, if necessary, by comparing expected 
1040          *    and actual transfer sizes.
1041          */
1042
1043         cmd = scst_get_cmd(scsi_cmd, &req);
1044         if (cmd == NULL)
1045                 goto out;
1046
1047         cmd->completed = 1;
1048
1049         scst_dec_on_dev_cmd(cmd);
1050
1051         type = cmd->dev->handler->type;
1052         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1053             cmd->tgt_dev->acg_dev->rd_only_flag &&
1054             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1055              type == TYPE_TAPE)) {
1056                 int32_t length;
1057                 uint8_t *address;
1058
1059                 length = scst_get_buf_first(cmd, &address);
1060                 TRACE_DBG("length %d", length);
1061                 if (unlikely(length <= 0)) {
1062                         goto out;
1063                 }
1064                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1065                         address[2] |= 0x80;   /* Write Protect*/
1066                 }
1067                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1068                         address[3] |= 0x80;   /* Write Protect*/
1069                 }
1070                 scst_put_buf(cmd, address);
1071         }
1072
1073         next_state = SCST_CMD_STATE_DEV_DONE;
1074
1075         scst_do_cmd_done(cmd, req, &next_state);
1076
1077         scst_release_request(cmd);
1078
1079         cmd->state = next_state;
1080         cmd->non_atomic_only = 0;
1081
1082         __scst_process_active_cmd(cmd, scst_get_context(), 0);
1083
1084 out:
1085         TRACE_EXIT();
1086         return;
1087 }
1088
/*
 * Completion callback used for locally executed commands and dev handler
 * exec() (installed as cmd->scst_cmd_done).  Finalizes the command and
 * re-enters the state machine in the current context.  Must not be
 * called from hard IRQ context.
 *
 * next_state - state to continue with; SCST_CMD_STATE_DEFAULT maps to
 *              SCST_CMD_STATE_DEV_DONE
 */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
{
        TRACE_ENTRY();

        BUG_ON(in_irq());

        scst_dec_on_dev_cmd(cmd);

        if (next_state == SCST_CMD_STATE_DEFAULT)
                next_state = SCST_CMD_STATE_DEV_DONE;

        if (next_state == SCST_CMD_STATE_DEV_DONE) {
#if defined(DEBUG) || defined(TRACING)
                /* Debug builds: dump the executed S/G list */
                if (cmd->sg) {
                        int i;
                        struct scatterlist *sg = cmd->sg;
                        TRACE(TRACE_RECV_TOP, 
                              "Exec'd %d S/G(s) at %p sg[0].page at %p",
                              cmd->sg_cnt, sg, (void*)sg[0].page);
                        for(i = 0; i < cmd->sg_cnt; ++i) {
                                TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
                                        "Exec'd sg:", page_address(sg[i].page),
                                        sg[i].length);
                        }
                }
#endif
        }


#ifdef EXTRACHECKS
        /* Guard against callers handing us a bogus next state */
        if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
            (next_state != SCST_CMD_STATE_XMIT_RESP) &&
            (next_state != SCST_CMD_STATE_FINISHED)) 
        {
                PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
                            "state %d (opcode %d)", next_state, cmd->cdb[0]);
                scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_hardw_error));
                next_state = SCST_CMD_STATE_DEV_DONE;
        }

        if (scst_check_auto_sense(cmd)) {
                PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
                        "opcode %d", cmd->cdb[0]);
        }
#endif

        /* req == NULL: sense, if any, is already in cmd->sense_buffer */
        scst_check_sense(cmd, NULL, &next_state);

        cmd->state = next_state;
        cmd->non_atomic_only = 0;

        /* Continue the state machine in the current context */
        __scst_process_active_cmd(cmd, scst_get_context(), 0);

        TRACE_EXIT();
        return;
}
1146
/*
 * Local (virtual) implementation of the REPORT LUNS command: fills the
 * response with the LUNs visible to the command's session.
 *
 * Always returns SCST_EXEC_COMPLETED and reports the result via
 * scst_cmd_done_local().  A missing or too small buffer completes the
 * command with "invalid field in CDB" sense.
 */
static int scst_report_luns_local(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_COMPLETED;
        int dev_cnt = 0;
        int buffer_size;
        struct scst_tgt_dev *tgt_dev = NULL;
        uint8_t *buffer;

        TRACE_ENTRY();

        /* Start from a clean, successful status */
        cmd->status = 0;
        cmd->masked_status = 0;
        cmd->msg_status = 0;
        cmd->host_status = DID_OK;
        cmd->driver_status = 0;

        /* ToDo: use full SG buffer, not only the first entry */
        buffer_size = scst_get_buf_first(cmd, &buffer);
        if (unlikely(buffer_size <= 0))
                goto out_err;

        /* Need room for the 8-byte header plus at least one 8-byte entry */
        if (buffer_size < 16) {
                goto out_put_err;
        }

        memset(buffer, 0, buffer_size);

        /* sess->sess_tgt_dev_list is protected by suspended activity */
        list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
                            sess_tgt_dev_list_entry) 
        {
                /*
                 * Entries are 8 bytes each after the 8-byte header; only
                 * two LUN bytes are filled in, the rest stays zeroed.
                 * dev_cnt keeps counting even when the buffer is full, so
                 * the header below reports the full LUN list length.
                 */
                if (8 + 8 * dev_cnt + 2 <= buffer_size) {
                        buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
                        buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
                }
                dev_cnt++;
                /* Tmp, until ToDo above done */
                if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
                        break;
        }

        /* Set the response header: LUN list length in bytes, big-endian */
        dev_cnt *= 8;
        buffer[0] = (dev_cnt >> 24) & 0xff;
        buffer[1] = (dev_cnt >> 16) & 0xff;
        buffer[2] = (dev_cnt >> 8) & 0xff;
        buffer[3] = dev_cnt & 0xff;

        /* Total response length = header + LUN list */
        dev_cnt += 8;

        scst_put_buf(cmd, buffer);

        /* Trim the response to what was actually produced */
        if (buffer_size > dev_cnt)
                scst_set_resp_data_len(cmd, dev_cnt);
        
out_done:
        cmd->completed = 1;

        /* Report the result */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

        TRACE_EXIT_RES(res);
        return res;
        
out_put_err:
        scst_put_buf(cmd, buffer);

out_err:
        scst_set_cmd_error(cmd,
                   SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
        goto out_done;
}
1219
1220 static int scst_pre_select(struct scst_cmd *cmd)
1221 {
1222         int res = SCST_EXEC_NOT_COMPLETED;
1223
1224         TRACE_ENTRY();
1225
1226         if (scst_cmd_atomic(cmd)) {
1227                 res = SCST_EXEC_NEED_THREAD;
1228                 goto out;
1229         }
1230
1231         scst_block_dev(cmd->dev, 1);
1232         /* Device will be unblocked in scst_done_cmd_check() */
1233
1234         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1235                 int rc = scst_set_pending_UA(cmd);
1236                 if (rc == 0) {
1237                         res = SCST_EXEC_COMPLETED;
1238                         cmd->completed = 1;
1239                         /* Report the result */
1240                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1241                         goto out;
1242                 }
1243         }
1244
1245 out:
1246         TRACE_EXIT_RES(res);
1247         return res;
1248 }
1249
1250 static inline void scst_report_reserved(struct scst_cmd *cmd)
1251 {
1252         TRACE_ENTRY();
1253
1254         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1255         cmd->completed = 1;
1256         /* Report the result */
1257         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1258
1259         TRACE_EXIT();
1260         return;
1261 }
1262
/*
 * Local implementation of RESERVE/RESERVE_10: marks the device reserved
 * against all sessions except the reserving one.
 *
 * Returns SCST_EXEC_NEED_THREAD in atomic context, SCST_EXEC_COMPLETED
 * when the command was finished here (error or reservation conflict),
 * otherwise SCST_EXEC_NOT_COMPLETED so the command continues to the
 * mid-level/dev handler.
 */
static int scst_reserve_local(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_NOT_COMPLETED;
        struct scst_device *dev;
        struct scst_tgt_dev *tgt_dev_tmp;

        TRACE_ENTRY();

        /* Blocking the device below must not happen in atomic context */
        if (scst_cmd_atomic(cmd)) {
                res = SCST_EXEC_NEED_THREAD;
                goto out;
        }

        /* Third-party reservations are not supported */
        if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
                PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
                     "(lun=%Ld)", (uint64_t)cmd->lun);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
                cmd->completed = 1;
                res = SCST_EXEC_COMPLETED;
                goto out;
        }

        dev = cmd->dev;
        scst_block_dev(dev, 1);
        /* Device will be unblocked in scst_done_cmd_check() */

        spin_lock_bh(&dev->dev_lock);

        /* Reserved by another session -> RESERVATION CONFLICT */
        if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
                scst_report_reserved(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                res = SCST_EXEC_COMPLETED;
                goto out_unlock;
        }

        /* Mark every other tgt_dev of the device as reserved-by-other */
        list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                            dev_tgt_dev_list_entry) 
        {
                if (cmd->tgt_dev != tgt_dev_tmp)
                        set_bit(SCST_TGT_DEV_RESERVED, 
                                &tgt_dev_tmp->tgt_dev_flags);
        }
        dev->dev_reserved = 1;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        
out:
        TRACE_EXIT_RES(res);
        return res;
}
1315
1316 static int scst_release_local(struct scst_cmd *cmd)
1317 {
1318         int res = SCST_EXEC_NOT_COMPLETED;
1319         struct scst_tgt_dev *tgt_dev_tmp;
1320         struct scst_device *dev;
1321
1322         TRACE_ENTRY();
1323
1324         dev = cmd->dev;
1325
1326         scst_block_dev(dev, 1);
1327         cmd->blocking = 1;
1328         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1329
1330         spin_lock_bh(&dev->dev_lock);
1331
1332         /* 
1333          * The device could be RELEASED behind us, if RESERVING session 
1334          * is closed (see scst_free_tgt_dev()), but this actually doesn't 
1335          * matter, so use lock and no retest for DEV_RESERVED bits again
1336          */
1337         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1338                 res = SCST_EXEC_COMPLETED;
1339                 cmd->status = 0;
1340                 cmd->masked_status = 0;
1341                 cmd->msg_status = 0;
1342                 cmd->host_status = DID_OK;
1343                 cmd->driver_status = 0;
1344         } else {
1345                 list_for_each_entry(tgt_dev_tmp,
1346                                     &dev->dev_tgt_dev_list,
1347                                     dev_tgt_dev_list_entry) 
1348                 {
1349                         clear_bit(SCST_TGT_DEV_RESERVED, 
1350                                 &tgt_dev_tmp->tgt_dev_flags);
1351                 }
1352                 dev->dev_reserved = 0;
1353         }
1354
1355         spin_unlock_bh(&dev->dev_lock);
1356
1357         if (res == SCST_EXEC_COMPLETED) {
1358                 cmd->completed = 1;
1359                 /* Report the result */
1360                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1361         }
1362
1363         TRACE_EXIT_RES(res);
1364         return res;
1365 }
1366
/* 
 * Pre-execution checks run before any command is executed: reservation
 * conflicts, reset and pending Unit Attentions, and read-only
 * protection.  The result of cmd execution, if any, should be reported 
 * via scst_cmd_done_local().
 *
 * Returns SCST_EXEC_COMPLETED when the command was finished here (and
 * may already be freed!), otherwise SCST_EXEC_NOT_COMPLETED.
 */
static int scst_pre_exec(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_NOT_COMPLETED, rc;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

        TRACE_ENTRY();

        /*
         * Reserve check before Unit Attention.  The excluded opcodes are
         * permitted despite a reservation held by another initiator;
         * ALLOW_MEDIUM_REMOVAL conflicts only when its prevent bits
         * (cdb[4] & 3) are set.
         */
        if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
            (cmd->cdb[0] != INQUIRY) &&
            (cmd->cdb[0] != REPORT_LUNS) &&
            (cmd->cdb[0] != RELEASE) &&
            (cmd->cdb[0] != RELEASE_10) &&
            (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
            (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
            (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
        {
                scst_report_reserved(cmd);
                /* !! cmd may already be freed here !! */
                res = SCST_EXEC_COMPLETED;
                goto out;
        }

        /* If we had a internal bus reset, set the command error unit attention */
        if ((cmd->dev->scsi_dev != NULL) &&
            unlikely(cmd->dev->scsi_dev->was_reset) &&
            scst_is_ua_command(cmd)) 
        {
                struct scst_device *dev = cmd->dev;
                int done = 0;
                /* Prevent more than 1 cmd to be triggered by was_reset */
                spin_lock_bh(&dev->dev_lock);
                barrier(); /* to reread was_reset */
                if (dev->scsi_dev->was_reset) {
                        TRACE(TRACE_MGMT, "was_reset is %d", 1);
                        scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_reset_UA));
                        /* It looks like it is safe to clear was_reset here */
                        dev->scsi_dev->was_reset = 0;
                        smp_mb();
                        done = 1;
                }
                spin_unlock_bh(&dev->dev_lock);

                if (done)
                        goto out_done;
        }

        /* Deliver a queued Unit Attention, if any */
        if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
            scst_is_ua_command(cmd)) 
        {
                rc = scst_set_pending_UA(cmd);
                if (rc == 0)
                        goto out_done;
        }

        /* Check READ_ONLY device status */
        if (tgt_dev->acg_dev->rd_only_flag &&
            (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
             cmd->cdb[0] == WRITE_10 ||
             cmd->cdb[0] == WRITE_12 ||
             cmd->cdb[0] == WRITE_16 ||
             cmd->cdb[0] == WRITE_VERIFY ||
             cmd->cdb[0] == WRITE_VERIFY_12 ||
             cmd->cdb[0] == WRITE_VERIFY_16 ||
             (cmd->dev->handler->type == TYPE_TAPE &&
              (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
        {
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_data_protect));
                goto out_done;
        }
out:
        TRACE_EXIT_RES(res);
        return res;

out_done:
        /* Command finished here: complete it and report the result */
        res = SCST_EXEC_COMPLETED;
        cmd->completed = 1;
        /* Report the result */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;
}
1453
1454 /* 
1455  * The result of cmd execution, if any, should be reported 
1456  * via scst_cmd_done_local() 
1457  */
1458 static inline int scst_local_exec(struct scst_cmd *cmd)
1459 {
1460         int res = SCST_EXEC_NOT_COMPLETED;
1461
1462         TRACE_ENTRY();
1463
1464         /*
1465          * Adding new commands here don't forget to update
1466          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1467          */
1468
1469         switch (cmd->cdb[0]) {
1470         case MODE_SELECT:
1471         case MODE_SELECT_10:
1472         case LOG_SELECT:
1473                 res = scst_pre_select(cmd);
1474                 break;
1475         case RESERVE:
1476         case RESERVE_10:
1477                 res = scst_reserve_local(cmd);
1478                 break;
1479         case RELEASE:
1480         case RELEASE_10:
1481                 res = scst_release_local(cmd);
1482                 break;
1483         case REPORT_LUNS:
1484                 res = scst_report_luns_local(cmd);
1485                 break;
1486         }
1487
1488         TRACE_EXIT_RES(res);
1489         return res;
1490 }
1491
/*
 * Single execution attempt of cmd: runs the pre-exec checks, the locally
 * implemented commands, the dev handler's exec() and, as the last
 * resort, sends the command to the SCSI mid-level via scst_do_req().
 *
 * Returns SCST_EXEC_COMPLETED when execution was started or finished
 * here (cmd may already be freed on return!), or SCST_EXEC_NEED_THREAD
 * when the caller must reschedule to thread context (the command state
 * is restored for a later retry in that case).
 */
static int scst_do_send_to_midlev(struct scst_cmd *cmd)
{
        int rc = SCST_EXEC_NOT_COMPLETED;

        TRACE_ENTRY();

        cmd->sent_to_midlev = 1;
        cmd->state = SCST_CMD_STATE_EXECUTING;
        cmd->scst_cmd_done = scst_cmd_done_local;

        set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
        smp_mb__after_set_bit();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
                goto out_aborted;
        }

        /* Reservation / Unit Attention / read-only checks */
        rc = scst_pre_exec(cmd);
        /* !! At this point cmd, sess & tgt_dev can be already freed !! */
        if (rc != SCST_EXEC_NOT_COMPLETED) {
                if (rc == SCST_EXEC_COMPLETED)
                        goto out;
                else if (rc == SCST_EXEC_NEED_THREAD)
                        goto out_clear;
                else
                        goto out_rc_error;
        }

        /* Commands implemented by SCST itself (RESERVE, REPORT LUNS, ...) */
        rc = scst_local_exec(cmd);
        /* !! At this point cmd, sess & tgt_dev can be already freed !! */
        if (rc != SCST_EXEC_NOT_COMPLETED) {
                if (rc == SCST_EXEC_COMPLETED)
                        goto out;
                else if (rc == SCST_EXEC_NEED_THREAD)
                        goto out_clear;
                else
                        goto out_rc_error;
        }

        if (cmd->dev->handler->exec) {
                struct scst_device *dev = cmd->dev;
                TRACE_DBG("Calling dev handler %s exec(%p)",
                      dev->handler->name, cmd);
                TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
                cmd->scst_cmd_done = scst_cmd_done_local;
                rc = dev->handler->exec(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                TRACE_DBG("Dev handler %s exec() returned %d",
                      dev->handler->name, rc);
                if (rc != SCST_EXEC_NOT_COMPLETED) {
                        if (rc == SCST_EXEC_COMPLETED)
                                goto out;
                        else if (rc == SCST_EXEC_NEED_THREAD)
                                goto out_clear;
                        else
                                goto out_rc_error;
                }
        }

        TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
        
        /* Virtual devices must be fully handled by a dev handler exec() */
        if (unlikely(cmd->dev->scsi_dev == NULL)) {
                PRINT_ERROR_PR("Command for virtual device must be "
                        "processed by device handler (lun %Ld)!",
                        (uint64_t)cmd->lun);
                goto out_error;
        }
        
        if (scst_alloc_request(cmd) != 0) {
                PRINT_INFO_PR("%s", "Unable to allocate request, "
                        "sending BUSY status");
                goto out_busy;
        }
        
        /* Asynchronous: scst_cmd_done() is invoked on completion */
        scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
                    (void *)cmd->scsi_req->sr_buffer,
                    cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
                    cmd->retries);

        rc = SCST_EXEC_COMPLETED;

out:
        TRACE_EXIT();
        return rc;

out_clear:
        /* Restore the state */
        cmd->sent_to_midlev = 0;
        cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
        goto out;

out_rc_error:
        PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
                    "invalid code %d", cmd->dev->handler->name, rc);
        /* go through */

out_error:
        /* Complete the command with HARDWARE ERROR sense */
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->completed = 1;
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        rc = SCST_EXEC_COMPLETED;
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;
        
out_busy:
        /* Complete the command with BUSY status */
        scst_set_busy(cmd);
        cmd->completed = 1;
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        rc = SCST_EXEC_COMPLETED;
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;

out_aborted:
        rc = SCST_EXEC_COMPLETED;
        /* Report the result. The cmd is not completed */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;
}
1611
/*
 * Sends the command to the device handler / SCSI mid-level and, after it
 * completes, executes any deferred commands whose serial number (SN) has
 * become the expected one. Returns an SCST_CMD_STATE_RES_* code.
 */
static int scst_send_to_midlev(struct scst_cmd *cmd)
{
        int res, rc;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
        struct scst_device *dev = cmd->dev;
        int expected_sn;
        int count;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        res = SCST_CMD_STATE_RES_CONT_NEXT;

        /* The handler's exec() may sleep: punt to a thread when atomic */
        if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
                TRACE_DBG("Dev handler %s exec() can not be "
                      "called in atomic context, rescheduling to the thread",
                      dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        /* Non-zero result: the cmd was deferred (device blocked) */
        if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
                goto out;

        scst_inc_cmd_count(); /* protect dev & tgt_dev */

        /* Internal and retried commands bypass the SN serialization below */
        if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
                rc = scst_do_send_to_midlev(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                if (rc == SCST_EXEC_NEED_THREAD) {
                        TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                              "thread context, rescheduling");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        scst_dec_on_dev_cmd(cmd);
                        goto out_dec_cmd_count;
                } else {
                        BUG_ON(rc != SCST_EXEC_COMPLETED);
                        goto out_unplug;
                }
        }

        /*
         * Out-of-order commands are parked on the deferred list until their
         * SN becomes current; the re-check under sn_lock closes the race
         * with a concurrent __scst_inc_expected_sn().
         */
        expected_sn = tgt_dev->expected_sn;
        if (cmd->sn != expected_sn) {
                spin_lock_bh(&tgt_dev->sn_lock);
                tgt_dev->def_cmd_count++;
                smp_mb();
                barrier(); /* to reread expected_sn */
                expected_sn = tgt_dev->expected_sn;
                if (cmd->sn != expected_sn) {
                        scst_dec_on_dev_cmd(cmd);
                        TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
                              "expected_sn=%d)", cmd, cmd->sn, expected_sn);
                        list_add_tail(&cmd->sn_cmd_list_entry,
                                      &tgt_dev->deferred_cmd_list);
                        spin_unlock_bh(&tgt_dev->sn_lock);
                        /* !! At this point cmd can be already freed !! */
                        goto out_dec_cmd_count;
                } else {
                        TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
                              "expected_sn %d, continuing", expected_sn);
                        tgt_dev->def_cmd_count--;
                        spin_unlock_bh(&tgt_dev->sn_lock);
                }
        }

        /* Execute this cmd, then drain deferred cmds that became current */
        count = 0;
        while(1) {
                rc = scst_do_send_to_midlev(cmd);
                if (rc == SCST_EXEC_NEED_THREAD) {
                        TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                              "thread context, rescheduling");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        scst_dec_on_dev_cmd(cmd);
                        if (count != 0)
                                goto out_unplug;
                        else
                                goto out_dec_cmd_count;
                }
                BUG_ON(rc != SCST_EXEC_COMPLETED);
                /* !! At this point cmd can be already freed !! */
                count++;
                expected_sn = __scst_inc_expected_sn(tgt_dev);
                cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
                if (cmd == NULL)
                        break;
                if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
                        break;
        }

out_unplug:
        if (dev->scsi_dev != NULL)
                generic_unplug_device(dev->scsi_dev->request_queue);

out_dec_cmd_count:
        scst_dec_cmd_count();
        /* !! At this point sess, dev and tgt_dev can be already freed !! */

out:
        TRACE_EXIT_HRES(res);
        return res;
}
1713
1714 static struct scst_cmd *scst_create_prepare_internal_cmd(
1715         struct scst_cmd *orig_cmd, int bufsize)
1716 {
1717         struct scst_cmd *res;
1718         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1719
1720         TRACE_ENTRY();
1721
1722         res = scst_alloc_cmd(gfp_mask);
1723         if (unlikely(res == NULL)) {
1724                 goto out;
1725         }
1726
1727         res->sess = orig_cmd->sess;
1728         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1729         res->atomic = scst_cmd_atomic(orig_cmd);
1730         res->internal = 1;
1731         res->tgtt = orig_cmd->tgtt;
1732         res->tgt = orig_cmd->tgt;
1733         res->dev = orig_cmd->dev;
1734         res->tgt_dev = orig_cmd->tgt_dev;
1735         res->lun = orig_cmd->lun;
1736         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1737         res->data_direction = SCST_DATA_UNKNOWN;
1738         res->orig_cmd = orig_cmd;
1739
1740         res->bufflen = bufsize;
1741         if (bufsize > 0) {
1742                 if (scst_alloc_space(res) != 0)
1743                         PRINT_ERROR("Unable to create buffer (size %d) for "
1744                                 "internal cmd", bufsize);
1745                         goto out_free_res;
1746         }
1747
1748 out:
1749         TRACE_EXIT_HRES((unsigned long)res);
1750         return res;
1751
1752 out_free_res:
1753         scst_destroy_cmd(res);
1754         res = NULL;
1755         goto out;
1756 }
1757
1758 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1759 {
1760         TRACE_ENTRY();
1761
1762         if (cmd->bufflen > 0)
1763                 scst_release_space(cmd);
1764         scst_destroy_cmd(cmd);
1765
1766         TRACE_EXIT();
1767         return;
1768 }
1769
1770 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1771 {
1772         int res = SCST_CMD_STATE_RES_RESTART;
1773 #define sbuf_size 252
1774         static const unsigned char request_sense[6] =
1775             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1776         struct scst_cmd *rs_cmd;
1777
1778         TRACE_ENTRY();
1779
1780         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1781         if (rs_cmd != 0)
1782                 goto out_error;
1783
1784         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1785         rs_cmd->cdb_len = sizeof(request_sense);
1786         rs_cmd->data_direction = SCST_DATA_READ;
1787
1788         spin_lock_irq(&scst_list_lock);
1789         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1790         spin_unlock_irq(&scst_list_lock);
1791
1792 out:
1793         TRACE_EXIT_RES(res);
1794         return res;
1795
1796 out_error:
1797         res = -1;
1798         goto out;
1799 #undef sbuf_size
1800 }
1801
1802 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1803 {
1804         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1805         uint8_t *buf;
1806         int len;
1807
1808         TRACE_ENTRY();
1809
1810         BUG_ON(orig_cmd);
1811
1812         len = scst_get_buf_first(cmd, &buf);
1813
1814         if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1815             (!SCST_NO_SENSE(buf))) 
1816         {
1817                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1818                         buf, len);
1819                 memcpy(orig_cmd->sense_buffer, buf,
1820                         (sizeof(orig_cmd->sense_buffer) > len) ?
1821                                 len : sizeof(orig_cmd->sense_buffer));
1822         } else {
1823                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1824                         "REQUEST SENSE, returning HARDWARE ERROR");
1825                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1826         }
1827
1828         scst_put_buf(cmd, buf);
1829
1830         scst_free_internal_cmd(cmd);
1831
1832         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1833         return orig_cmd;
1834 }
1835
1836 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1837 {
1838         int res = 0, rc;
1839         unsigned char type;
1840
1841         TRACE_ENTRY();
1842
1843         if (cmd->cdb[0] == REQUEST_SENSE) {
1844                 if (cmd->internal)
1845                         cmd = scst_complete_request_sense(cmd);
1846         } else if (scst_check_auto_sense(cmd)) {
1847                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1848                             "without sense data (opcode 0x%x), issuing "
1849                             "REQUEST SENSE", cmd->cdb[0]);
1850                 rc = scst_prepare_request_sense(cmd);
1851                 if (res > 0) {
1852                         *pres = rc;
1853                         res = 1;
1854                         goto out;
1855                 } else {
1856                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1857                                     "returning HARDWARE ERROR");
1858                         scst_set_cmd_error(cmd,
1859                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1860                 }
1861         }
1862
1863         type = cmd->dev->handler->type;
1864         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1865             cmd->tgt_dev->acg_dev->rd_only_flag &&
1866             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1867              type == TYPE_TAPE))
1868         {
1869                 int32_t length;
1870                 uint8_t *address;
1871
1872                 length = scst_get_buf_first(cmd, &address);
1873                 if (length <= 0)
1874                         goto out;
1875                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1876                         address[2] |= 0x80;   /* Write Protect*/
1877                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1878                         address[3] |= 0x80;   /* Write Protect*/
1879                 scst_put_buf(cmd, address);
1880         }
1881
1882         /* 
1883          * Check and clear NormACA option for the device, if necessary,
1884          * since we don't support ACA
1885          */
1886         if ((cmd->cdb[0] == INQUIRY) &&
1887             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1888             (cmd->resp_data_len > SCST_INQ_BYTE3))
1889         {
1890                 uint8_t *buffer;
1891                 int buflen;
1892
1893                 /* ToDo: all pages ?? */
1894                 buflen = scst_get_buf_first(cmd, &buffer);
1895                 if (buflen > 0) {
1896                         if (buflen > SCST_INQ_BYTE3) {
1897 #ifdef EXTRACHECKS
1898                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1899                                         PRINT_INFO_PR("NormACA set for device: "
1900                                             "lun=%Ld, type 0x%02x", 
1901                                             (uint64_t)cmd->lun, buffer[0]);
1902                                 }
1903 #endif
1904                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1905                         } else
1906                                 scst_set_cmd_error(cmd,
1907                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1908
1909                         scst_put_buf(cmd, buffer);
1910                 }
1911         }
1912
1913         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
1914                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
1915                                                 &cmd->tgt_dev->tgt_dev_flags)) {
1916                         struct scst_tgt_dev *tgt_dev_tmp;
1917                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
1918                               (uint64_t)cmd->lun, cmd->masked_status);
1919                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1920                                      sizeof(cmd->sense_buffer));
1921                         /* Clearing the reservation */
1922                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
1923                                             dev_tgt_dev_list_entry) {
1924                                 clear_bit(SCST_TGT_DEV_RESERVED, 
1925                                         &tgt_dev_tmp->tgt_dev_flags);
1926                         }
1927                         cmd->dev->dev_reserved = 0;
1928                 }
1929                 scst_unblock_dev(cmd->dev);
1930         }
1931         
1932         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
1933                      (cmd->cdb[0] == MODE_SELECT_10) ||
1934                      (cmd->cdb[0] == LOG_SELECT)))
1935         {
1936                 if (cmd->status == 0) {
1937                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
1938                                 "setting the SELECT UA (lun=%Ld)", 
1939                                 (uint64_t)cmd->lun);
1940                         spin_lock_bh(&scst_temp_UA_lock);
1941                         if (cmd->cdb[0] == LOG_SELECT) {
1942                                 scst_set_sense(scst_temp_UA,
1943                                         sizeof(scst_temp_UA),
1944                                         UNIT_ATTENTION, 0x2a, 0x02);
1945                         } else {
1946                                 scst_set_sense(scst_temp_UA,
1947                                         sizeof(scst_temp_UA),
1948                                         UNIT_ATTENTION, 0x2a, 0x01);
1949                         }
1950                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
1951                                 sizeof(scst_temp_UA), 1);
1952                         spin_unlock_bh(&scst_temp_UA_lock);
1953                 }
1954                 scst_unblock_dev(cmd->dev);
1955         }
1956
1957 out:
1958         TRACE_EXIT_RES(res);
1959         return res;
1960 }
1961
1962 static int scst_dev_done(struct scst_cmd *cmd)
1963 {
1964         int res = SCST_CMD_STATE_RES_CONT_SAME;
1965         int state;
1966         int atomic = scst_cmd_atomic(cmd);
1967
1968         TRACE_ENTRY();
1969
1970         if (atomic && !cmd->dev->handler->dev_done_atomic &&
1971             cmd->dev->handler->dev_done) 
1972         {
1973                 TRACE_DBG("Dev handler %s dev_done() can not be "
1974                       "called in atomic context, rescheduling to the thread",
1975                       cmd->dev->handler->name);
1976                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1977                 goto out;
1978         }
1979
1980         if (scst_done_cmd_check(cmd, &res))
1981                 goto out;
1982
1983         state = SCST_CMD_STATE_XMIT_RESP;
1984         if (likely(!scst_is_cmd_local(cmd)) && 
1985             likely(cmd->dev->handler->dev_done != NULL))
1986         {
1987                 int rc;
1988                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
1989                       cmd->dev->handler->name, cmd);
1990                 rc = cmd->dev->handler->dev_done(cmd);
1991                 TRACE_DBG("Dev handler %s dev_done() returned %d",
1992                       cmd->dev->handler->name, rc);
1993                 if (rc != SCST_CMD_STATE_DEFAULT)
1994                         state = rc;
1995         }
1996
1997         switch (state) {
1998         case SCST_CMD_STATE_REINIT:
1999                 cmd->state = state;
2000                 res = SCST_CMD_STATE_RES_RESTART;
2001                 break;
2002
2003         case SCST_CMD_STATE_DEV_PARSE:
2004         case SCST_CMD_STATE_PREPARE_SPACE:
2005         case SCST_CMD_STATE_RDY_TO_XFER:
2006         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2007         case SCST_CMD_STATE_DEV_DONE:
2008         case SCST_CMD_STATE_XMIT_RESP:
2009         case SCST_CMD_STATE_FINISHED:
2010                 cmd->state = state;
2011                 res = SCST_CMD_STATE_RES_CONT_SAME;
2012                 break;
2013
2014         case SCST_CMD_STATE_NEED_THREAD_CTX:
2015                 TRACE_DBG("Dev handler %s dev_done() requested "
2016                       "thread context, rescheduling",
2017                       cmd->dev->handler->name);
2018                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2019                 break;
2020
2021         default:
2022                 if (state >= 0) {
2023                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2024                                 "invalid cmd state %d", 
2025                                 cmd->dev->handler->name, state);
2026                 } else {
2027                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2028                                 "error %d", cmd->dev->handler->name, 
2029                                 state);
2030                 }
2031                 scst_set_cmd_error(cmd,
2032                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2033                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2034                 res = SCST_CMD_STATE_RES_CONT_SAME;
2035                 break;
2036         }
2037
2038 out:
2039         TRACE_EXIT_HRES(res);
2040         return res;
2041 }
2042
/*
 * Transmits the response for cmd through the target driver's
 * xmit_response() callback, handling QUEUE FULL retries and the
 * aborted / no-response flags. Returns an SCST_CMD_STATE_RES_* code.
 */
static int scst_xmit_response(struct scst_cmd *cmd)
{
        int res, rc;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        /* 
         * Check here also in order to avoid unnecessary delays of other
         * commands.
         */
        if (unlikely(cmd->sent_to_midlev == 0) &&
            (cmd->tgt_dev != NULL))
        {
                TRACE(TRACE_SCSI_SERIALIZING,
                      "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
                scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
                cmd->sent_to_midlev = 1;
        }

        /* xmit_response() may sleep: punt to a thread when atomic */
        if (atomic && !cmd->tgtt->xmit_response_atomic) {
                TRACE_DBG("%s", "xmit_response() can not be "
                      "called in atomic context, rescheduling to the thread");
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
        smp_mb__after_set_bit();

        /* Commands aborted on behalf of another initiator report TASK ABORTED */
        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
                        TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
                                "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
                        scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
                }
        }

        /* NO_RESP: finish the cmd without sending anything to the initiator */
        if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
                        cmd, cmd->tag);
                cmd->state = SCST_CMD_STATE_FINISHED;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                goto out;
        }

#ifdef DEBUG_TM
        if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
                if (atomic && !cmd->tgtt->xmit_response_atomic) {
                        TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        goto out;
                }
                TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
                        cmd, cmd->tag);
                schedule_timeout_uninterruptible(HZ);
        }
#endif

        /*
         * Retry loop: finished_cmds is snapshotted before each attempt so
         * scst_queue_retry_cmd() can requeue a QUEUE FULL race-free.
         */
        while (1) {
                int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

                res = SCST_CMD_STATE_RES_CONT_NEXT;
                cmd->state = SCST_CMD_STATE_XMIT_WAIT;

                TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(DEBUG) || defined(TRACING)
                if (cmd->sg) {
                        int i;
                        struct scatterlist *sg = cmd->sg;
                        TRACE(TRACE_SEND_BOT, 
                              "Xmitting %d S/G(s) at %p sg[0].page at %p",
                              cmd->sg_cnt, sg, (void*)sg[0].page);
                        for(i = 0; i < cmd->sg_cnt; ++i) {
                                TRACE_BUFF_FLAG(TRACE_SEND_BOT,
                                    "Xmitting sg:", page_address(sg[i].page),
                                    sg[i].length);
                        }
                }
#endif

#ifdef DEBUG_RETRY
                if (((scst_random() % 100) == 77))
                        rc = SCST_TGT_RES_QUEUE_FULL;
                else
#endif
                        rc = cmd->tgtt->xmit_response(cmd);
                TRACE_DBG("xmit_response() returned %d", rc);

                if (likely(rc == SCST_TGT_RES_SUCCESS))
                        goto out;

                /* Restore the previous state */
                cmd->state = SCST_CMD_STATE_XMIT_RESP;

                switch (rc) {
                case SCST_TGT_RES_QUEUE_FULL:
                {
                        /* 0: queued for later retry; non-zero: retry now */
                        if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
                                break;
                        else
                                continue;
                }

                case SCST_TGT_RES_NEED_THREAD_CTX:
                {
                        TRACE_DBG("Target driver %s xmit_response() "
                              "requested thread context, rescheduling",
                              cmd->tgtt->name);
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        break;
                }

                default:
                        goto out_error;
                }
                break;
        }

out:
        /* Caution: cmd can be already dead here */
        TRACE_EXIT_HRES(res);
        return res;

out_error:
        if (rc == SCST_TGT_RES_FATAL_ERROR) {
                PRINT_ERROR_PR("Target driver %s xmit_response() returned "
                        "fatal error", cmd->tgtt->name);
        } else {
                PRINT_ERROR_PR("Target driver %s xmit_response() returned "
                        "invalid value %d", cmd->tgtt->name, rc);
        }
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->state = SCST_CMD_STATE_FINISHED;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
2181
/*
 * Final processing stage: unaccounts the command from the memory counters,
 * its tgt_dev and session, completes an attached management command,
 * removes it from the global lists and frees it.
 * Always returns SCST_CMD_STATE_RES_CONT_NEXT.
 */
static int scst_finish_cmd(struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        /* Return this cmd's buffer size to the global memory accounting */
        if (cmd->mem_checked) {
                spin_lock_bh(&scst_cmd_mem_lock);
                scst_cur_cmd_mem -= cmd->bufflen;
                spin_unlock_bh(&scst_cmd_mem_lock);
        }

        spin_lock_irq(&scst_list_lock);

        TRACE_DBG("Deleting cmd %p from cmd list", cmd);
        list_del(&cmd->cmd_list_entry);

        /* Notify a waiting management cmd that this affected cmd finished */
        if (cmd->mgmt_cmnd)
                scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);

        /* tgt_dev may be NULL for cmds that never got a LUN translation */
        if (likely(cmd->tgt_dev != NULL))
                cmd->tgt_dev->cmd_count--;

        cmd->sess->sess_cmd_count--;

        list_del(&cmd->search_cmd_list_entry);

        spin_unlock_irq(&scst_list_lock);

        scst_free_cmd(cmd);

        res = SCST_CMD_STATE_RES_CONT_NEXT;

        TRACE_EXIT_HRES(res);
        return res;
}
2218
/*
 * Called by the target driver once the response for cmd has been sent.
 * Chooses a processing context and drives the command through its final
 * FINISHED state (directly, or deferred to the tasklet).
 */
void scst_tgt_cmd_done(struct scst_cmd *cmd)
{
        int res = 0;
        unsigned long flags;
        int context;

        TRACE_ENTRY();

        BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

        /* From hard-IRQ context only the tasklet path is allowed */
        if (in_irq())
                context = SCST_CONTEXT_TASKLET;
        else
                context = scst_get_context();

        TRACE_DBG("Context: %d", context);
        cmd->non_atomic_only = 0;
        cmd->state = SCST_CMD_STATE_FINISHED;

        switch (context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                flags = 0; /* NOTE(review): flags looks unused on this path */
                scst_check_retries(cmd->tgt, 0);
                /* Finish the cmd right here in the caller's context */
                res = __scst_process_active_cmd(cmd, context, 0);
                BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
                break;

        case SCST_CONTEXT_TASKLET:
        {
                /* Queue the cmd for the tasklet and kick it */
                spin_lock_irqsave(&scst_list_lock, flags);
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                spin_unlock_irqrestore(&scst_list_lock, flags);
                scst_schedule_tasklet();
                scst_check_retries(cmd->tgt, 0);
                break;
        }

        default:
                BUG();
                break;
        }

        TRACE_EXIT();
        return;
}
2266
2267 /*
2268  * Returns 0 on success, > 0 when we need to wait for unblock,
2269  * < 0 if there is no device (lun) or device type handler.
2270  * Called under scst_list_lock and IRQs disabled
2271  */
2272 static int scst_translate_lun(struct scst_cmd *cmd)
2273 {
2274         struct scst_tgt_dev *tgt_dev = NULL;
2275         int res = 0;
2276
2277         TRACE_ENTRY();
2278
2279         scst_inc_cmd_count();   
2280
2281         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2282                 res = -1;
2283                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2284                       (uint64_t)cmd->lun);
2285                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2286                                     sess_tgt_dev_list_entry) 
2287                 {
2288                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2289                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2290
2291                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2292                                         PRINT_INFO_PR("Dev handler for device "
2293                                           "%Ld is NULL, the device will not be "
2294                                           "visible remotely", (uint64_t)cmd->lun);
2295                                         break;
2296                                 }
2297                                 
2298                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2299                                         cmd->tgt_dev_saved->cmd_count--;
2300                                         TRACE(TRACE_SCSI_SERIALIZING,
2301                                               "SCST_CMD_STATE_REINIT: "
2302                                               "incrementing expected_sn on tgt_dev_saved %p",
2303                                               cmd->tgt_dev_saved);
2304                                         scst_inc_expected_sn_unblock(
2305                                                 cmd->tgt_dev_saved, cmd, 1);
2306                                 }
2307                                 cmd->tgt_dev = tgt_dev;
2308                                 tgt_dev->cmd_count++;
2309                                 cmd->dev = tgt_dev->acg_dev->dev;
2310
2311                                 /* ToDo: cmd->queue_type */
2312
2313                                 /* scst_list_lock is enough to protect that */
2314                                 cmd->sn = tgt_dev->next_sn;
2315                                 tgt_dev->next_sn++;
2316
2317                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2318                                         "cmd->sn: %d", cmd->sn);
2319
2320                                 res = 0;
2321                                 break;
2322                         }
2323                 }
2324                 if (res != 0) {
2325                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2326                                 "unexisting LU?", (uint64_t)cmd->lun);
2327                         scst_dec_cmd_count();
2328                 }
2329         } else {
2330                 if ( !cmd->sess->waiting) {
2331                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2332                               cmd->sess);
2333                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2334                                       &scst_dev_wait_sess_list);
2335                         cmd->sess->waiting = 1;
2336                 }
2337                 scst_dec_cmd_count();
2338                 res = 1;
2339         }
2340
2341         TRACE_EXIT_RES(res);
2342         return res;
2343 }
2344
2345 /* Called under scst_list_lock and IRQs disabled */
2346 static int scst_process_init_cmd(struct scst_cmd *cmd)
2347 {
2348         int res = 0;
2349
2350         TRACE_ENTRY();
2351
2352         res = scst_translate_lun(cmd);
2353         if (likely(res == 0)) {
2354                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2355                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
2356                         TRACE(TRACE_RETRY, "Too many pending commands in "
2357                                 "session, returning BUSY to initiator \"%s\"",
2358                                 (cmd->sess->initiator_name[0] == '\0') ?
2359                                   "Anonymous" : cmd->sess->initiator_name);
2360                         scst_set_busy(cmd);
2361                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2362                 }
2363                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2364                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2365         } else if (res < 0) {
2366                 TRACE_DBG("Finishing cmd %p", cmd);
2367                 scst_set_cmd_error(cmd,
2368                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2369                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2370                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2371                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2372         }
2373
2374         TRACE_EXIT_RES(res);
2375         return res;
2376 }
2377
2378 /* 
2379  * Called under scst_list_lock and IRQs disabled
2380  * We don't drop it anywhere inside, because command execution
2381  * have to be serialized, i.e. commands must be executed in order
2382  * of their arrival, and we set this order inside scst_translate_lun().
2383  */
2384 static int scst_do_job_init(struct list_head *init_cmd_list)
2385 {
2386         int res = 1;
2387
2388         TRACE_ENTRY();
2389
2390         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2391                 while (!list_empty(init_cmd_list)) {
2392                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2393                                                           typeof(*cmd),
2394                                                           cmd_list_entry);
2395                         res = scst_process_init_cmd(cmd);
2396                         if (res > 0)
2397                                 break;
2398                 }
2399         }
2400
2401         TRACE_EXIT_RES(res);
2402         return res;
2403 }
2404
2405 /* Called with no locks held */
static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
        int left_locked)
{
        int res;

        TRACE_ENTRY();

        BUG_ON(in_irq());

        /* Record the processing environment for the state handlers */
        cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
                        SCST_CONTEXT_DIRECT_ATOMIC);
        cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;

        /* Run the cmd state machine until a handler stops the iteration */
        do {
                switch (cmd->state) {
                case SCST_CMD_STATE_DEV_PARSE:
                        res = scst_parse_cmd(cmd);
                        break;

                case SCST_CMD_STATE_PREPARE_SPACE:
                        res = scst_prepare_space(cmd);
                        break;

                case SCST_CMD_STATE_RDY_TO_XFER:
                        res = scst_rdy_to_xfer(cmd);
                        break;

                case SCST_CMD_STATE_SEND_TO_MIDLEV:
                        res = scst_send_to_midlev(cmd);
                        /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                        break;

                case SCST_CMD_STATE_DEV_DONE:
                        res = scst_dev_done(cmd);
                        break;

                case SCST_CMD_STATE_XMIT_RESP:
                        res = scst_xmit_response(cmd);
                        break;

                case SCST_CMD_STATE_FINISHED:
                        res = scst_finish_cmd(cmd);
                        break;

                default:
                        PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
                               cmd, cmd->state);
                        BUG();
                        res = SCST_CMD_STATE_RES_CONT_NEXT;
                        break;
                }
        } while(res == SCST_CMD_STATE_RES_CONT_SAME);

        if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
                /* Re-take the lock only if the caller expects it held */
                if (left_locked)
                        spin_lock_irq(&scst_list_lock);
        } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
                /*
                 * The handler needs a sleepable context: queue the cmd on
                 * the active list, mark it non-atomic-only (so the atomic
                 * tasklet path skips it) and wake a cmd thread.
                 */
                spin_lock_irq(&scst_list_lock);

                switch (cmd->state) {
                case SCST_CMD_STATE_DEV_PARSE:
                case SCST_CMD_STATE_PREPARE_SPACE:
                case SCST_CMD_STATE_RDY_TO_XFER:
                case SCST_CMD_STATE_SEND_TO_MIDLEV:
                case SCST_CMD_STATE_DEV_DONE:
                case SCST_CMD_STATE_XMIT_RESP:
                case SCST_CMD_STATE_FINISHED:
                        TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                        list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
                        break;
#ifdef EXTRACHECKS
                /* not very valid commands */
                case SCST_CMD_STATE_DEFAULT:
                case SCST_CMD_STATE_NEED_THREAD_CTX:
                        PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
                                "useful list (left on scst cmd list)", cmd, 
                                cmd->state);
                        spin_unlock_irq(&scst_list_lock);
                        BUG();
                        spin_lock_irq(&scst_list_lock);
                        break;
#endif
                default:
                        break;
                }
                cmd->non_atomic_only = 1;
                if (!left_locked)
                        spin_unlock_irq(&scst_list_lock);
                wake_up(&scst_list_waitQ);
        } else if (res == SCST_CMD_STATE_RES_RESTART) {
                /* The cmd must go through initialization again */
                if (cmd->state == SCST_CMD_STATE_REINIT) {
                        spin_lock_irq(&scst_list_lock);
                        TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
                        list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
                        if (!left_locked)
                                spin_unlock_irq(&scst_list_lock);
                } else
                        BUG();
        } else
                BUG();

        TRACE_EXIT_RES(res);
        return res;
}
2510
2511 /* Called under scst_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *active_cmd_list, int context)
{
        int res;
        struct scst_cmd *cmd;
        /* Nonzero when we must not sleep while processing commands */
        int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
                        SCST_CONTEXT_DIRECT_ATOMIC);

        TRACE_ENTRY();

        tm_dbg_check_released_cmds();

restart:
        list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
                /* Cmds flagged non_atomic_only must wait for a cmd thread */
                if (atomic && cmd->non_atomic_only) {
                        TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
                        continue;
                }
                if (tm_dbg_check_cmd(cmd) != 0)
                        goto restart;
                /*
                 * scst_process_active_cmd() drops scst_list_lock while
                 * working (re-taking it because left_locked=1), so the list
                 * may have changed under us: rescan from the head.
                 */
                res = scst_process_active_cmd(cmd, context, NULL, 1);
                if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
                        goto restart;
                } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
                        goto restart;
                } else if (res == SCST_CMD_STATE_RES_RESTART) {
                        break;
                } else
                        BUG();
        }

        TRACE_EXIT();
        return;
}
2545
2546 static inline int test_cmd_lists(void)
2547 {
2548         int res = !list_empty(&scst_active_cmd_list) ||
2549             (!list_empty(&scst_init_cmd_list) &&
2550              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2551             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2552             unlikely(scst_shut_threads_count > 0) ||
2553             tm_dbg_is_release();
2554         return res;
2555 }
2556
int scst_cmd_thread(void *arg)
{
        /* Serializes thread numbering across concurrently started threads */
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;
        int n;

        TRACE_ENTRY();

        spin_lock(&lock);
        n = scst_thread_num++;
        spin_unlock(&lock);
        daemonize("scsi_tgt%d", n);
        recalc_sigpending();
        set_user_nice(current, 10);
        current->flags |= PF_NOFREEZE;

        /* scst_list_lock is held everywhere in the loop except while
         * sleeping and inside scst_do_job_active()'s own lock dance */
        spin_lock_irq(&scst_list_lock);
        while (1) {
                wait_queue_t wait;
                init_waitqueue_entry(&wait, current);

                /* Sleep (on the exclusive queue) until there is work */
                if (!test_cmd_lists()) {
                        add_wait_queue_exclusive(&scst_list_waitQ, &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cmd_lists())
                                        break;
                                spin_unlock_irq(&scst_list_lock);
                                schedule();
                                spin_lock_irq(&scst_list_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&scst_list_waitQ, &wait);
                }

                scst_do_job_init(&scst_init_cmd_list);
                scst_do_job_active(&scst_active_cmd_list,
                                   SCST_CONTEXT_THREAD|SCST_PROCESSIBLE_ENV);

                /* On shutdown, exit only after all cmd lists have drained */
                if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
                    list_empty(&scst_cmd_list) &&
                    list_empty(&scst_active_cmd_list) &&
                    list_empty(&scst_init_cmd_list)) {
                        break;
                }
                
                /* Exit one thread if the pool is being shrunk */
                if (unlikely(scst_shut_threads_count > 0)) {
                        scst_shut_threads_count--;
                        break;
                }
        }
        spin_unlock_irq(&scst_list_lock);

        /* Last thread out signals the waiter in the shutdown path */
        if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
                smp_mb__after_atomic_dec();
                TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
                up(scst_shutdown_mutex);
        }

        TRACE_EXIT();
        return 0;
}
2618
/* Softirq entry point: drains the init and active cmd lists atomically */
void scst_cmd_tasklet(long p)
{
        TRACE_ENTRY();

        spin_lock_irq(&scst_list_lock);

        scst_do_job_init(&scst_init_cmd_list);
        /* DIRECT_ATOMIC: non_atomic_only cmds are left for the threads */
        scst_do_job_active(&scst_active_cmd_list, 
                SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);

        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT();
        return;
}
2634
2635 /*
2636  * Returns 0 on success, < 0 if there is no device handler or
2637  * > 0 if SCST_FLAG_SUSPENDED set.
2638  */
static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
{
        struct scst_tgt_dev *tgt_dev = NULL;
        int res = -1;

        TRACE_ENTRY();

        TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
              (uint64_t)mcmd->lun);

        spin_lock_irq(&scst_list_lock);
        scst_inc_cmd_count();   
        if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
                /* Linear scan of the session's LUN -> tgt_dev mapping */
                list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
                                    sess_tgt_dev_list_entry) 
                {
                        if (tgt_dev->acg_dev->lun == mcmd->lun) {
                                TRACE_DBG("tgt_dev %p found", tgt_dev);
                                mcmd->mcmd_tgt_dev = tgt_dev;
                                res = 0;
                                break;
                        }
                }
                /* Keep the cmd count reference only when a LUN was found */
                if (mcmd->mcmd_tgt_dev == NULL)
                        scst_dec_cmd_count();
        } else {
                /* Processing suspended: park the session until resume */
                if ( !mcmd->sess->waiting) {
                        TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
                              mcmd->sess);
                        list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
                                      &scst_dev_wait_sess_list);
                        mcmd->sess->waiting = 1;
                }
                scst_dec_cmd_count();
                res = 1;
        }
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_HRES(res);
        return res;
}
2680
2681 /* Called under scst_list_lock and IRQ off */
static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
        struct scst_mgmt_cmd *mcmd)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
                "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
                mcmd->cmd_wait_count);

        /* Break the cmd <-> mgmt cmd link set up by scst_abort_cmd() */
        cmd->mgmt_cmnd = NULL;

        if (cmd->completed)
                mcmd->completed_cmd_count++;

        /* Drop the reference taken when the abort was deferred */
        mcmd->cmd_wait_count--;
        if (mcmd->cmd_wait_count > 0) {
                TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
                        mcmd->cmd_wait_count);
                goto out;
        }

        /* Last awaited cmd finished: the mgmt cmd can make progress */
        mcmd->state = SCST_MGMT_CMD_STATE_DONE;

        if (mcmd->completed) {
                TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
                        mcmd);
                list_move_tail(&mcmd->mgmt_cmd_list_entry,
                        &scst_active_mgmt_cmd_list);
        }

        wake_up(&scst_mgmt_cmd_list_waitQ);

out:
        TRACE_EXIT();
        return;
}
2718
2719 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2720         struct scst_tgt_dev *tgt_dev, int set_status)
2721 {
2722         int res = SCST_DEV_TM_NOT_COMPLETED;
2723         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2724                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2725                       tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2726                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2727                         tgt_dev);
2728                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2729                       tgt_dev->acg_dev->dev->handler->name, res);
2730                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2731                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2732                                                 SCST_MGMT_STATUS_SUCCESS :
2733                                                 SCST_MGMT_STATUS_FAILED;
2734                 }
2735         }
2736         return res;
2737 }
2738
2739 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2740 {
2741         switch(mgmt_fn) {
2742                 case SCST_ABORT_TASK:
2743                 case SCST_ABORT_TASK_SET:
2744                 case SCST_CLEAR_TASK_SET:
2745                         return 1;
2746                 default:
2747                         return 0;
2748         }
2749 }
2750
/* 
 * Called under scst_list_lock and IRQ off (to protect cmd
 * from being destroyed).  If mcmd is non-NULL and the response must be
 * deferred, links cmd to mcmd via cmd->mgmt_cmnd and bumps
 * mcmd->cmd_wait_count (dropped later in scst_complete_cmd_mgmt()).
 */
void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
        int other_ini, int call_dev_task_mgmt_fn)
{
        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);

        /* other_ini: the abort was requested on behalf of another initiator */
        if (other_ini) {
                set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
                smp_mb__after_set_bit();
        }
        set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
        smp_mb__after_set_bit();

        if (call_dev_task_mgmt_fn && cmd->tgt_dev)
                 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);

        if (mcmd) {
                int defer;
                /*
                 * Decide whether the TM response must wait for this cmd:
                 * always for sync-reply target drivers, otherwise only
                 * while the cmd is EXECUTING (strict TM fns) or XMITTING.
                 */
                if (cmd->tgtt->tm_sync_reply)
                        defer = 1;
                else {
                        if (scst_is_strict_mgmt_fn(mcmd->fn))
                                defer = test_bit(SCST_CMD_EXECUTING,
                                        &cmd->cmd_flags);
                        else
                                defer = test_bit(SCST_CMD_XMITTING,
                                        &cmd->cmd_flags);
                }

                if (defer) {
                        /*
                         * Delay the response until the command's finish in
                         * order to guarantee that "no further responses from
                         * the task are sent to the SCSI initiator port" after
                         * response from the TM function is sent (SAM)
                         */
                        TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
                                "xmitted (state %d), deferring ABORT...", cmd,
                                cmd->tag, cmd->state);
#ifdef EXTRACHECKS
                        if (cmd->mgmt_cmnd) {
                                printk(KERN_ALERT "cmd %p (tag %d, state %d) "
                                        "has non-NULL mgmt_cmnd %p!!! Current "
                                        "mcmd %p\n", cmd, cmd->tag, cmd->state,
                                        cmd->mgmt_cmnd, mcmd);
                        }
#endif
                        BUG_ON(cmd->mgmt_cmnd);
                        /* Dropped again by scst_complete_cmd_mgmt() */
                        mcmd->cmd_wait_count++;
                        cmd->mgmt_cmnd = mcmd;
                }
        }

        tm_dbg_release_cmd(cmd);

        TRACE_EXIT();
        return;
}
2815
2816 /* Called under scst_list_lock and IRQ off */
2817 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2818 {
2819         int res;
2820         if (mcmd->cmd_wait_count != 0) {
2821                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2822                         "wait", mcmd->cmd_wait_count);
2823                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2824                 res = -1;
2825         } else {
2826                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2827                 res = 0;
2828         }
2829         mcmd->completed = 1;
2830         return res;
2831 }
2832
static void scst_unblock_aborted_cmds(int scst_mutex_held)
{
        struct scst_device *dev;
        int wake = 0;

        TRACE_ENTRY();

        if (!scst_mutex_held)
                down(&scst_mutex);

        /*
         * Move every aborted cmd off each device's blocked list onto the
         * active list, so it can complete instead of staying stuck behind
         * the blocked device.
         */
        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                struct scst_cmd *cmd, *tcmd;
                spin_lock_bh(&dev->dev_lock);
                list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                        blocked_cmd_list_entry) {
                        if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
                                list_del(&cmd->blocked_cmd_list_entry);
                                TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
                                        "to active cmd list", cmd);
                                /* scst_list_lock nests inside dev_lock here */
                                spin_lock_irq(&scst_list_lock);
                                list_move_tail(&cmd->cmd_list_entry,
                                        &scst_active_cmd_list);
                                spin_unlock_irq(&scst_list_lock);
                                wake = 1;
                        }
                }
                spin_unlock_bh(&dev->dev_lock);
        }

        if (!scst_mutex_held)
                up(&scst_mutex);

        /* Wake the cmd threads only if we actually queued work */
        if (wake)
                wake_up(&scst_list_waitQ);

        TRACE_EXIT();
        return;
}
2871
/* Aborts all of tgt_dev's cmds in sess->search_cmd_list; no return value */
2873 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2874         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2875 {
2876         struct scst_cmd *cmd;
2877         struct scst_session *sess = tgt_dev->sess;
2878
2879         TRACE_ENTRY();
2880
2881         spin_lock_irq(&scst_list_lock);
2882
2883         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2884         list_for_each_entry(cmd, &sess->search_cmd_list, 
2885                         search_cmd_list_entry) {
2886                 if ((cmd->tgt_dev == NULL) && 
2887                     (cmd->lun == tgt_dev->acg_dev->lun))
2888                         continue;
2889                 if (cmd->tgt_dev != tgt_dev)
2890                         continue;
2891                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2892         }
2893         spin_unlock_irq(&scst_list_lock);
2894
2895         scst_unblock_aborted_cmds(scst_mutex_held);
2896
2897         TRACE_EXIT();
2898         return;
2899 }
2900
2901 /* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
{
        int res;
        struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
        struct scst_device *dev = tgt_dev->acg_dev->dev;

        TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
                tgt_dev->acg_dev->lun, mcmd);

        /* Block new commands on the device while the task set is aborted */
        spin_lock_bh(&dev->dev_lock);
        __scst_block_dev(dev);
        spin_unlock_bh(&dev->dev_lock);

        __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
        /* Let the dev handler react; mcmd->status is left untouched here */
        scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);

        res = scst_set_mcmd_next_state(mcmd);

        TRACE_EXIT_RES(res);
        return res;
}
2923
2924 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2925 {
2926         /*
2927          * No need for special protection for SCST_FLAG_TM_ACTIVE, since
2928          * we could be called from the only thread.
2929          */
2930         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2931                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2932                         mcmd);
2933                 if (!locked)
2934                         spin_lock_irq(&scst_list_lock);
2935                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
2936                         &scst_delayed_mgmt_cmd_list);
2937                 if (!locked)
2938                         spin_unlock_irq(&scst_list_lock);
2939                 return -1;
2940         } else {
2941                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
2942                 return 0;
2943         }
2944 }
2945
2946 /* Returns 0 if the command processing should be continued, 
2947  * >0, if it should be requeued, <0 otherwise */
static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
{
        int res = 0;

        TRACE_ENTRY();

        /* Only one TM function runs at a time; others wait on delayed list */
        res = scst_check_delay_mgmt_cmd(mcmd, 1);
        if (res != 0)
                goto out;

        if (mcmd->fn == SCST_ABORT_TASK) {
                /* ABORT TASK targets a single command, located by its tag */
                struct scst_session *sess = mcmd->sess;
                struct scst_cmd *cmd;

                spin_lock_irq(&scst_list_lock);
                cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
                if (cmd == NULL) {
                        TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
                                "tag %d not found", mcmd->tag);
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                        mcmd->state = SCST_MGMT_CMD_STATE_DONE;
                } else {
                        TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
                                "aborting it", cmd, mcmd->tag, cmd->sn);
                        mcmd->cmd_to_abort = cmd;
                        scst_abort_cmd(cmd, mcmd, 0, 1);
                        res = scst_set_mcmd_next_state(mcmd);
                        mcmd->cmd_to_abort = NULL; /* just in case */
                }
                spin_unlock_irq(&scst_list_lock);
        } else {
                /* All other TM functions operate on a whole LUN */
                int rc;
                rc = scst_mgmt_translate_lun(mcmd);
                if (rc < 0) {
                        PRINT_ERROR_PR("Corresponding device for lun %Ld not "
                                "found", (uint64_t)mcmd->lun);
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                        mcmd->state = SCST_MGMT_CMD_STATE_DONE;
                } else if (rc == 0)
                        mcmd->state = SCST_MGMT_CMD_STATE_READY;
                else
                        res = rc;       /* suspended: caller must requeue */
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
2996
2997 /* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
{
        int res, rc;
        struct scst_device *dev, *d;
        struct scst_tgt_dev *tgt_dev;
        int cont, c;
        LIST_HEAD(host_devs);   /* one representative device per SCSI host */

        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
                mcmd, mcmd->sess->sess_cmd_count);

        down(&scst_mutex);

        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                int found = 0;

                spin_lock_bh(&dev->dev_lock);
                __scst_block_dev(dev);
                scst_process_reset(dev, mcmd->sess, NULL, mcmd);
                spin_unlock_bh(&dev->dev_lock);

                /*
                 * cont: device has at least one tgt_dev;
                 * c: at least one handler did NOT complete the TM fn itself,
                 * so a SCSI-level reset is still needed for this device.
                 */
                cont = 0;
                c = 0;
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                        dev_tgt_dev_list_entry) 
                {
                        cont = 1;
                        rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
                        if (rc == SCST_DEV_TM_NOT_COMPLETED) 
                                c = 1;
                        else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
                                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                }
                if (cont && !c)
                        continue;
                
                if (dev->scsi_dev == NULL)
                        continue;

                /* Record each SCSI host only once for the bus resets below */
                list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
                        if (dev->scsi_dev->host->host_no ==
                                    d->scsi_dev->host->host_no) 
                        {
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        list_add_tail(&dev->reset_dev_list_entry, &host_devs);
        }

        /*
         * We suppose here that for all commands that already on devices
         * on/after scsi_reset_provider() completion callbacks will be called.
         */

        list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
                /* dev->scsi_dev must be non-NULL here */
                TRACE(TRACE_MGMT, "Resetting host %d bus ",
                      dev->scsi_dev->host->host_no);
                rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
                TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
                      dev->scsi_dev->host->host_no,
                      (rc == SUCCESS) ? "SUCCESS" : "FAILED");
                if (rc != SUCCESS) {
                        /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                }
        }

        /* NOTE(review): was_reset cleared presumably to suppress duplicate
         * reset indications from the midlevel — confirm */
        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                if (dev->scsi_dev != NULL)
                        dev->scsi_dev->was_reset = 0;
        }

        up(&scst_mutex);

        spin_lock_irq(&scst_list_lock);
        tm_dbg_task_mgmt("TARGET RESET");
        res = scst_set_mcmd_next_state(mcmd);
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_RES(res);
        return res;
}
3085
3086 /* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
{
        int res, rc;
        struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
        struct scst_device *dev = tgt_dev->acg_dev->dev;

        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
                mcmd);

        /* Block new commands and run SCST's reset processing for the dev */
        spin_lock_bh(&dev->dev_lock);
        __scst_block_dev(dev);
        scst_process_reset(dev, mcmd->sess, NULL, mcmd);
        spin_unlock_bh(&dev->dev_lock);

        /* If the dev handler fully handled the reset, skip the SCSI level */
        rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
        if (rc != SCST_DEV_TM_NOT_COMPLETED)
                goto out_tm_dbg;

        if (dev->scsi_dev != NULL) {
                TRACE(TRACE_MGMT, "Resetting host %d bus ",
                      dev->scsi_dev->host->host_no);
                rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
                if (rc != SUCCESS)
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                /* NOTE(review): cleared presumably to suppress duplicate
                 * midlevel reset indication — confirm */
                dev->scsi_dev->was_reset = 0;
        }

out_tm_dbg:
        spin_lock_irq(&scst_list_lock);
        tm_dbg_task_mgmt("LUN RESET");
        res = scst_set_mcmd_next_state(mcmd);
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_RES(res);
        return res;
}
3125
3126 /* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
        int nexus_loss)
{
        int res;
        struct scst_session *sess = mcmd->sess;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        if (nexus_loss) {
                TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
                        mcmd);
        } else {
                TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
                        mcmd);
        }

        down(&scst_mutex);
        /* Block and abort on every LUN this session can reach */
        list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
                sess_tgt_dev_list_entry) 
        {
                struct scst_device *dev = tgt_dev->acg_dev->dev;
                int rc;

                spin_lock_bh(&dev->dev_lock);
                __scst_block_dev(dev);
                spin_unlock_bh(&dev->dev_lock);

                rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
                if (rc == SCST_DEV_TM_COMPLETED_FAILED)
                        mcmd->status = SCST_MGMT_STATUS_FAILED;

                __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
                /* Nexus loss additionally resets the per-LUN state */
                if (nexus_loss)
                        scst_reset_tgt_dev(tgt_dev, 1);
        }
        up(&scst_mutex);

        spin_lock_irq(&scst_list_lock);
        res = scst_set_mcmd_next_state(mcmd);
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_RES(res);
        return res;
}
3172
3173 /* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
        int nexus_loss)
{
        int res;
        struct scst_tgt *tgt = mcmd->sess->tgt;
        struct scst_session *sess;
        struct scst_device *dev;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        if (nexus_loss) {
                TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
                        mcmd);
        } else {
                TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
                        mcmd);
        }

        down(&scst_mutex);

        /* Block every device first, then abort per session and LUN */
        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                spin_lock_bh(&dev->dev_lock);
                __scst_block_dev(dev);
                spin_unlock_bh(&dev->dev_lock);
        }

        list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
                list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
                        sess_tgt_dev_list_entry) 
                {
                        int rc;

                        rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
                        if (rc == SCST_DEV_TM_COMPLETED_FAILED)
                                mcmd->status = SCST_MGMT_STATUS_FAILED;

                        __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
                        /* Nexus loss additionally resets the per-LUN state */
                        if (nexus_loss)
                                scst_reset_tgt_dev(tgt_dev, 1);
                }
        }

        up(&scst_mutex);

        spin_lock_irq(&scst_list_lock);
        res = scst_set_mcmd_next_state(mcmd);
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_RES(res);
        return res;
}
3226
3227 /* Returns 0 if the command processing should be continued, <0 otherwise */
3228 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3229 {
3230         int res = 0;
3231
3232         TRACE_ENTRY();
3233
3234         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3235
3236         switch (mcmd->fn) {
3237         case SCST_ABORT_TASK_SET:
3238         case SCST_CLEAR_TASK_SET:
3239                 res = scst_abort_task_set(mcmd);
3240                 break;
3241
3242         case SCST_LUN_RESET:
3243                 res = scst_lun_reset(mcmd);
3244                 break;
3245
3246         case SCST_TARGET_RESET:
3247                 res = scst_target_reset(mcmd);
3248                 break;
3249
3250         case SCST_ABORT_ALL_TASKS_SESS:
3251                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3252                 break;
3253
3254         case SCST_NEXUS_LOSS_SESS:
3255                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3256                 break;
3257
3258         case SCST_ABORT_ALL_TASKS:
3259                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3260                 break;
3261
3262         case SCST_NEXUS_LOSS:
3263                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3264                 break;
3265
3266         case SCST_CLEAR_ACA:
3267                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3268                 /* Nothing to do (yet) */
3269                 break;
3270
3271         default:
3272                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3273                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3274                 break;
3275         }
3276
3277         TRACE_EXIT_RES(res);
3278         return res;
3279 }
3280
3281 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3282 {
3283         struct scst_device *dev;
3284         struct scst_tgt_dev *tgt_dev;
3285
3286         TRACE_ENTRY();
3287
3288         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3289         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3290                 struct scst_mgmt_cmd *m;
3291                 spin_lock_irq(&scst_list_lock);
3292                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3293                                 mgmt_cmd_list_entry);
3294                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3295                         "cmd list", m);
3296                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3297                 spin_unlock_irq(&scst_list_lock);
3298         }
3299
3300         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3301         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3302                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3303
3304         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3305                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3306                       mcmd->sess->tgt->tgtt->name);
3307                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3308                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn_done() returned",
3309                       mcmd->sess->tgt->tgtt->name);
3310         }
3311
3312         switch (mcmd->fn) {
3313         case SCST_ABORT_TASK_SET:
3314         case SCST_CLEAR_TASK_SET:
3315         case SCST_LUN_RESET:
3316                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3317                 break;
3318
3319         case SCST_TARGET_RESET:
3320         case SCST_ABORT_ALL_TASKS:
3321         case SCST_NEXUS_LOSS:
3322                 down(&scst_mutex);
3323                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3324                         scst_unblock_dev(dev);
3325                 }
3326                 up(&scst_mutex);
3327                 break;
3328
3329         case SCST_NEXUS_LOSS_SESS:
3330         case SCST_ABORT_ALL_TASKS_SESS:
3331                 down(&scst_mutex);
3332                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3333                                 sess_tgt_dev_list_entry) {
3334                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3335                 }
3336                 up(&scst_mutex);
3337                 break;
3338
3339         case SCST_CLEAR_ACA:
3340         default:
3341                 break;
3342         }
3343
3344         mcmd->tgt_specific = NULL;
3345
3346         TRACE_EXIT();
3347         return;
3348 }
3349
/* Returns >0, if cmd should be requeued */
/*
 * Drive a management command through its state machine until it either
 * finishes (freed via out_free), or a handler asks to stop processing
 * for now (non-zero return from init/exec, propagated via out).
 */
static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
	int res = 0;

	TRACE_ENTRY();

	TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);

	while (1) {
		switch (mcmd->state) {
		case SCST_MGMT_CMD_STATE_INIT:
			/* Non-zero res: caller decides (e.g. requeue) */
			res = scst_mgmt_cmd_init(mcmd);
			if (res)
				goto out;
			break;

		case SCST_MGMT_CMD_STATE_READY:
			/* Non-zero: processing continues elsewhere/later */
			if (scst_mgmt_cmd_exec(mcmd))
				goto out;
			break;

		case SCST_MGMT_CMD_STATE_DONE:
			scst_mgmt_cmd_send_done(mcmd);
			break;

		case SCST_MGMT_CMD_STATE_FINISHED:
			goto out_free;

#ifdef EXTRACHECKS
		case SCST_MGMT_CMD_STATE_EXECUTING:
			/* Must never reach this function in this state */
			BUG();
#endif

		default:
			PRINT_ERROR_PR("Unknown state %d of management command",
				    mcmd->state);
			res = -1;
			goto out_free;
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	scst_free_mgmt_cmd(mcmd, 1);
	goto out;
}
3400
3401 static inline int test_mgmt_cmd_list(void)
3402 {
3403         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3404                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3405                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3406         return res;
3407 }
3408
/*
 * Kernel thread processing scst_active_mgmt_cmd_list.  Sleeps on
 * scst_mgmt_cmd_list_waitQ until test_mgmt_cmd_list() becomes true,
 * then processes queued mgmt cmds; exits on SCST_FLAG_SHUTDOWN once
 * the active list drains.
 */
int scst_mgmt_cmd_thread(void *arg)
{
	struct scst_mgmt_cmd *mcmd;

	TRACE_ENTRY();

	daemonize("scsi_tgt_mc");
	recalc_sigpending();
	/* Must not be frozen during suspend: TM cmds may still arrive */
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&scst_list_lock);
	while (1) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		/*
		 * Classic open-coded wait loop: the condition is
		 * re-checked under scst_list_lock after every wakeup,
		 * and the lock is dropped only around schedule().
		 */
		if (!test_mgmt_cmd_list()) {
			add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_mgmt_cmd_list())
					break;
				spin_unlock_irq(&scst_list_lock);
				schedule();
				spin_lock_irq(&scst_list_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
		}

		while (!list_empty(&scst_active_mgmt_cmd_list) &&
		       !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		{
			int rc;
			mcmd = list_entry(scst_active_mgmt_cmd_list.next,
					  typeof(*mcmd), mgmt_cmd_list_entry);
			/* Park on scst_mgmt_cmd_list while processing
			 * outside the lock */
			TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
			      mcmd);
			list_move_tail(&mcmd->mgmt_cmd_list_entry,
				       &scst_mgmt_cmd_list);
			spin_unlock_irq(&scst_list_lock);
			rc = scst_process_mgmt_cmd(mcmd);
			spin_lock_irq(&scst_list_lock);
			/* rc > 0: requeue at the head for a retry */
			if (rc > 0) {
				TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
					"of active mgmt cmd list", mcmd);
				list_move(&mcmd->mgmt_cmd_list_entry,
				       &scst_active_mgmt_cmd_list);
			}
		}

		if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
		    list_empty(&scst_active_mgmt_cmd_list)) 
		{
			break;
		}
	}
	spin_unlock_irq(&scst_list_lock);

	/* Last thread out releases the shutdown mutex so module unload
	 * can proceed */
	if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
		smp_mb__after_atomic_dec();
		TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
		up(scst_shutdown_mutex);
	}

	TRACE_EXIT();
	return 0;
}
3477
3478 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3479         *sess, int fn, int atomic, void *tgt_specific)
3480 {
3481         struct scst_mgmt_cmd *mcmd = NULL;
3482
3483         TRACE_ENTRY();
3484
3485         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3486                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3487                             "(target %s)", sess->tgt->tgtt->name);
3488                 goto out;
3489         }
3490
3491         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3492         if (mcmd == NULL)
3493                 goto out;
3494
3495         mcmd->sess = sess;
3496         mcmd->fn = fn;
3497         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3498         mcmd->tgt_specific = tgt_specific;
3499
3500 out:
3501         TRACE_EXIT();
3502         return mcmd;
3503 }
3504
/*
 * Queue a prepared mgmt cmd for processing.  Returns 0 on success
 * (including the case where the cmd is deferred until session init
 * completes), -1 if the session failed to initialize.
 */
static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
	struct scst_mgmt_cmd *mcmd)
{
	unsigned long flags;
	int res = 0;

	TRACE_ENTRY();

	/* Hold a session reference for the lifetime of the mgmt cmd */
	scst_sess_get(sess);

	spin_lock_irqsave(&scst_list_lock, flags);

	sess->sess_cmd_count++;

#ifdef EXTRACHECKS
	if (unlikely(sess->shutting_down)) {
		PRINT_ERROR_PR("%s",
			"New mgmt cmd while shutting down the session");
		BUG();
	}
#endif

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		switch(sess->init_phase) {
		case SCST_SESS_IPH_INITING:
			/* Session still initializing: defer; res stays 0 */
			TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
				mcmd);
			list_add_tail(&mcmd->mgmt_cmd_list_entry, 
				&sess->init_deferred_mcmd_list);
			goto out_unlock;
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_FAILED:
			res = -1;
			goto out_unlock;
		default:
			BUG();
		}
	}

	TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
	list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);

	spin_unlock_irqrestore(&scst_list_lock, flags);

	/* Kick scst_mgmt_cmd_thread() */
	wake_up(&scst_mgmt_cmd_list_waitQ);

out:
	TRACE_EXIT();
	return res;

out_unlock:
	spin_unlock_irqrestore(&scst_list_lock, flags);
	goto out;
}
3560
/* 
 * Must not been called in parallel with scst_unregister_session() for the 
 * same sess
 */
/*
 * Target-driver entry point: receive a LUN-addressed TM function
 * (anything except ABORT TASK, which is tag-addressed and must go via
 * scst_rx_mgmt_fn_tag()).  Returns 0 on success, -EINVAL for ABORT
 * TASK, -EFAULT on allocation/queueing/LUN-decode failure.
 */
int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
			const uint8_t *lun, int lun_len, int atomic,
			void *tgt_specific)
{
	int res = -EFAULT;
	struct scst_mgmt_cmd *mcmd = NULL;

	TRACE_ENTRY();

	if (unlikely(fn == SCST_ABORT_TASK)) {
		PRINT_ERROR_PR("%s() for ABORT TASK called", __FUNCTION__);
		res = -EINVAL;
		goto out;
	}

	mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_specific);
	if (mcmd == NULL)
		goto out;

	/* (lun_t)-1 is the decode-failure sentinel */
	mcmd->lun = scst_unpack_lun(lun, lun_len);
	if (mcmd->lun == (lun_t)-1)
		goto out_free;

	TRACE(TRACE_MGMT, "sess=%p, lun=%Ld", sess, (uint64_t)mcmd->lun);

	if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
		goto out_free;

	res = 0;

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	scst_free_mgmt_cmd(mcmd, 0);
	mcmd = NULL;
	goto out;
}
3604
3605 /* 
3606  * Must not been called in parallel with scst_unregister_session() for the 
3607  * same sess
3608  */
3609 int scst_rx_mgmt_fn_tag(struct scst_session *sess, int fn, uint32_t tag,
3610                        int atomic, void *tgt_specific)
3611 {
3612         int res = -EFAULT;
3613         struct scst_mgmt_cmd *mcmd = NULL;
3614
3615         TRACE_ENTRY();
3616
3617         if (unlikely(fn != SCST_ABORT_TASK)) {
3618                 PRINT_ERROR_PR("%s(%d) called", __FUNCTION__, fn);
3619                 res = -EINVAL;
3620                 goto out;
3621         }
3622
3623         mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_specific);
3624         if (mcmd == NULL)
3625                 goto out;
3626
3627         mcmd->tag = tag;
3628
3629         TRACE(TRACE_MGMT, "sess=%p, tag=%d", sess, mcmd->tag);
3630
3631         if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
3632                 goto out_free;
3633
3634         res = 0;
3635