1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43         unsigned long *pflags, int left_locked)
44 {
45         int res;
46
47         TRACE_ENTRY();
48
49         TRACE_DBG("Moving cmd %p to cmd list", cmd);
50         list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
51
52         /* This is an inline func., so unneeded code will be optimized out */
53         if (pflags)
54                 spin_unlock_irqrestore(&scst_list_lock, *pflags);
55         else
56                 spin_unlock_irq(&scst_list_lock);
57
58         res = __scst_process_active_cmd(cmd, context, left_locked);
59
60         TRACE_EXIT_RES(res);
61         return res;
62 }
63
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We assume that the other CPU(s) are relatively idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  *  Must not be called in parallel with scst_unregister_session() for the 
87  * same sess
88  */
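/*
 * Creates and partially initializes a new command for the given session.
 * Wrong LUN or CDB values are not rejected here; error reporting for them
 * is deferred to scst_cmd_init_done(). Returns the new command, or NULL if
 * allocation failed.
 */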
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both a wrong LUN and a wrong CDB, defer the error reporting
115          * to scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
132
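/*
 * Called by the target driver when the command has been completely received.
 * Accounts the command in the session, defers it if the session is still
 * initializing, immediately fails commands with a wrong LUN or a zero-length
 * CDB, and otherwise queues the command for initialization and processes it
 * in the requested context.
 */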
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
134 {
135         int res = 0;
136         unsigned long flags = 0;
137         struct scst_session *sess = cmd->sess;
138
139         TRACE_ENTRY();
140
141         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142         TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
143                 (uint64_t)cmd->lun, cmd->cdb_len);
144         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
145                 cmd->cdb, cmd->cdb_len);
146
147         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
149         {
150                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
152                         cmd->tgtt->name);
153                 pref_context = SCST_CONTEXT_TASKLET;
154         }
155
156         spin_lock_irqsave(&scst_list_lock, flags);
157
158         /* Do it here to save an extra lock or atomic operation elsewhere */
159         sess->sess_cmd_count++;
160
161         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
162
163         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164                 switch(sess->init_phase) {
165                 case SCST_SESS_IPH_SUCCESS:
166                         break;
167                 case SCST_SESS_IPH_INITING:
168                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169                         list_add_tail(&cmd->cmd_list_entry, 
170                                 &sess->init_deferred_cmd_list);
171                         goto out_unlock_flags;
172                 case SCST_SESS_IPH_FAILED:
173                         scst_set_busy(cmd);
174                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
175                         TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176                         list_add_tail(&cmd->cmd_list_entry, 
177                                 &scst_active_cmd_list);
178                         goto active;
179                 default:
180                         BUG();
181                 }
182         }
183
184         if (unlikely(cmd->lun == (lun_t)-1)) {
185                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186                 scst_set_cmd_error(cmd,
187                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
191                 goto active;
192         }
193
194         if (unlikely(cmd->cdb_len == 0)) {
195                 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196                 scst_set_cmd_error(cmd,
197                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
201                 goto active;
202         }
203
204         cmd->state = SCST_CMD_STATE_INIT;
205
206         TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207         list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
208
209         switch (pref_context) {
210         case SCST_CONTEXT_DIRECT:
211         case SCST_CONTEXT_DIRECT_ATOMIC:
212                 res = scst_do_job_init(&scst_init_cmd_list);
213                 if (res > 0)
214                         goto out_unlock_flags;
215                 break;
216
217         case SCST_CONTEXT_THREAD:
218                 goto out_thread_unlock_flags;
219
220         case SCST_CONTEXT_TASKLET:
221                 scst_schedule_tasklet();
222                 goto out_unlock_flags;
223
224         default:
225                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
226                             pref_context);
227                 goto out_thread_unlock_flags;
228         }
229
230 active:
231         switch (pref_context) {
232         case SCST_CONTEXT_DIRECT:
233         case SCST_CONTEXT_DIRECT_ATOMIC:
234                 scst_process_active_cmd(cmd, pref_context, &flags, 0);
235                 break;
236
237         case SCST_CONTEXT_THREAD:
238                 goto out_thread_unlock_flags;
239
240         case SCST_CONTEXT_TASKLET:
241                 scst_schedule_tasklet();
242                 goto out_unlock_flags;
243
244         default:
245                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
246                             pref_context);
247                 goto out_thread_unlock_flags;
248         }
249
250 out:
251         TRACE_EXIT();
252         return;
253
254 out_unlock_flags:
255         spin_unlock_irqrestore(&scst_list_lock, flags);
256         goto out;
257
258 out_thread_unlock_flags:
259         cmd->non_atomic_only = 1;
260         spin_unlock_irqrestore(&scst_list_lock, flags);
261         wake_up(&scst_list_waitQ);
262         goto out;
263 }
264
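/*
 * Determines the command's data direction, transfer length and CDB length,
 * either from scst_get_cdb_info() or, for unknown opcodes, from the values
 * supplied by the initiator via the target driver. Rejects NACA and linked
 * commands, then calls the dev handler's parse() and validates the state it
 * returns.
 */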
265 static int scst_parse_cmd(struct scst_cmd *cmd)
266 {
267         int res = SCST_CMD_STATE_RES_CONT_SAME;
268         int state;
269         struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270         struct scst_device *dev = cmd->dev;
271         struct scst_info_cdb cdb_info;
272         int atomic = scst_cmd_atomic(cmd);
273         int set_dir = 1;
274
275         TRACE_ENTRY();
276
277         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278                 TRACE_DBG("ABORTED set, returning ABORTED "
279                         "for cmd %p", cmd);
280                 goto out_xmit;
281         }
282
283         if (atomic && !dev->handler->parse_atomic) {
284                 TRACE_DBG("Dev handler %s parse() cannot be "
285                       "called in atomic context, rescheduling to the thread",
286                       dev->handler->name);
287                 res = SCST_CMD_STATE_RES_NEED_THREAD;
288                 goto out;
289         }
290
291         /*
292          * The expected transfer length and direction supplied by the SCSI
293          * transport via the target driver are untrusted, so we prefer to
294          * derive them from the CDB. Additionally, not all transports can
295          * supply the expected values at all.
296          */
297
298         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
299                         &cdb_info) != 0)) 
300         {
301                 static int t;
302                 if (t < 10) {
303                         t++;
304                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305                                 "Should scst_scsi_op_table be updated?",
306                                 cmd->cdb[0], dev->handler->name);
307                 }
308                 if (scst_cmd_is_expected_set(cmd)) {
309                         TRACE(TRACE_MINOR, "Using initiator supplied values: "
310                                 "direction %d, transfer_len %d",
311                                 cmd->expected_data_direction,
312                                 cmd->expected_transfer_len);
313                         cmd->data_direction = cmd->expected_data_direction;
314                         cmd->bufflen = cmd->expected_transfer_len;
315                         /* Restore (most probably) lost CDB length */
316                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317                         if (cmd->cdb_len == -1) {
318                                 PRINT_ERROR_PR("Unable to get CDB length for "
319                                         "opcode 0x%02x. Returning INVALID "
320                                         "OPCODE", cmd->cdb[0]);
321                                 scst_set_cmd_error(cmd,
322                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
323                                 goto out_xmit;
324                         }
325                 }
326                 else {
327                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328                              "target %s did not supply expected values. "
329                              "Returning INVALID OPCODE.", cmd->cdb[0], 
330                              dev->handler->name, cmd->tgtt->name);
331                         scst_set_cmd_error(cmd,
332                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
333                         goto out_xmit;
334                 }
335         } else {
336                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337                         "set %s), transfer_len=%d (expected len %d), flags=%d",
338                         cdb_info.op_name, cdb_info.direction,
339                         cmd->expected_data_direction,
340                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341                         cdb_info.transfer_len, cmd->expected_transfer_len,
342                         cdb_info.flags);
343
344                 /* Restore (most probably) lost CDB length */
345                 cmd->cdb_len = cdb_info.cdb_len;
346
347                 cmd->data_direction = cdb_info.direction;
348                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349                         cmd->bufflen = cdb_info.transfer_len;
350                 /* else cmd->bufflen remains 0, as it was initialized */
351         }
352
353         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354                 PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
355                             "(opcode 0x%02x)", cmd->cdb[0]);
356                 scst_set_cmd_error(cmd,
357                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
358                 goto out_xmit;
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362                 PRINT_ERROR_PR("Linked commands are not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (likely(!scst_is_cmd_local(cmd))) {
370                 TRACE_DBG("Calling dev handler %s parse(%p)",
371                       dev->handler->name, cmd);
372                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373                 state = dev->handler->parse(cmd, &cdb_info);
374                 TRACE_DBG("Dev handler %s parse() returned %d",
375                         dev->handler->name, state);
376
377                 if (cmd->data_len == -1)
378                         cmd->data_len = cmd->bufflen;
379
380                 if (state == SCST_CMD_STATE_DEFAULT)
381                         state = SCST_CMD_STATE_PREPARE_SPACE;
382         }
383         else
384                 state = SCST_CMD_STATE_PREPARE_SPACE;
385
386 #ifdef EXTRACHECKS
387         if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
388                 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
389                         (state != SCST_CMD_STATE_DEV_PARSE)) ||
390                     ((cmd->bufflen != 0) && 
391                         (cmd->data_direction == SCST_DATA_NONE)) ||
392                     ((cmd->bufflen == 0) && 
393                         (cmd->data_direction != SCST_DATA_NONE)) ||
394                     ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
395                         (state > SCST_CMD_STATE_PREPARE_SPACE))) 
396                 {
397                         PRINT_ERROR_PR("Dev handler %s parse() returned "
398                                        "invalid cmd data_direction %d, "
399                                        "bufflen %zd or state %d (opcode 0x%x)",
400                                        dev->handler->name, 
401                                        cmd->data_direction, cmd->bufflen,
402                                        state, cmd->cdb[0]);
403                         goto out_error;
404                 }
405         }
406 #endif
407
408         switch (state) {
409         case SCST_CMD_STATE_PREPARE_SPACE:
410         case SCST_CMD_STATE_DEV_PARSE:
411         case SCST_CMD_STATE_RDY_TO_XFER:
412         case SCST_CMD_STATE_SEND_TO_MIDLEV:
413         case SCST_CMD_STATE_DEV_DONE:
414         case SCST_CMD_STATE_XMIT_RESP:
415         case SCST_CMD_STATE_FINISHED:
416                 cmd->state = state;
417                 res = SCST_CMD_STATE_RES_CONT_SAME;
418                 break;
419
420         case SCST_CMD_STATE_REINIT:
421                 cmd->tgt_dev_saved = tgt_dev_saved;
422                 cmd->state = state;
423                 res = SCST_CMD_STATE_RES_RESTART;
424                 set_dir = 0;
425                 break;
426
427         case SCST_CMD_STATE_NEED_THREAD_CTX:
428                 TRACE_DBG("Dev handler %s parse() requested thread "
429                       "context, rescheduling", dev->handler->name);
430                 res = SCST_CMD_STATE_RES_NEED_THREAD;
431                 set_dir = 0;
432                 break;
433
434         default:
435                 if (state >= 0) {
436                         PRINT_ERROR_PR("Dev handler %s parse() returned "
437                              "invalid cmd state %d (opcode %d)", 
438                              dev->handler->name, state, cmd->cdb[0]);
439                 } else {
440                         PRINT_ERROR_PR("Dev handler %s parse() returned "
441                                 "error %d (opcode %d)", dev->handler->name, 
442                                 state, cmd->cdb[0]);
443                 }
444                 goto out_error;
445         }
446
447         if ((cmd->resp_data_len == -1) && set_dir) {
448                 if (cmd->data_direction == SCST_DATA_READ)
449                         cmd->resp_data_len = cmd->bufflen;
450                 else
451                          cmd->resp_data_len = 0;
452         }
453         
454 out:
455         TRACE_EXIT_HRES(res);
456         return res;
457
458 out_error:
459         /* dev_done() will be called as part of the regular cmd's finish */
460         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
461         cmd->state = SCST_CMD_STATE_DEV_DONE;
462         res = SCST_CMD_STATE_RES_CONT_SAME;
463         goto out;
464
465 out_xmit:
466         cmd->state = SCST_CMD_STATE_XMIT_RESP;
467         res = SCST_CMD_STATE_RES_CONT_SAME;
468         goto out;
469 }
470
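/*
 * Allocates the data buffer for the command, either via the target driver's
 * alloc_data_buf() callback or via scst_alloc_space(), and selects the next
 * state: RDY_TO_XFER for writes, SEND_TO_MIDLEV otherwise. On allocation
 * failure in atomic context the command is rescheduled to a thread,
 * otherwise BUSY is reported.
 */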
471 static int scst_prepare_space(struct scst_cmd *cmd)
472 {
473         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
474
475         TRACE_ENTRY();
476
477         if (cmd->data_direction == SCST_DATA_NONE) {
478                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
479                 goto out;
480         }
481
482         if (cmd->data_buf_tgt_alloc) {
483                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
484                 r = cmd->tgtt->alloc_data_buf(cmd);
485                 cmd->data_buf_alloced = (r == 0);
486         } else
487                 r = scst_alloc_space(cmd);
488
489         if (r != 0) {
490                 if (scst_cmd_atomic(cmd)) {
491                         TRACE_MEM("%s", "Atomic memory allocation failed, "
492                               "rescheduling to the thread");
493                         res = SCST_CMD_STATE_RES_NEED_THREAD;
494                         goto out;
495                 } else
496                         goto out_no_space;
497         }
498
499         switch (cmd->data_direction) {
500         case SCST_DATA_WRITE:
501                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
502                 break;
503
504         default:
505                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
506                 break;
507         }
508
509 out:
510         TRACE_EXIT_HRES(res);
511         return res;
512
513 out_no_space:
514         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
515                 "(size %zd), sending BUSY status", cmd->bufflen);
516         scst_set_busy(cmd);
517         cmd->state = SCST_CMD_STATE_DEV_DONE;
518         res = SCST_CMD_STATE_RES_CONT_SAME;
519         goto out;
520 }
521
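/*
 * Handles SCST_TGT_RES_QUEUE_FULL from the target driver: moves the command
 * to the target's retry list and arms the retry timer. Returns -1 (retry the
 * command immediately) if at least one other command finished since the
 * caller sampled finished_cmds.
 */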
522 /* No locks */
523 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
524 {
525         struct scst_tgt *tgt = cmd->sess->tgt;
526         int res = 0;
527         unsigned long flags;
528
529         TRACE_ENTRY();
530
531         spin_lock_irqsave(&tgt->tgt_lock, flags);
532         tgt->retry_cmds++;
533         smp_mb();
534         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
535               tgt->retry_cmds);
536         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
537                 /* At least one cmd finished, so try again */
538                 tgt->retry_cmds--;
539                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
540                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
541                       "retry_cmds=%d)", finished_cmds,
542                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
543                 res = -1;
544                 goto out_unlock_tgt;
545         }
546
547         TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
548         /* IRQ already off */
549         spin_lock(&scst_list_lock);
550         list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
551         spin_unlock(&scst_list_lock);
552
553         if (!tgt->retry_timer_active) {
554                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
555                 add_timer(&tgt->retry_timer);
556                 tgt->retry_timer_active = 1;
557         }
558
559 out_unlock_tgt:
560         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
561
562         TRACE_EXIT_RES(res);
563         return res;
564 }
565
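/*
 * Asks the target driver, via its rdy_to_xfer() callback, to start receiving
 * the data for a WRITE-direction command. QUEUE FULL results are handled via
 * scst_queue_retry_cmd(), and the command is rescheduled to a thread if the
 * driver cannot be called in atomic context.
 */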
566 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
567 {
568         int res, rc;
569         int atomic = scst_cmd_atomic(cmd);
570
571         TRACE_ENTRY();
572
573         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
574         {
575                 TRACE_DBG("ABORTED set, returning ABORTED for "
576                         "cmd %p", cmd);
577                 goto out_dev_done;
578         }
579
580         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
581                 TRACE_DBG("%s", "rdy_to_xfer() cannot be "
582                       "called in atomic context, rescheduling to the thread");
583                 res = SCST_CMD_STATE_RES_NEED_THREAD;
584                 goto out;
585         }
586
587         while (1) {
588                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
589
590                 res = SCST_CMD_STATE_RES_CONT_NEXT;
591                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
592
593                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
594 #ifdef DEBUG_RETRY
595                 if (((scst_random() % 100) == 75))
596                         rc = SCST_TGT_RES_QUEUE_FULL;
597                 else
598 #endif
599                         rc = cmd->tgtt->rdy_to_xfer(cmd);
600                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
601
602                 if (likely(rc == SCST_TGT_RES_SUCCESS))
603                         goto out;
604
605                 /* Restore the previous state */
606                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
607
608                 switch (rc) {
609                 case SCST_TGT_RES_QUEUE_FULL:
610                 {
611                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
612                                 break;
613                         else
614                                 continue;
615                 }
616
617                 case SCST_TGT_RES_NEED_THREAD_CTX:
618                 {
619                         TRACE_DBG("Target driver %s "
620                               "rdy_to_xfer() requested thread "
621                               "context, rescheduling", cmd->tgtt->name);
622                         res = SCST_CMD_STATE_RES_NEED_THREAD;
623                         break;
624                 }
625
626                 default:
627                         goto out_error_rc;
628                 }
629                 break;
630         }
631
632 out:
633         TRACE_EXIT_HRES(res);
634         return res;
635
636 out_error_rc:
637         if (rc == SCST_TGT_RES_FATAL_ERROR) {
638                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
639                      "fatal error", cmd->tgtt->name);
640         } else {
641                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
642                             "value %d", cmd->tgtt->name, rc);
643         }
644         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
645
646 out_dev_done:
647         cmd->state = SCST_CMD_STATE_DEV_DONE;
648         res = SCST_CMD_STATE_RES_CONT_SAME;
649         goto out;
650 }
651
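/*
 * Called by the target driver when the data transfer requested by
 * rdy_to_xfer() has finished. Translates the rx status into the next command
 * state (execution, or DEV_DONE with an error) and continues processing the
 * command in the preferred context.
 */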
652 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
653 {
654         unsigned long flags;
655
656         TRACE_ENTRY();
657
658         TRACE_DBG("Preferred context: %d", pref_context);
659         TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
660         cmd->non_atomic_only = 0;
661
662         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
663                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
664         {
665                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
666                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
667                         cmd->tgtt->name);
668                 pref_context = SCST_CONTEXT_TASKLET;
669         }
670
671         switch (status) {
672         case SCST_RX_STATUS_SUCCESS:
673                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
674                 break;
675
676         case SCST_RX_STATUS_ERROR_SENSE_SET:
677                 cmd->state = SCST_CMD_STATE_DEV_DONE;
678                 break;
679
680         case SCST_RX_STATUS_ERROR_FATAL:
681                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
682                 /* fall through */
683         case SCST_RX_STATUS_ERROR:
684                 scst_set_cmd_error(cmd,
685                            SCST_LOAD_SENSE(scst_sense_hardw_error));
686                 cmd->state = SCST_CMD_STATE_DEV_DONE;
687                 break;
688
689         default:
690                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
691                             status);
692                 break;
693         }
694
695         switch (pref_context) {
696         case SCST_CONTEXT_DIRECT:
697         case SCST_CONTEXT_DIRECT_ATOMIC:
698                 scst_check_retries(cmd->tgt, 0);
699                 __scst_process_active_cmd(cmd, pref_context, 0);
700                 break;
701
702         default:
703                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
704                             pref_context);
705                 /* fall through */
706         case SCST_CONTEXT_THREAD:
707                 spin_lock_irqsave(&scst_list_lock, flags);
708                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
709                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
710                 cmd->non_atomic_only = 1;
711                 spin_unlock_irqrestore(&scst_list_lock, flags);
712                 scst_check_retries(cmd->tgt, 1);
713                 wake_up(&scst_list_waitQ);
714                 break;
715
716         case SCST_CONTEXT_TASKLET:
717                 spin_lock_irqsave(&scst_list_lock, flags);
718                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
719                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
720                 spin_unlock_irqrestore(&scst_list_lock, flags);
721                 scst_schedule_tasklet();
722                 scst_check_retries(cmd->tgt, 0);
723                 break;
724         }
725
726         TRACE_EXIT();
727         return;
728 }
729
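/*
 * Examines the command's result and sense data: sets a reset UA after an
 * internal bus reset, handles the "double UA" case by silently retrying the
 * command, and passes any UNIT ATTENTION sense to scst_process_UA() for
 * further handling.
 */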
730 /* No locks supposed to be held */
731 static void scst_check_sense(struct scst_cmd *cmd, struct scsi_request *req,
732                              int *next_state)
733 {
734         int sense_valid;
735         struct scst_device *dev = cmd->dev;
736         int dbl_ua_possible, ua_sent = 0;
737
738         TRACE_ENTRY();
739
740         /* If an internal bus reset happened behind our back, set a reset UA on the cmd */
741         if ((dev->scsi_dev != NULL) &&
742             unlikely(cmd->host_status == DID_RESET) &&
743             scst_is_ua_command(cmd))
744         {
745                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
746                       dev->scsi_dev->was_reset, cmd->host_status);
747                 scst_set_cmd_error(cmd,
748                    SCST_LOAD_SENSE(scst_sense_reset_UA));
749                 /* just in case */
750                 cmd->ua_ignore = 0;
751                 /* It looks like it is safe to clear was_reset here */
752                 dev->scsi_dev->was_reset = 0;
753                 smp_mb();
754         }
755
756         if (req != NULL) {
757                 sense_valid = SCST_SENSE_VALID(req->sr_sense_buffer);
758                 if (sense_valid) {
759                         memcpy(cmd->sense_buffer, req->sr_sense_buffer,
760                                sizeof(cmd->sense_buffer));
761                 }
762         } else
763                 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
764
765         dbl_ua_possible = dev->dev_double_ua_possible;
766         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
767         if (unlikely(dbl_ua_possible)) {
768                 spin_lock_bh(&dev->dev_lock);
769                 barrier(); /* to reread dev_double_ua_possible */
770                 dbl_ua_possible = dev->dev_double_ua_possible;
771                 if (dbl_ua_possible)
772                         ua_sent = dev->dev_reset_ua_sent;
773                 else
774                         spin_unlock_bh(&dev->dev_lock);
775         }
776
777         if (sense_valid) {
778                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
779                              sizeof(cmd->sense_buffer));
780                 /* Check Unit Attention Sense Key */
781                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
782                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
783                                 if (dbl_ua_possible) 
784                                 {
785                                         if (ua_sent) {
786                                                 TRACE(TRACE_MGMT, "%s", 
787                                                         "Double UA detected");
788                                                 /* Do retry */
789                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
790                                                         "(tag %d)", cmd, cmd->tag);
791                                                 cmd->status = 0;
792                                                 cmd->masked_status = 0;
793                                                 cmd->msg_status = 0;
794                                                 cmd->host_status = DID_OK;
795                                                 cmd->driver_status = 0;
796                                                 memset(cmd->sense_buffer, 0,
797                                                         sizeof(cmd->sense_buffer));
798                                                 cmd->retry = 1;
799                                                 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
800                                                 /* 
801                                                  * Dev is still blocked by this cmd, so
802                                                  * it's OK to clear SCST_DEV_SERIALIZED
803                                                  * here.
804                                                  */
805                                                 dev->dev_double_ua_possible = 0;
806                                                 dev->dev_serialized = 0;
807                                                 dev->dev_reset_ua_sent = 0;
808                                                 goto out_unlock;
809                                         } else
810                                                 dev->dev_reset_ua_sent = 1;
811                                 }
812                         }
813                         if (cmd->ua_ignore == 0) {
814                                 if (unlikely(dbl_ua_possible)) {
815                                         __scst_process_UA(dev, cmd,
816                                                 cmd->sense_buffer,
817                                                 sizeof(cmd->sense_buffer), 0);
818                                 } else {
819                                         scst_process_UA(dev, cmd,
820                                                 cmd->sense_buffer,
821                                                 sizeof(cmd->sense_buffer), 0);
822                                 }
823                         }
824                 }
825         }
826
827         if (unlikely(dbl_ua_possible)) {
828                 if (ua_sent && scst_is_ua_command(cmd)) {
829                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
830                         dev->dev_double_ua_possible = 0;
831                         dev->dev_serialized = 0;
832                         dev->dev_reset_ua_sent = 0;
833                 }
834                 spin_unlock_bh(&dev->dev_lock);
835         }
836
837 out:
838         TRACE_EXIT();
839         return;
840
841 out_unlock:
842         spin_unlock_bh(&dev->dev_lock);
843         goto out;
844 }
845
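/*
 * Returns 1 if the command finished with CHECK CONDITION but without valid
 * sense data, so the caller can decide how to handle the missing sense.
 * Also maps transient host errors to BUSY and other host errors to
 * HARDWARE ERROR sense.
 */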
846 static int scst_check_auto_sense(struct scst_cmd *cmd)
847 {
848         int res = 0;
849
850         TRACE_ENTRY();
851
852         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
853             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
854              SCST_NO_SENSE(cmd->sense_buffer)))
855         {
856                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
857                       "cmd->status=%x, cmd->masked_status=%x, "
858                       "cmd->msg_status=%x, cmd->host_status=%x, "
859                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
860                       cmd->msg_status, cmd->host_status, cmd->driver_status);
861                 res = 1;
862         } else if (unlikely(cmd->host_status)) {
863                 if ((cmd->host_status == DID_REQUEUE) ||
864                     (cmd->host_status == DID_IMM_RETRY) ||
865                     (cmd->host_status == DID_SOFT_ERROR)) {
866                         scst_set_busy(cmd);
867                 } else {
868                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
869                                 "received, returning HARDWARE ERROR instead",
870                                 cmd->host_status);
871                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
872                 }
873         }
874
875         TRACE_EXIT_RES(res);
876         return res;
877 }
878
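/*
 * Copies the status, message, host and driver bytes from the completed
 * scsi_request into the command, runs the sense checks, and clears the
 * request fields so the request can be released.
 */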
879 static void scst_do_cmd_done(struct scst_cmd *cmd,
880         struct scsi_request *req, int *next_state)
881 {
882         TRACE_ENTRY();
883
884         cmd->status = req->sr_result & 0xff;
885         cmd->masked_status = status_byte(req->sr_result);
886         cmd->msg_status = msg_byte(req->sr_result);
887         cmd->host_status = host_byte(req->sr_result);
888         cmd->driver_status = driver_byte(req->sr_result);
889         TRACE(TRACE_SCSI, "req->sr_result=%x, cmd->status=%x, "
890               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
891               "cmd->driver_status=%x", req->sr_result, cmd->status,
892               cmd->masked_status, cmd->msg_status, cmd->host_status,
893               cmd->driver_status);
894
895         scst_check_sense(cmd, req, next_state);
896
897         cmd->bufflen = req->sr_bufflen; //??
898
899         /* Clear out request structure */
900         req->sr_use_sg = 0;
901         req->sr_sglist_len = 0;
902         req->sr_bufflen = 0;
903         req->sr_buffer = NULL;
904         req->sr_underflow = 0;
905         req->sr_request->rq_disk = NULL; /* disown request blk */
906
907         TRACE_EXIT();
908         return;
909 }
910
911 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
912                                             struct scsi_request **req)
913 {
914         struct scst_cmd *cmd = NULL;
915
916         if (scsi_cmd && (*req = scsi_cmd->sc_request))
917                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
918
919         if (cmd == NULL) {
920                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
921                 if (*req)
922                         scsi_release_request(*req);
923         }
924
925         return cmd;
926 }
927
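/*
 * Completion callback for commands passed to the SCSI mid-level via
 * scst_do_req(). For read-only LUNs it additionally sets the Write Protect
 * bit in MODE SENSE responses, then finishes the command through the regular
 * DEV_DONE path.
 */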
928 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
929 {
930         struct scsi_request *req = NULL;
931         struct scst_cmd *cmd;
932         int next_state;
933         unsigned char type;
934
935         TRACE_ENTRY();
936
937         WARN_ON(in_irq());
938
939         /*
940          * We don't use scsi_cmd->resid, because:
941          * 1. Many low-level initiator drivers don't set this field
942          * 2. We determine the command's buffer size directly from the CDB,
943          *    so scsi_cmd->resid is not relevant for us, and target drivers
944          *    can determine the residual, if necessary, by comparing the
945          *    expected and actual transfer sizes.
946          */
947
948         cmd = scst_get_cmd(scsi_cmd, &req);
949         if (cmd == NULL)
950                 goto out;
951
952         cmd->completed = 1;
953
954         scst_dec_on_dev_cmd(cmd);
955
956         type = cmd->dev->handler->type;
957         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
958             cmd->tgt_dev->acg_dev->rd_only_flag &&
959             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
960              type == TYPE_TAPE)) {
961                 int32_t length;
962                 uint8_t *address;
963
964                 length = scst_get_buf_first(cmd, &address);
965                 TRACE_DBG("length %d", length);
966                 if (unlikely(length <= 0)) {
967                         goto out;
968                 }
969                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
970                         address[2] |= 0x80;   /* Write Protect*/
971                 }
972                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
973                         address[3] |= 0x80;   /* Write Protect*/
974                 }
975                 scst_put_buf(cmd, address);
976         }
977
978         next_state = SCST_CMD_STATE_DEV_DONE;
979
980         scst_do_cmd_done(cmd, req, &next_state);
981
982         scst_release_request(cmd);
983
984         cmd->state = next_state;
985         cmd->non_atomic_only = 0;
986
987         __scst_process_active_cmd(cmd, scst_get_context(), 0);
988
989 out:
990         TRACE_EXIT();
991         return;
992 }
993
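/*
 * Completion routine for commands executed locally or by a dev handler
 * (installed as cmd->scst_cmd_done). Validates the requested next state,
 * runs the sense checks and continues command processing.
 */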
994 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
995 {
996         TRACE_ENTRY();
997
998         BUG_ON(in_irq());
999
1000         scst_dec_on_dev_cmd(cmd);
1001
1002         if (next_state == SCST_CMD_STATE_DEFAULT)
1003                 next_state = SCST_CMD_STATE_DEV_DONE;
1004
1005         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1006 #if defined(DEBUG) || defined(TRACING)
1007                 if (cmd->sg) {
1008                         int i;
1009                         struct scatterlist *sg = cmd->sg;
1010                         TRACE(TRACE_RECV_TOP, 
1011                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1012                               cmd->sg_cnt, sg, (void*)sg[0].page);
1013                         for(i = 0; i < cmd->sg_cnt; ++i) {
1014                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1015                                         "Exec'd sg:", page_address(sg[i].page),
1016                                         sg[i].length);
1017                         }
1018                 }
1019 #endif
1020         }
1021
1022
1023 #ifdef EXTRACHECKS
1024         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1025             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1026             (next_state != SCST_CMD_STATE_FINISHED)) 
1027         {
1028                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1029                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1030                 scst_set_cmd_error(cmd,
1031                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1032                 next_state = SCST_CMD_STATE_DEV_DONE;
1033         }
1034
1035         if (scst_check_auto_sense(cmd)) {
1036                 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1037                         "opcode %d", cmd->cdb[0]);
1038         }
1039 #endif
1040
1041         scst_check_sense(cmd, NULL, &next_state);
1042
1043         cmd->state = next_state;
1044         cmd->non_atomic_only = 0;
1045
1046         __scst_process_active_cmd(cmd, scst_get_context(), 0);
1047
1048         TRACE_EXIT();
1049         return;
1050 }
1051
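/*
 * Local implementation of REPORT LUNS. Builds the LUN list for the session
 * in the first SG entry of the command's buffer (see the ToDo below) and
 * sets the LUN LIST LENGTH header accordingly.
 */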
1052 static int scst_report_luns_local(struct scst_cmd *cmd)
1053 {
1054         int res = SCST_EXEC_COMPLETED;
1055         int dev_cnt = 0;
1056         int buffer_size;
1057         struct scst_tgt_dev *tgt_dev = NULL;
1058         uint8_t *buffer;
1059
1060         TRACE_ENTRY();
1061
1062         cmd->status = 0;
1063         cmd->masked_status = 0;
1064         cmd->msg_status = 0;
1065         cmd->host_status = DID_OK;
1066         cmd->driver_status = 0;
1067
1068         /* ToDo: use full SG buffer, not only the first entry */
1069         buffer_size = scst_get_buf_first(cmd, &buffer);
1070         if (unlikely(buffer_size <= 0))
1071                 goto out_err;
1072
1073         if (buffer_size < 16) {
1074                 goto out_put_err;
1075         }
1076
1077         memset(buffer, 0, buffer_size);
1078
1079         /* sess->sess_tgt_dev_list is protected by suspended activity */
1080         list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1081                             sess_tgt_dev_list_entry) 
1082         {
1083                 if (8 + 8 * dev_cnt + 2 <= buffer_size) {
1084                         buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1085                         buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
1086                 }
1087                 dev_cnt++;
1088                 /* Temporary, until the ToDo above is done */
1089                 if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
1090                         break;
1091         }
1092
1093         /* Set the response header */
1094         dev_cnt *= 8;
1095         buffer[0] = (dev_cnt >> 24) & 0xff;
1096         buffer[1] = (dev_cnt >> 16) & 0xff;
1097         buffer[2] = (dev_cnt >> 8) & 0xff;
1098         buffer[3] = dev_cnt & 0xff;
1099
1100         dev_cnt += 8;
1101
1102         scst_put_buf(cmd, buffer);
1103
1104         if (buffer_size > dev_cnt)
1105                 scst_set_resp_data_len(cmd, dev_cnt);
1106         
1107 out_done:
1108         cmd->completed = 1;
1109
1110         /* Report the result */
1111         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1112
1113         TRACE_EXIT_RES(res);
1114         return res;
1115         
1116 out_put_err:
1117         scst_put_buf(cmd, buffer);
1118
1119 out_err:
1120         scst_set_cmd_error(cmd,
1121                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1122         goto out_done;
1123 }
1124
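/*
 * Pre-processing for MODE SELECT / LOG SELECT: blocks the device and, if a
 * UA is pending for this tgt_dev, reports it instead of executing the
 * command.
 */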
1125 static int scst_pre_select(struct scst_cmd *cmd)
1126 {
1127         int res = SCST_EXEC_NOT_COMPLETED;
1128
1129         TRACE_ENTRY();
1130
1131         if (scst_cmd_atomic(cmd)) {
1132                 res = SCST_EXEC_NEED_THREAD;
1133                 goto out;
1134         }
1135
1136         scst_block_dev(cmd->dev, 1);
1137         /* Device will be unblocked in scst_done_cmd_check() */
1138
1139         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1140                 int rc = scst_set_pending_UA(cmd);
1141                 if (rc == 0) {
1142                         res = SCST_EXEC_COMPLETED;
1143                         cmd->completed = 1;
1144                         /* Report the result */
1145                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1146                         goto out;
1147                 }
1148         }
1149
1150 out:
1151         TRACE_EXIT_RES(res);
1152         return res;
1153 }
1154
1155 static inline void scst_report_reserved(struct scst_cmd *cmd)
1156 {
1157         TRACE_ENTRY();
1158
1159         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1160         cmd->completed = 1;
1161         /* Report the result */
1162         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1163
1164         TRACE_EXIT();
1165         return;
1166 }
1167
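/*
 * Local part of RESERVE(6)/RESERVE(10) processing: rejects third-party
 * reservations, blocks the device and, under dev_lock, either reports a
 * reservation conflict or marks the device as reserved for all other
 * tgt_devs before the command is passed further.
 */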
1168 static int scst_reserve_local(struct scst_cmd *cmd)
1169 {
1170         int res = SCST_EXEC_NOT_COMPLETED;
1171         struct scst_device *dev;
1172         struct scst_tgt_dev *tgt_dev_tmp;
1173
1174         TRACE_ENTRY();
1175
1176         if (scst_cmd_atomic(cmd)) {
1177                 res = SCST_EXEC_NEED_THREAD;
1178                 goto out;
1179         }
1180
1181         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1182                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1183                      "(lun=%Ld)", (uint64_t)cmd->lun);
1184                 scst_set_cmd_error(cmd,
1185                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1186                 cmd->completed = 1;
1187                 res = SCST_EXEC_COMPLETED;
1188                 goto out;
1189         }
1190
1191         dev = cmd->dev;
1192         scst_block_dev(dev, 1);
1193         /* Device will be unblocked in scst_done_cmd_check() */
1194
1195         spin_lock_bh(&dev->dev_lock);
1196
1197         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1198                 scst_report_reserved(cmd);
1199                 /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1200                 res = SCST_EXEC_COMPLETED;
1201                 goto out_unlock;
1202         }
1203
1204         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1205                             dev_tgt_dev_list_entry) 
1206         {
1207                 if (cmd->tgt_dev != tgt_dev_tmp)
1208                         set_bit(SCST_TGT_DEV_RESERVED, 
1209                                 &tgt_dev_tmp->tgt_dev_flags);
1210         }
1211         dev->dev_reserved = 1;
1212
1213 out_unlock:
1214         spin_unlock_bh(&dev->dev_lock);
1215         
1216 out:
1217         TRACE_EXIT_RES(res);
1218         return res;
1219 }
1220
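/*
 * Local part of RELEASE processing: blocks the device and, under dev_lock,
 * either completes the command immediately with GOOD status (when the
 * reservation is held by another session, so RELEASE is a no-op) or clears
 * the reservation bits for all tgt_devs and lets the command proceed.
 */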
1221 static int scst_release_local(struct scst_cmd *cmd)
1222 {
1223         int res = SCST_EXEC_NOT_COMPLETED;
1224         struct scst_tgt_dev *tgt_dev_tmp;
1225         struct scst_device *dev;
1226
1227         TRACE_ENTRY();
1228
1229         dev = cmd->dev;
1230
1231         scst_block_dev(dev, 1);
1232         cmd->blocking = 1;
1233         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1234
1235         spin_lock_bh(&dev->dev_lock);
1236
1237         /* 
1238          * The device could have been RELEASED behind us if the RESERVING
1239          * session was closed (see scst_free_tgt_dev()), but that doesn't really
1240          * matter, so take the lock and don't retest the DEV_RESERVED bits
1241          */
1242         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1243                 res = SCST_EXEC_COMPLETED;
1244                 cmd->status = 0;
1245                 cmd->masked_status = 0;
1246                 cmd->msg_status = 0;
1247                 cmd->host_status = DID_OK;
1248                 cmd->driver_status = 0;
1249         } else {
1250                 list_for_each_entry(tgt_dev_tmp,
1251                                     &dev->dev_tgt_dev_list,
1252                                     dev_tgt_dev_list_entry) 
1253                 {
1254                         clear_bit(SCST_TGT_DEV_RESERVED, 
1255                                 &tgt_dev_tmp->tgt_dev_flags);
1256                 }
1257                 dev->dev_reserved = 0;
1258         }
1259
1260         spin_unlock_bh(&dev->dev_lock);
1261
1262         if (res == SCST_EXEC_COMPLETED) {
1263                 cmd->completed = 1;
1264                 /* Report the result */
1265                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1266         }
1267
1268         TRACE_EXIT_RES(res);
1269         return res;
1270 }
1271
1272 /* 
1273  * The result of cmd execution, if any, should be reported 
1274  * via scst_cmd_done_local() 
1275  */
1276 static int scst_pre_exec(struct scst_cmd *cmd)
1277 {
1278         int res = SCST_EXEC_NOT_COMPLETED, rc;
1279         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1280
1281         TRACE_ENTRY();
1282
1283         /* Reserve check before Unit Attention */
1284         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
1285             (cmd->cdb[0] != INQUIRY) &&
1286             (cmd->cdb[0] != REPORT_LUNS) &&
1287             (cmd->cdb[0] != RELEASE) &&
1288             (cmd->cdb[0] != RELEASE_10) &&
1289             (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1290             (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1291             (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
1292         {
1293                 scst_report_reserved(cmd);
1294                 res = SCST_EXEC_COMPLETED;
1295                 goto out;
1296         }
1297
1298         /* If an internal bus reset occurred, set a reset unit attention on the cmd */
1299         if ((cmd->dev->scsi_dev != NULL) &&
1300             unlikely(cmd->dev->scsi_dev->was_reset) &&
1301             scst_is_ua_command(cmd)) 
1302         {
1303                 struct scst_device *dev = cmd->dev;
1304                 int done = 0;
1305                 /* Prevent more than 1 cmd from being triggered by was_reset */
1306                 spin_lock_bh(&dev->dev_lock);
1307                 barrier(); /* to reread was_reset */
1308                 if (dev->scsi_dev->was_reset) {
1309                         TRACE(TRACE_MGMT, "was_reset is %d", 1);
1310                         scst_set_cmd_error(cmd,
1311                                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1312                         /* It looks like it is safe to clear was_reset here */
1313                         dev->scsi_dev->was_reset = 0;
1314                         smp_mb();
1315                         done = 1;
1316                 }
1317                 spin_unlock_bh(&dev->dev_lock);
1318
1319                 if (done)
1320                         goto out_done;
1321         }
1322
1323         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
1324             scst_is_ua_command(cmd)) 
1325         {
1326                 rc = scst_set_pending_UA(cmd);
1327                 if (rc == 0)
1328                         goto out_done;
1329         }
1330
1331         /* Check READ_ONLY device status */
1332         if (tgt_dev->acg_dev->rd_only_flag &&
1333             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1334              cmd->cdb[0] == WRITE_10 ||
1335              cmd->cdb[0] == WRITE_12 ||
1336              cmd->cdb[0] == WRITE_16 ||
1337              cmd->cdb[0] == WRITE_VERIFY ||
1338              cmd->cdb[0] == WRITE_VERIFY_12 ||
1339              cmd->cdb[0] == WRITE_VERIFY_16 ||
1340              (cmd->dev->handler->type == TYPE_TAPE &&
1341               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1342         {
1343                 scst_set_cmd_error(cmd,
1344                            SCST_LOAD_SENSE(scst_sense_data_protect));
1345                 goto out_done;
1346         }
1347 out:
1348         TRACE_EXIT_RES(res);
1349         return res;
1350
1351 out_done:
1352         res = SCST_EXEC_COMPLETED;
1353         cmd->completed = 1;
1354         /* Report the result */
1355         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1356         goto out;
1357 }
1358
1359 /* 
1360  * The result of cmd execution, if any, should be reported 
1361  * via scst_cmd_done_local() 
1362  */
1363 static inline int scst_local_exec(struct scst_cmd *cmd)
1364 {
1365         int res = SCST_EXEC_NOT_COMPLETED;
1366
1367         TRACE_ENTRY();
1368
1369         /*
1370          * When adding new commands here, don't forget to update
1371          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1372          */
1373
1374         switch (cmd->cdb[0]) {
1375         case MODE_SELECT:
1376         case MODE_SELECT_10:
1377         case LOG_SELECT:
1378                 res = scst_pre_select(cmd);
1379                 break;
1380         case RESERVE:
1381         case RESERVE_10:
1382                 res = scst_reserve_local(cmd);
1383                 break;
1384         case RELEASE:
1385         case RELEASE_10:
1386                 res = scst_release_local(cmd);
1387                 break;
1388         case REPORT_LUNS:
1389                 res = scst_report_luns_local(cmd);
1390                 break;
1391         }
1392
1393         TRACE_EXIT_RES(res);
1394         return res;
1395 }
1396
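/*
 * Actually executes the command: runs the pre-exec checks (reservations,
 * pending UAs, read-only protection), the locally handled opcodes and the
 * dev handler's exec() callback; if none of them completed the command, a
 * scsi_request is allocated and submitted to the SCSI mid-level via
 * scst_do_req() with scst_cmd_done() as the completion routine.
 */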
1397 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1398 {
1399         int rc = SCST_EXEC_NOT_COMPLETED;
1400
1401         TRACE_ENTRY();
1402
1403         cmd->sent_to_midlev = 1;
1404         cmd->state = SCST_CMD_STATE_EXECUTING;
1405         cmd->scst_cmd_done = scst_cmd_done_local;
1406
1407         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1408         smp_mb__after_set_bit();
1409
1410         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1411                 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1412                 goto out_aborted;
1413         }
1414
1415         rc = scst_pre_exec(cmd);
1416         /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1417         if (rc != SCST_EXEC_NOT_COMPLETED) {
1418                 if (rc == SCST_EXEC_COMPLETED)
1419                         goto out;
1420                 else if (rc == SCST_EXEC_NEED_THREAD)
1421                         goto out_clear;
1422                 else
1423                         goto out_rc_error;
1424         }
1425
1426         rc = scst_local_exec(cmd);
1427         /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1428         if (rc != SCST_EXEC_NOT_COMPLETED) {
1429                 if (rc == SCST_EXEC_COMPLETED)
1430                         goto out;
1431                 else if (rc == SCST_EXEC_NEED_THREAD)
1432                         goto out_clear;
1433                 else
1434                         goto out_rc_error;
1435         }
1436
1437         if (cmd->dev->handler->exec) {
1438                 struct scst_device *dev = cmd->dev;
1439                 TRACE_DBG("Calling dev handler %s exec(%p)",
1440                       dev->handler->name, cmd);
1441                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1442                 cmd->scst_cmd_done = scst_cmd_done_local;
1443                 rc = dev->handler->exec(cmd);
1444                 /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1445                 TRACE_DBG("Dev handler %s exec() returned %d",
1446                       dev->handler->name, rc);
1447                 if (rc != SCST_EXEC_NOT_COMPLETED) {
1448                         if (rc == SCST_EXEC_COMPLETED)
1449                                 goto out;
1450                         else if (rc == SCST_EXEC_NEED_THREAD)
1451                                 goto out_clear;
1452                         else
1453                                 goto out_rc_error;
1454                 }
1455         }
1456
1457         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1458         
1459         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1460                 PRINT_ERROR_PR("Command for virtual device must be "
1461                         "processed by device handler (lun %Ld)!",
1462                         (uint64_t)cmd->lun);
1463                 goto out_error;
1464         }
1465         
1466         if (scst_alloc_request(cmd) != 0) {
1467                 PRINT_INFO_PR("%s", "Unable to allocate request, "
1468                         "sending BUSY status");
1469                 goto out_busy;
1470         }
1471         
1472         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1473                     (void *)cmd->scsi_req->sr_buffer,
1474                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1475                     cmd->retries);
1476
1477         rc = SCST_EXEC_COMPLETED;
1478
1479 out:
1480         TRACE_EXIT();
1481         return rc;
1482
1483 out_clear:
1484         /* Restore the state */
1485         cmd->sent_to_midlev = 0;
1486         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1487         goto out;
1488
1489 out_rc_error:
1490         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1491                     "invalid code %d", cmd->dev->handler->name, rc);
1492         /* fall through */
1493
1494 out_error:
1495         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1496         cmd->completed = 1;
1497         cmd->state = SCST_CMD_STATE_DEV_DONE;
1498         rc = SCST_EXEC_COMPLETED;
1499         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1500         goto out;
1501         
1502 out_busy:
1503         scst_set_busy(cmd);
1504         cmd->completed = 1;
1505         cmd->state = SCST_CMD_STATE_DEV_DONE;
1506         rc = SCST_EXEC_COMPLETED;
1507         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1508         goto out;
1509
1510 out_aborted:
1511         rc = SCST_EXEC_COMPLETED;
1512         /* Report the result. The cmd is not completed */
1513         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1514         goto out;
1515 }
1516
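/*
 * SCST_CMD_STATE_SEND_TO_MIDLEV handler.  Enforces per-device execution
 * order via cmd->sn/expected_sn, defers out-of-order cmds and, after a
 * successful submission, drains any deferred cmds that became eligible.
 * Returns an SCST_CMD_STATE_RES_* code.
 */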
1517 static int scst_send_to_midlev(struct scst_cmd *cmd)
1518 {
1519         int res, rc;
1520         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1521         struct scst_device *dev = cmd->dev;
1522         int expected_sn;
1523         int count;
1524         int atomic = scst_cmd_atomic(cmd);
1525
1526         TRACE_ENTRY();
1527
1528         res = SCST_CMD_STATE_RES_CONT_NEXT;
1529
1530         if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1531                 TRACE_DBG("Dev handler %s exec() can not be "
1532                       "called in atomic context, rescheduling to the thread",
1533                       dev->handler->name);
1534                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1535                 goto out;
1536         }
1537
1538         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1539                 goto out;
1540
1541         scst_inc_cmd_count(); /* protect dev & tgt_dev */
1542
1543         if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1544                 rc = scst_do_send_to_midlev(cmd);
1545                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1546                 if (rc == SCST_EXEC_NEED_THREAD) {
1547                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1548                               "thread context, rescheduling");
1549                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1550                         scst_dec_on_dev_cmd(cmd);
1551                         goto out_dec_cmd_count;
1552                 } else {
1553                         BUG_ON(rc != SCST_EXEC_COMPLETED);
1554                         goto out_unplug;
1555                 }
1556         }
1557
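        /*
         * Commands must reach the device in the order set by cmd->sn.
         * If this cmd's sn is not the expected one, park it on the
         * tgt_dev's deferred list; it will be picked up later by
         * scst_check_deferred_commands() once expected_sn catches up.
         */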
1558         expected_sn = tgt_dev->expected_sn;
1559         if (cmd->sn != expected_sn) {
1560                 spin_lock_bh(&tgt_dev->sn_lock);
1561                 tgt_dev->def_cmd_count++;
1562                 smp_mb();
1563                 barrier(); /* to reread expected_sn */
1564                 expected_sn = tgt_dev->expected_sn;
1565                 if (cmd->sn != expected_sn) {
1566                         scst_dec_on_dev_cmd(cmd);
1567                         TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1568                               "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1569                         list_add_tail(&cmd->sn_cmd_list_entry,
1570                                       &tgt_dev->deferred_cmd_list);
1571                         spin_unlock_bh(&tgt_dev->sn_lock);
1572                         /* !! At this point cmd can be already freed !! */
1573                         goto out_dec_cmd_count;
1574                 } else {
1575                         TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1576                               "expected_sn %d, continuing", expected_sn);
1577                         tgt_dev->def_cmd_count--;
1578                         spin_unlock_bh(&tgt_dev->sn_lock);
1579                 }
1580         }
1581
1582         count = 0;
1583         while(1) {
1584                 rc = scst_do_send_to_midlev(cmd);
1585                 if (rc == SCST_EXEC_NEED_THREAD) {
1586                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1587                               "thread context, rescheduling");
1588                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1589                         scst_dec_on_dev_cmd(cmd);
1590                         if (count != 0)
1591                                 goto out_unplug;
1592                         else
1593                                 goto out_dec_cmd_count;
1594                 }
1595                 BUG_ON(rc != SCST_EXEC_COMPLETED);
1596                 /* !! At this point cmd can be already freed !! */
1597                 count++;
1598                 expected_sn = __scst_inc_expected_sn(tgt_dev);
1599                 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1600                 if (cmd == NULL)
1601                         break;
1602                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1603                         break;
1604         }
1605
1606 out_unplug:
1607         if (dev->scsi_dev != NULL)
1608                 generic_unplug_device(dev->scsi_dev->request_queue);
1609
1610 out_dec_cmd_count:
1611         scst_dec_cmd_count();
1612         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1613
1614 out:
1615         TRACE_EXIT_HRES(res);
1616         return res;
1617 }
1618
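/*
 * Allocates and initializes an internal cmd (e.g. REQUEST SENSE) on behalf
 * of orig_cmd, inheriting its session, device and LUN, and optionally
 * allocates a data buffer of bufsize bytes.  Returns the new cmd or NULL
 * on failure.
 */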
1619 static struct scst_cmd *scst_create_prepare_internal_cmd(
1620         struct scst_cmd *orig_cmd, int bufsize)
1621 {
1622         struct scst_cmd *res;
1623         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1624
1625         TRACE_ENTRY();
1626
1627         res = scst_alloc_cmd(gfp_mask);
1628         if (unlikely(res == NULL)) {
1629                 goto out;
1630         }
1631
1632         res->sess = orig_cmd->sess;
1633         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1634         res->atomic = scst_cmd_atomic(orig_cmd);
1635         res->internal = 1;
1636         res->tgtt = orig_cmd->tgtt;
1637         res->tgt = orig_cmd->tgt;
1638         res->dev = orig_cmd->dev;
1639         res->tgt_dev = orig_cmd->tgt_dev;
1640         res->lun = orig_cmd->lun;
1641         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1642         res->data_direction = SCST_DATA_UNKNOWN;
1643         res->orig_cmd = orig_cmd;
1644
1645         res->bufflen = bufsize;
1646         if (bufsize > 0) {
1647                 if (scst_alloc_space(res) != 0) {
1648                         PRINT_ERROR("Unable to create buffer (size %d) for "
1649                                 "internal cmd", bufsize);
1650                         goto out_free_res;
1651                 }
1652         }
1653 out:
1654         TRACE_EXIT_HRES((unsigned long)res);
1655         return res;
1656
1657 out_free_res:
1658         scst_destroy_cmd(res);
1659         res = NULL;
1660         goto out;
1661 }
1662
1663 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1664 {
1665         TRACE_ENTRY();
1666
1667         if (cmd->bufflen > 0)
1668                 scst_release_space(cmd);
1669         scst_destroy_cmd(cmd);
1670
1671         TRACE_EXIT();
1672         return;
1673 }
1674
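/*
 * Builds an internal REQUEST SENSE cmd for orig_cmd and queues it at the
 * head of the active cmd list, so the sense data can be fetched when the
 * device returned CHECK CONDITION without auto sense.  Returns
 * SCST_CMD_STATE_RES_RESTART on success, -1 on failure.
 */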
1675 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1676 {
1677         int res = SCST_CMD_STATE_RES_RESTART;
1678 #define sbuf_size 252
1679         static const unsigned char request_sense[6] =
1680             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1681         struct scst_cmd *rs_cmd;
1682
1683         TRACE_ENTRY();
1684
1685         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1686         if (rs_cmd == NULL)
1687                 goto out_error;
1688
1689         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1690         rs_cmd->cdb_len = sizeof(request_sense);
1691         rs_cmd->data_direction = SCST_DATA_READ;
1692
1693         spin_lock_irq(&scst_list_lock);
1694         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1695         spin_unlock_irq(&scst_list_lock);
1696
1697 out:
1698         TRACE_EXIT_RES(res);
1699         return res;
1700
1701 out_error:
1702         res = -1;
1703         goto out;
1704 #undef sbuf_size
1705 }
1706
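/*
 * Completion of an internal REQUEST SENSE cmd: copies the returned sense
 * data into orig_cmd's sense buffer (or sets HARDWARE ERROR if nothing
 * usable came back), frees the internal cmd and returns orig_cmd so its
 * processing can continue.
 */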
1707 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1708 {
1709         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1710         uint8_t *buf;
1711         int len;
1712
1713         TRACE_ENTRY();
1714
1715         BUG_ON(orig_cmd == NULL);
1716
1717         len = scst_get_buf_first(cmd, &buf);
1718
1719         if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1720             (!SCST_NO_SENSE(buf))) 
1721         {
1722                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1723                         buf, len);
1724                 memcpy(orig_cmd->sense_buffer, buf,
1725                         (sizeof(orig_cmd->sense_buffer) > len) ?
1726                                 len : sizeof(orig_cmd->sense_buffer));
1727         } else {
1728                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1729                         "REQUEST SENSE, returning HARDWARE ERROR");
1730                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1731         }
1732
1733         scst_put_buf(cmd, buf);
1734
1735         scst_free_internal_cmd(cmd);
1736
1737         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1738         return orig_cmd;
1739 }
1740
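/*
 * Post-execution checks common to all cmds: issues REQUEST SENSE when a
 * CHECK CONDITION arrived without sense data, forces the Write Protect bit
 * in MODE SENSE data for read-only LUNs, clears the NormACA bit in standard
 * INQUIRY data, and handles RESERVE failures and MODE/LOG SELECT Unit
 * Attention side effects.  Returns non-zero if cmd processing must be
 * restarted (*pres then holds the new processing result).
 */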
1741 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1742 {
1743         int res = 0, rc;
1744         unsigned char type;
1745
1746         TRACE_ENTRY();
1747
1748         if (cmd->cdb[0] == REQUEST_SENSE) {
1749                 if (cmd->internal)
1750                         cmd = scst_complete_request_sense(cmd);
1751         } else if (scst_check_auto_sense(cmd)) {
1752                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1753                             "without sense data (opcode 0x%x), issuing "
1754                             "REQUEST SENSE", cmd->cdb[0]);
1755                 rc = scst_prepare_request_sense(cmd);
1756                 if (rc > 0) {
1757                         *pres = rc;
1758                         res = 1;
1759                         goto out;
1760                 } else {
1761                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1762                                     "returning HARDWARE ERROR");
1763                         scst_set_cmd_error(cmd,
1764                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1765                 }
1766         }
1767
1768         type = cmd->dev->handler->type;
1769         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1770             cmd->tgt_dev->acg_dev->rd_only_flag &&
1771             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1772              type == TYPE_TAPE))
1773         {
1774                 int32_t length;
1775                 uint8_t *address;
1776
1777                 length = scst_get_buf_first(cmd, &address);
1778                 if (length <= 0)
1779                         goto out;
1780                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1781                         address[2] |= 0x80;   /* Write Protect*/
1782                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1783                         address[3] |= 0x80;   /* Write Protect*/
1784                 scst_put_buf(cmd, address);
1785         }
1786
1787         /* 
1788          * Check and clear NormACA option for the device, if necessary,
1789          * since we don't support ACA
1790          */
1791         if ((cmd->cdb[0] == INQUIRY) &&
1792             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1793             (cmd->resp_data_len > SCST_INQ_BYTE3))
1794         {
1795                 uint8_t *buffer;
1796                 int buflen;
1797
1798                 /* ToDo: all pages ?? */
1799                 buflen = scst_get_buf_first(cmd, &buffer);
1800                 if (buflen > 0) {
1801                         if (buflen > SCST_INQ_BYTE3) {
1802 #ifdef EXTRACHECKS
1803                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1804                                         PRINT_INFO_PR("NormACA set for device: "
1805                                             "lun=%Ld, type 0x%02x", 
1806                                             (uint64_t)cmd->lun, buffer[0]);
1807                                 }
1808 #endif
1809                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1810                         } else
1811                                 scst_set_cmd_error(cmd,
1812                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1813
1814                         scst_put_buf(cmd, buffer);
1815                 }
1816         }
1817
1818         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
1819                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
1820                                                 &cmd->tgt_dev->tgt_dev_flags)) {
1821                         struct scst_tgt_dev *tgt_dev_tmp;
1822                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
1823                               (uint64_t)cmd->lun, cmd->masked_status);
1824                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1825                                      sizeof(cmd->sense_buffer));
1826                         /* Clearing the reservation */
1827                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
1828                                             dev_tgt_dev_list_entry) {
1829                                 clear_bit(SCST_TGT_DEV_RESERVED, 
1830                                         &tgt_dev_tmp->tgt_dev_flags);
1831                         }
1832                         cmd->dev->dev_reserved = 0;
1833                 }
1834                 scst_unblock_dev(cmd->dev);
1835         }
1836         
1837         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
1838                      (cmd->cdb[0] == MODE_SELECT_10) ||
1839                      (cmd->cdb[0] == LOG_SELECT)))
1840         {
1841                 if (cmd->status == 0) {
1842                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
1843                                 "setting the SELECT UA (lun=%Ld)", 
1844                                 (uint64_t)cmd->lun);
1845                         spin_lock_bh(&scst_temp_UA_lock);
1846                         if (cmd->cdb[0] == LOG_SELECT) {
1847                                 scst_set_sense(scst_temp_UA,
1848                                         sizeof(scst_temp_UA),
1849                                         UNIT_ATTENTION, 0x2a, 0x02);
1850                         } else {
1851                                 scst_set_sense(scst_temp_UA,
1852                                         sizeof(scst_temp_UA),
1853                                         UNIT_ATTENTION, 0x2a, 0x01);
1854                         }
1855                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
1856                                 sizeof(scst_temp_UA), 1);
1857                         spin_unlock_bh(&scst_temp_UA_lock);
1858                 }
1859                 scst_unblock_dev(cmd->dev);
1860         }
1861
1862 out:
1863         TRACE_EXIT_RES(res);
1864         return res;
1865 }
1866
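/*
 * SCST_CMD_STATE_DEV_DONE handler: runs the common post-execution checks
 * and then the dev handler's dev_done() callback, which may override the
 * next cmd state.  Returns an SCST_CMD_STATE_RES_* code.
 */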
1867 static int scst_dev_done(struct scst_cmd *cmd)
1868 {
1869         int res = SCST_CMD_STATE_RES_CONT_SAME;
1870         int state;
1871         int atomic = scst_cmd_atomic(cmd);
1872
1873         TRACE_ENTRY();
1874
1875         if (atomic && !cmd->dev->handler->dev_done_atomic &&
1876             cmd->dev->handler->dev_done) 
1877         {
1878                 TRACE_DBG("Dev handler %s dev_done() can not be "
1879                       "called in atomic context, rescheduling to the thread",
1880                       cmd->dev->handler->name);
1881                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1882                 goto out;
1883         }
1884
1885         if (scst_done_cmd_check(cmd, &res))
1886                 goto out;
1887
1888         state = SCST_CMD_STATE_XMIT_RESP;
1889         if (likely(!scst_is_cmd_local(cmd)) && 
1890             likely(cmd->dev->handler->dev_done != NULL))
1891         {
1892                 int rc;
1893                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
1894                       cmd->dev->handler->name, cmd);
1895                 rc = cmd->dev->handler->dev_done(cmd);
1896                 TRACE_DBG("Dev handler %s dev_done() returned %d",
1897                       cmd->dev->handler->name, rc);
1898                 if (rc != SCST_CMD_STATE_DEFAULT)
1899                         state = rc;
1900         }
1901
1902         switch (state) {
1903         case SCST_CMD_STATE_REINIT:
1904                 cmd->state = state;
1905                 res = SCST_CMD_STATE_RES_RESTART;
1906                 break;
1907
1908         case SCST_CMD_STATE_DEV_PARSE:
1909         case SCST_CMD_STATE_PREPARE_SPACE:
1910         case SCST_CMD_STATE_RDY_TO_XFER:
1911         case SCST_CMD_STATE_SEND_TO_MIDLEV:
1912         case SCST_CMD_STATE_DEV_DONE:
1913         case SCST_CMD_STATE_XMIT_RESP:
1914         case SCST_CMD_STATE_FINISHED:
1915                 cmd->state = state;
1916                 res = SCST_CMD_STATE_RES_CONT_SAME;
1917                 break;
1918
1919         case SCST_CMD_STATE_NEED_THREAD_CTX:
1920                 TRACE_DBG("Dev handler %s dev_done() requested "
1921                       "thread context, rescheduling",
1922                       cmd->dev->handler->name);
1923                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1924                 break;
1925
1926         default:
1927                 if (state >= 0) {
1928                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
1929                                 "invalid cmd state %d", 
1930                                 cmd->dev->handler->name, state);
1931                 } else {
1932                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
1933                                 "error %d", cmd->dev->handler->name, 
1934                                 state);
1935                 }
1936                 scst_set_cmd_error(cmd,
1937                            SCST_LOAD_SENSE(scst_sense_hardw_error));
1938                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
1939                 res = SCST_CMD_STATE_RES_CONT_SAME;
1940                 break;
1941         }
1942
1943 out:
1944         TRACE_EXIT_HRES(res);
1945         return res;
1946 }
1947
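/*
 * SCST_CMD_STATE_XMIT_RESP handler: hands the response over to the target
 * driver's xmit_response() callback, retrying on QUEUE_FULL and handling
 * aborted and NO_RESP cmds.  Returns an SCST_CMD_STATE_RES_* code.
 */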
1948 static int scst_xmit_response(struct scst_cmd *cmd)
1949 {
1950         int res, rc;
1951         int atomic = scst_cmd_atomic(cmd);
1952
1953         TRACE_ENTRY();
1954
1955         /* 
1956          * Check here also in order to avoid unnecessary delays of other
1957          * commands.
1958          */
1959         if (unlikely(cmd->sent_to_midlev == 0) &&
1960             (cmd->tgt_dev != NULL))
1961         {
1962                 TRACE(TRACE_SCSI_SERIALIZING,
1963                       "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
1964                 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
1965                 cmd->sent_to_midlev = 1;
1966         }
1967
1968         if (atomic && !cmd->tgtt->xmit_response_atomic) {
1969                 TRACE_DBG("%s", "xmit_response() can not be "
1970                       "called in atomic context, rescheduling to the thread");
1971                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1972                 goto out;
1973         }
1974
1975         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
1976         smp_mb__after_set_bit();
1977
1978         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1979                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
1980                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
1981                                 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
1982                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
1983                 }
1984         }
1985
1986         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
1987                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
1988                         cmd, cmd->tag);
1989                 cmd->state = SCST_CMD_STATE_FINISHED;
1990                 res = SCST_CMD_STATE_RES_CONT_SAME;
1991                 goto out;
1992         }
1993
1994 #ifdef DEBUG_TM
1995         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
1996                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
1997                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
1998                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1999                         goto out;
2000                 }
2001                 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2002                         cmd, cmd->tag);
2003                 schedule_timeout_uninterruptible(HZ);
2004         }
2005 #endif
2006
2007         while (1) {
2008                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2009
2010                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2011                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2012
2013                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2014
2015 #if defined(DEBUG) || defined(TRACING)
2016                 if (cmd->sg) {
2017                         int i;
2018                         struct scatterlist *sg = cmd->sg;
2019                         TRACE(TRACE_SEND_BOT, 
2020                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2021                               cmd->sg_cnt, sg, (void*)sg[0].page);
2022                         for(i = 0; i < cmd->sg_cnt; ++i) {
2023                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2024                                     "Xmitting sg:", page_address(sg[i].page),
2025                                     sg[i].length);
2026                         }
2027                 }
2028 #endif
2029
2030 #ifdef DEBUG_RETRY
2031                 if (((scst_random() % 100) == 77))
2032                         rc = SCST_TGT_RES_QUEUE_FULL;
2033                 else
2034 #endif
2035                         rc = cmd->tgtt->xmit_response(cmd);
2036                 TRACE_DBG("xmit_response() returned %d", rc);
2037
2038                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2039                         goto out;
2040
2041                 /* Restore the previous state */
2042                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2043
2044                 switch (rc) {
2045                 case SCST_TGT_RES_QUEUE_FULL:
2046                 {
2047                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2048                                 break;
2049                         else
2050                                 continue;
2051                 }
2052
2053                 case SCST_TGT_RES_NEED_THREAD_CTX:
2054                 {
2055                         TRACE_DBG("Target driver %s xmit_response() "
2056                               "requested thread context, rescheduling",
2057                               cmd->tgtt->name);
2058                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2059                         break;
2060                 }
2061
2062                 default:
2063                         goto out_error;
2064                 }
2065                 break;
2066         }
2067
2068 out:
2069         /* Caution: cmd can be already dead here */
2070         TRACE_EXIT_HRES(res);
2071         return res;
2072
2073 out_error:
2074         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2075                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2076                         "fatal error", cmd->tgtt->name);
2077         } else {
2078                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2079                         "invalid value %d", cmd->tgtt->name, rc);
2080         }
2081         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2082         cmd->state = SCST_CMD_STATE_FINISHED;
2083         res = SCST_CMD_STATE_RES_CONT_SAME;
2084         goto out;
2085 }
2086
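/*
 * Final cmd state: removes the cmd from the global and per-session lists,
 * notifies a pending management cmd, if any, and frees the cmd.
 */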
2087 static int scst_finish_cmd(struct scst_cmd *cmd)
2088 {
2089         int res;
2090
2091         TRACE_ENTRY();
2092
2093         spin_lock_irq(&scst_list_lock);
2094
2095         TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2096         list_del(&cmd->cmd_list_entry);
2097
2098         if (cmd->mgmt_cmnd)
2099                 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2100
2101         if (likely(cmd->tgt_dev != NULL)) {
2102                 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2103                 tgt_dev->cmd_count--;
2104                 if (!list_empty(&tgt_dev->thr_cmd_list)) {
2105                         struct scst_cmd *t = 
2106                                 list_entry(tgt_dev->thr_cmd_list.next,
2107                                         typeof(*t), cmd_list_entry);
2108                         scst_unthrottle_cmd(t);
2109                         if (!cmd->processible_env)
2110                                 wake_up(&scst_list_waitQ);
2111                 }
2112         }
2113
2114         cmd->sess->sess_cmd_count--;
2115
2116         list_del(&cmd->search_cmd_list_entry);
2117
2118         spin_unlock_irq(&scst_list_lock);
2119
2120         scst_free_cmd(cmd);
2121
2122         res = SCST_CMD_STATE_RES_CONT_NEXT;
2123
2124         TRACE_EXIT_HRES(res);
2125         return res;
2126 }
2127
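/*
 * Called by the target driver once the response has been transmitted
 * (cmd in XMIT_WAIT state).  Depending on the calling context the cmd is
 * either finished directly or moved to the active cmd list and handled
 * by the tasklet.
 */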
2128 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2129 {
2130         int res = 0;
2131         unsigned long flags;
2132         int context;
2133
2134         TRACE_ENTRY();
2135
2136         BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2137
2138         if (in_irq())
2139                 context = SCST_CONTEXT_TASKLET;
2140         else
2141                 context = scst_get_context();
2142
2143         TRACE_DBG("Context: %d", context);
2144         cmd->non_atomic_only = 0;
2145         cmd->state = SCST_CMD_STATE_FINISHED;
2146
2147         switch (context) {
2148         case SCST_CONTEXT_DIRECT:
2149         case SCST_CONTEXT_DIRECT_ATOMIC:
2150                 flags = 0;
2151                 scst_check_retries(cmd->tgt, 0);
2152                 res = __scst_process_active_cmd(cmd, context, 0);
2153                 BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
2154                 break;
2155
2156         case SCST_CONTEXT_TASKLET:
2157         {
2158                 spin_lock_irqsave(&scst_list_lock, flags);
2159                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2160                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2161                 spin_unlock_irqrestore(&scst_list_lock, flags);
2162                 scst_schedule_tasklet();
2163                 scst_check_retries(cmd->tgt, 0);
2164                 break;
2165         }
2166
2167         default:
2168                 BUG();
2169                 break;
2170         }
2171
2172         TRACE_EXIT();
2173         return;
2174 }
2175
2176 /*
2177  * Returns 0 on success, > 0 when we need to wait for unblock,
2178  * < 0 if there is no device (lun) or device type handler.
2179  * Called under scst_list_lock and IRQs disabled
2180  */
2181 static int scst_translate_lun(struct scst_cmd *cmd)
2182 {
2183         struct scst_tgt_dev *tgt_dev = NULL;
2184         int res = 0;
2185
2186         TRACE_ENTRY();
2187
2188         scst_inc_cmd_count();   
2189
2190         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2191                 res = -1;
2192                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2193                       (uint64_t)cmd->lun);
2194                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2195                                     sess_tgt_dev_list_entry) 
2196                 {
2197                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2198                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2199
2200                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2201                                         PRINT_INFO_PR("Dev handler for device "
2202                                           "%Ld is NULL, the device will not be "
2203                                           "visible remotely", (uint64_t)cmd->lun);
2204                                         break;
2205                                 }
2206                                 
2207                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2208                                         cmd->tgt_dev_saved->cmd_count--;
2209                                         TRACE(TRACE_SCSI_SERIALIZING,
2210                                               "SCST_CMD_STATE_REINIT: "
2211                                               "incrementing expected_sn on tgt_dev_saved %p",
2212                                               cmd->tgt_dev_saved);
2213                                         scst_inc_expected_sn_unblock(
2214                                                 cmd->tgt_dev_saved, cmd, 1);
2215                                 }
2216                                 cmd->tgt_dev = tgt_dev;
2217                                 tgt_dev->cmd_count++;
2218                                 cmd->dev = tgt_dev->acg_dev->dev;
2219
2220                                 /* ToDo: cmd->queue_type */
2221
2222                                 /* scst_list_lock is enough to protect that */
2223                                 cmd->sn = tgt_dev->next_sn;
2224                                 tgt_dev->next_sn++;
2225
2226                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2227                                         "cmd->sn: %d", cmd->sn);
2228
2229                                 res = 0;
2230                                 break;
2231                         }
2232                 }
2233                 if (res != 0) {
2234                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2235                                 "nonexistent LU?", (uint64_t)cmd->lun);
2236                         scst_dec_cmd_count();
2237                 }
2238         } else {
2239                 if ( !cmd->sess->waiting) {
2240                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2241                               cmd->sess);
2242                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2243                                       &scst_dev_wait_sess_list);
2244                         cmd->sess->waiting = 1;
2245                 }
2246                 scst_dec_cmd_count();
2247                 res = 1;
2248         }
2249
2250         TRACE_EXIT_RES(res);
2251         return res;
2252 }
2253
2254 /* Called under scst_list_lock and IRQs disabled */
2255 static int scst_process_init_cmd(struct scst_cmd *cmd)
2256 {
2257         int res = 0;
2258
2259         TRACE_ENTRY();
2260
2261         res = scst_translate_lun(cmd);
2262         if (likely(res == 0)) {
2263                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2264                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS)
2265 #if 0 /* don't know how it's better */
2266                 {
2267                         scst_throttle_cmd(cmd);
2268                 } else {
2269                         BUG_ON(!list_empty(&cmd->tgt_dev->thr_cmd_list));
2270                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2271                         list_move_tail(&cmd->cmd_list_entry, 
2272                                 &scst_active_cmd_list);
2273                 }
2274 #else
2275                 {
2276                         TRACE(TRACE_RETRY, "Too many pending commands in "
2277                                 "session, returning BUSY to initiator \"%s\"",
2278                                 (cmd->sess->initiator_name[0] == '\0') ?
2279                                   "Anonymous" : cmd->sess->initiator_name);
2280                         scst_set_busy(cmd);
2281                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2282                 }
2283                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2284                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2285 #endif
2286         } else if (res < 0) {
2287                 TRACE_DBG("Finishing cmd %p", cmd);
2288                 scst_set_cmd_error(cmd,
2289                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2290                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2291                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2292                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2293         }
2294
2295         TRACE_EXIT_RES(res);
2296         return res;
2297 }
2298
2299 /* 
2300  * Called under scst_list_lock and IRQs disabled
2301  * We don't drop it anywhere inside, because command execution
2302  * has to be serialized, i.e. commands must be executed in order
2303  * of their arrival, and we set this order inside scst_translate_lun().
2304  */
2305 static int scst_do_job_init(struct list_head *init_cmd_list)
2306 {
2307         int res = 1;
2308
2309         TRACE_ENTRY();
2310
2311         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2312                 while (!list_empty(init_cmd_list)) {
2313                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2314                                                           typeof(*cmd),
2315                                                           cmd_list_entry);
2316                         res = scst_process_init_cmd(cmd);
2317                         if (res > 0)
2318                                 break;
2319                 }
2320         }
2321
2322         TRACE_EXIT_RES(res);
2323         return res;
2324 }
2325
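/*
 * Core per-cmd state machine: dispatches the cmd to the handler of its
 * current state and loops while the handlers ask to continue with the same
 * cmd.  On SCST_CMD_STATE_RES_NEED_THREAD the cmd is put back on the active
 * cmd list and the cmd threads are woken up; on RES_RESTART it is moved to
 * the head of the init cmd list.
 */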
2326 /* Called with no locks held */
2327 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2328         int left_locked)
2329 {
2330         int res;
2331
2332         TRACE_ENTRY();
2333
2334         BUG_ON(in_irq());
2335
2336         cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2337                         SCST_CONTEXT_DIRECT_ATOMIC);
2338         cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2339
2340         do {
2341                 switch (cmd->state) {
2342                 case SCST_CMD_STATE_DEV_PARSE:
2343                         res = scst_parse_cmd(cmd);
2344                         break;
2345
2346                 case SCST_CMD_STATE_PREPARE_SPACE:
2347                         res = scst_prepare_space(cmd);
2348                         break;
2349
2350                 case SCST_CMD_STATE_RDY_TO_XFER:
2351                         res = scst_rdy_to_xfer(cmd);
2352                         break;
2353
2354                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2355                         res = scst_send_to_midlev(cmd);
2356                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2357                         break;
2358
2359                 case SCST_CMD_STATE_DEV_DONE:
2360                         res = scst_dev_done(cmd);
2361                         break;
2362
2363                 case SCST_CMD_STATE_XMIT_RESP:
2364                         res = scst_xmit_response(cmd);
2365                         break;
2366
2367                 case SCST_CMD_STATE_FINISHED:
2368                         res = scst_finish_cmd(cmd);
2369                         break;
2370
2371                 default:
2372                         PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2373                                cmd, cmd->state);
2374                         BUG();
2375                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2376                         break;
2377                 }
2378         } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2379
2380         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2381                 if (left_locked)
2382                         spin_lock_irq(&scst_list_lock);
2383         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2384                 spin_lock_irq(&scst_list_lock);
2385
2386                 switch (cmd->state) {
2387                 case SCST_CMD_STATE_DEV_PARSE:
2388                 case SCST_CMD_STATE_PREPARE_SPACE:
2389                 case SCST_CMD_STATE_RDY_TO_XFER:
2390                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2391                 case SCST_CMD_STATE_DEV_DONE:
2392                 case SCST_CMD_STATE_XMIT_RESP:
2393                 case SCST_CMD_STATE_FINISHED:
2394                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2395                         list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2396                         break;
2397 #ifdef EXTRACHECKS
2398                 /* states a cmd should never be in at this point */
2399                 case SCST_CMD_STATE_DEFAULT:
2400                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2401                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2402                                 "useful list (left on scst cmd list)", cmd, 
2403                                 cmd->state);
2404                         spin_unlock_irq(&scst_list_lock);
2405                         BUG();
2406                         spin_lock_irq(&scst_list_lock);
2407                         break;
2408 #endif
2409                 default:
2410                         break;
2411                 }
2412                 cmd->non_atomic_only = 1;
2413                 if (!left_locked)
2414                         spin_unlock_irq(&scst_list_lock);
2415                 wake_up(&scst_list_waitQ);
2416         } else if (res == SCST_CMD_STATE_RES_RESTART) {
2417                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2418                         spin_lock_irq(&scst_list_lock);
2419                         TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2420                         list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2421                         if (!left_locked)
2422                                 spin_unlock_irq(&scst_list_lock);
2423                 } else
2424                         BUG();
2425         } else
2426                 BUG();
2427
2428         TRACE_EXIT_RES(res);
2429         return res;
2430 }
2431
2432 /* Called under scst_list_lock and IRQs disabled */
2433 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2434 {
2435         int res;
2436         struct scst_cmd *cmd;
2437         int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2438                         SCST_CONTEXT_DIRECT_ATOMIC);
2439
2440         TRACE_ENTRY();
2441
2442         tm_dbg_check_released_cmds();
2443
2444 restart:
2445         list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2446                 if (atomic && cmd->non_atomic_only) {
2447                         TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2448                         continue;
2449                 }
2450                 if (tm_dbg_check_cmd(cmd) != 0)
2451                         goto restart;
2452                 res = scst_process_active_cmd(cmd, context, NULL, 1);
2453                 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2454                         goto restart;
2455                 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2456                         goto restart;
2457                 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2458                         break;
2459                 } else
2460                         BUG();
2461         }
2462
2463         TRACE_EXIT();
2464         return;
2465 }
2466
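/* Wake-up condition for scst_cmd_thread() */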
2467 static inline int test_cmd_lists(void)
2468 {
2469         int res = !list_empty(&scst_active_cmd_list) ||
2470             (!list_empty(&scst_init_cmd_list) &&
2471              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2472             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2473             unlikely(scst_shut_threads_count > 0) ||
2474             tm_dbg_is_release();
2475         return res;
2476 }
2477
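/*
 * Main processing loop of the cmd threads: sleeps on scst_list_waitQ until
 * there is work, then runs the init and active cmd lists in thread context.
 * Exits on shutdown or when the number of threads is being reduced, and the
 * last exiting thread releases scst_shutdown_mutex.
 */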
2478 int scst_cmd_thread(void *arg)
2479 {
2480         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2481         int n;
2482
2483         TRACE_ENTRY();
2484
2485         spin_lock(&lock);
2486         n = scst_thread_num++;
2487         spin_unlock(&lock);
2488         daemonize("scsi_tgt%d", n);
2489         recalc_sigpending();
2490         set_user_nice(current, 10);
2491         current->flags |= PF_NOFREEZE;
2492
2493         spin_lock_irq(&scst_list_lock);
2494         while (1) {
2495                 wait_queue_t wait;
2496                 init_waitqueue_entry(&wait, current);
2497
2498                 if (!test_cmd_lists()) {
2499                         add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2500                         for (;;) {
2501                                 set_current_state(TASK_INTERRUPTIBLE);
2502                                 if (test_cmd_lists())
2503                                         break;
2504                                 spin_unlock_irq(&scst_list_lock);
2505                                 schedule();
2506                                 spin_lock_irq(&scst_list_lock);
2507                         }
2508                         set_current_state(TASK_RUNNING);
2509                         remove_wait_queue(&scst_list_waitQ, &wait);
2510                 }
2511
2512                 scst_do_job_init(&scst_init_cmd_list);
2513                 scst_do_job_active(&scst_active_cmd_list,
2514                                    SCST_CONTEXT_THREAD|SCST_PROCESSIBLE_ENV);
2515
2516                 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2517                     list_empty(&scst_cmd_list) &&
2518                     list_empty(&scst_active_cmd_list) &&
2519                     list_empty(&scst_init_cmd_list)) {
2520                         break;
2521                 }
2522                 
2523                 if (unlikely(scst_shut_threads_count > 0)) {
2524                         scst_shut_threads_count--;
2525                         break;
2526                 }
2527         }
2528         spin_unlock_irq(&scst_list_lock);
2529
2530         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2531                 smp_mb__after_atomic_dec();
2532                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2533                 up(scst_shutdown_mutex);
2534         }
2535
2536         TRACE_EXIT();
2537         return 0;
2538 }
2539
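/*
 * Tasklet handler: processes the init and active cmd lists in atomic
 * (SCST_CONTEXT_DIRECT_ATOMIC) context.
 */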
2540 void scst_cmd_tasklet(long p)
2541 {
2542         TRACE_ENTRY();
2543
2544         spin_lock_irq(&scst_list_lock);
2545
2546         scst_do_job_init(&scst_init_cmd_list);
2547         scst_do_job_active(&scst_active_cmd_list, 
2548                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2549
2550         spin_unlock_irq(&scst_list_lock);
2551
2552         TRACE_EXIT();
2553         return;
2554 }
2555
2556 /*
2557  * Returns 0 on success, < 0 if the corresponding device (lun) is not found or
2558  * > 0 if SCST_FLAG_SUSPENDED set.
2559  */
2560 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2561 {
2562         struct scst_tgt_dev *tgt_dev = NULL;
2563         int res = -1;
2564
2565         TRACE_ENTRY();
2566
2567         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2568               (uint64_t)mcmd->lun);
2569
2570         spin_lock_irq(&scst_list_lock);
2571         scst_inc_cmd_count();   
2572         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2573                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2574                                     sess_tgt_dev_list_entry) 
2575                 {
2576                         if (tgt_dev->acg_dev->lun == mcmd->lun) {
2577                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2578                                 mcmd->mcmd_tgt_dev = tgt_dev;
2579                                 res = 0;
2580                                 break;
2581                         }
2582                 }
2583                 if (mcmd->mcmd_tgt_dev == NULL)
2584                         scst_dec_cmd_count();
2585         } else {
2586                 if ( !mcmd->sess->waiting) {
2587                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2588                               mcmd->sess);
2589                         list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2590                                       &scst_dev_wait_sess_list);
2591                         mcmd->sess->waiting = 1;
2592                 }
2593                 scst_dec_cmd_count();
2594                 res = 1;
2595         }
2596         spin_unlock_irq(&scst_list_lock);
2597
2598         TRACE_EXIT_HRES(res);
2599         return res;
2600 }
2601
2602 /* Called under scst_list_lock and IRQ off */
2603 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2604         struct scst_mgmt_cmd *mcmd)
2605 {
2606         TRACE_ENTRY();
2607
2608         TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2609                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2610                 mcmd->cmd_wait_count);
2611
2612         cmd->mgmt_cmnd = NULL;
2613
2614         if (cmd->completed)
2615                 mcmd->completed_cmd_count++;
2616
2617         mcmd->cmd_wait_count--;
2618         if (mcmd->cmd_wait_count > 0) {
2619                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2620                         mcmd->cmd_wait_count);
2621                 goto out;
2622         }
2623
2624         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2625
2626         if (mcmd->completed) {
2627                 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2628                         mcmd);
2629                 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2630                         &scst_active_mgmt_cmd_list);
2631         }
2632
2633         wake_up(&scst_mgmt_cmd_list_waitQ);
2634
2635 out:
2636         TRACE_EXIT();
2637         return;
2638 }
2639
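/*
 * Invokes the dev handler's task_mgmt_fn() callback, if provided, and
 * optionally translates its result into mcmd->status.  Returns
 * SCST_DEV_TM_NOT_COMPLETED if the handler has no such callback.
 */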
2640 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2641         struct scst_tgt_dev *tgt_dev, int set_status)
2642 {
2643         int res = SCST_DEV_TM_NOT_COMPLETED;
2644         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2645                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2646                       tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2647                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2648                         tgt_dev);
2649                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2650                       tgt_dev->acg_dev->dev->handler->name, res);
2651                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2652                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2653                                                 SCST_MGMT_STATUS_SUCCESS :
2654                                                 SCST_MGMT_STATUS_FAILED;
2655                 }
2656         }
2657         return res;
2658 }
2659
2660 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2661 {
2662         switch(mgmt_fn) {
2663                 case SCST_ABORT_TASK:
2664                 case SCST_ABORT_TASK_SET:
2665                 case SCST_CLEAR_TASK_SET:
2666                         return 1;
2667                 default:
2668                         return 0;
2669         }
2670 }
2671
2672 /* 
2673  * Called under scst_list_lock and IRQ off (to protect cmd
2674  * from being destroyed).
2675  * If the cmd is being executed/xmitted, completion of mcmd is deferred
2676  */
2677 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2678         int other_ini, int call_dev_task_mgmt_fn)
2679 {
2680         TRACE_ENTRY();
2681
2682         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2683
2684         if (other_ini) {
2685                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2686                 smp_mb__after_set_bit();
2687         }
2688         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2689         smp_mb__after_set_bit();
2690
2691         if (test_bit(SCST_CMD_THROTTELED, &cmd->cmd_flags))
2692                 scst_unthrottle_cmd(cmd);
2693
2694         if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2695                  scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2696
2697         if (mcmd) {
2698                 int defer;
2699                 if (cmd->tgtt->tm_sync_reply)
2700                         defer = 1;
2701                 else {
2702                         if (scst_is_strict_mgmt_fn(mcmd->fn))
2703                                 defer = test_bit(SCST_CMD_EXECUTING,
2704                                         &cmd->cmd_flags);
2705                         else
2706                                 defer = test_bit(SCST_CMD_XMITTING,
2707                                         &cmd->cmd_flags);
2708                 }
2709
2710                 if (defer) {
2711                         /*
2712                          * Delay the response until the command's finish in
2713                          * order to guarantee that "no further responses from
2714                          * the task are sent to the SCSI initiator port" after
2715                          * response from the TM function is sent (SAM)
2716                          */
2717                         TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2718                                 "xmitted (state %d), deferring ABORT...", cmd,
2719                                 cmd->tag, cmd->state);
2720 #ifdef EXTRACHECKS
2721                         if (cmd->mgmt_cmnd) {
2722                                 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2723                                         "has non-NULL mgmt_cmnd %p!!! Current "
2724                                         "mcmd %p\n", cmd, cmd->tag, cmd->state,
2725                                         cmd->mgmt_cmnd, mcmd);
2726                         }
2727 #endif
2728                         BUG_ON(cmd->mgmt_cmnd);
2729                         mcmd->cmd_wait_count++;
2730                         cmd->mgmt_cmnd = mcmd;
2731                 }
2732         }
2733
2734         tm_dbg_release_cmd(cmd);
2735
2736         TRACE_EXIT();
2737         return;
2738 }
2739
2740 /* Called under scst_list_lock and IRQ off */
2741 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2742 {
2743         int res;
2744         if (mcmd->cmd_wait_count != 0) {
2745                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2746                         "wait", mcmd->cmd_wait_count);
2747                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2748                 res = -1;
2749         } else {
2750                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2751                 res = 0;
2752         }
2753         mcmd->completed = 1;
2754         return res;
2755 }
2756
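/*
 * Moves all aborted cmds from the per-device blocked lists back to the
 * active cmd list, so they can be processed and report their abort status.
 */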
2757 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2758 {
2759         struct scst_device *dev;
2760         int wake = 0;
2761
2762         TRACE_ENTRY();
2763
2764         if (!scst_mutex_held)
2765                 down(&scst_mutex);
2766
2767         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2768                 struct scst_cmd *cmd, *tcmd;
2769                 spin_lock_bh(&dev->dev_lock);
2770                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2771                                         blocked_cmd_list_entry) {
2772                         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2773                                 list_del(&cmd->blocked_cmd_list_entry);
2774                                 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2775                                         "to active cmd list", cmd);
2776                                 spin_lock_irq(&scst_list_lock);
2777                                 list_move_tail(&cmd->cmd_list_entry,
2778                                         &scst_active_cmd_list);
2779                                 spin_unlock_irq(&scst_list_lock);
2780                                 wake = 1;
2781                         }
2782                 }
2783                 spin_unlock_bh(&dev->dev_lock);
2784         }
2785
2786         if (!scst_mutex_held)
2787                 up(&scst_mutex);
2788
2789         if (wake)
2790                 wake_up(&scst_list_waitQ);
2791
2792         TRACE_EXIT();
2793         return;
2794 }
2795
2796 /* Aborts all cmds of the given tgt_dev found in sess->search_cmd_list */
2797 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2798         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2799 {
2800         struct scst_cmd *cmd;
2801         struct scst_session *sess = tgt_dev->sess;
2802
2803         TRACE_ENTRY();
2804
2805         spin_lock_irq(&scst_list_lock);
2806
2807         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2808         list_for_each_entry(cmd, &sess->search_cmd_list, 
2809                         search_cmd_list_entry) {
2810                 if ((cmd->tgt_dev == NULL) && 
2811                     (cmd->lun == tgt_dev->acg_dev->lun))
2812                         continue;
2813                 if (cmd->tgt_dev != tgt_dev)
2814                         continue;
2815                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2816         }
2817         spin_unlock_irq(&scst_list_lock);
2818
2819         scst_unblock_aborted_cmds(scst_mutex_held);
2820
2821         TRACE_EXIT();
2822         return;
2823 }
2824
2825 /* Returns 0 if the command processing should be continued, <0 otherwise */
2826 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2827 {
2828         int res;
2829         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2830         struct scst_device *dev = tgt_dev->acg_dev->dev;
2831
2832         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2833                 tgt_dev->acg_dev->lun, mcmd);
2834
2835         spin_lock_bh(&dev->dev_lock);
2836         __scst_block_dev(dev);
2837         spin_unlock_bh(&dev->dev_lock);
2838
2839         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2840         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2841
2842         res = scst_set_mcmd_next_state(mcmd);
2843
2844         TRACE_EXIT_RES(res);
2845         return res;
2846 }
2847
2848 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2849 {
2850         /*
2851          * No need for special protection of SCST_FLAG_TM_ACTIVE, since
2852          * we can only be called from a single thread.
2853          */
2854         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2855                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2856                         mcmd);
2857                 if (!locked)
2858                         spin_lock_irq(&scst_list_lock);
2859                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
2860                         &scst_delayed_mgmt_cmd_list);
2861                 if (!locked)
2862                         spin_unlock_irq(&scst_list_lock);
2863                 return -1;
2864         } else {
2865                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
2866                 return 0;
2867         }
2868 }
2869
2870 /* Returns 0 if the command processing should be continued,
2871  * >0 if it should be requeued, <0 otherwise */
2872 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
2873 {
2874         int res = 0;
2875
2876         TRACE_ENTRY();
2877
2878         res = scst_check_delay_mgmt_cmd(mcmd, 0);
2879         if (res != 0)
2880                 goto out;
2881
2882         if (mcmd->fn == SCST_ABORT_TASK) {
2883                 struct scst_session *sess = mcmd->sess;
2884                 struct scst_cmd *cmd;
2885
2886                 spin_lock_irq(&scst_list_lock);
2887                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
2888                 if (cmd == NULL) {
2889                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
2890                                 "tag %d not found", mcmd->tag);
2891                         mcmd->status = SCST_MGMT_STATUS_FAILED;
2892                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2893                 } else {
2894                         TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
2895                                 "aborting it", cmd, mcmd->tag, cmd->sn);
2896                         mcmd->cmd_to_abort = cmd;
2897                         scst_abort_cmd(cmd, mcmd, 0, 1);
2898                         res = scst_set_mcmd_next_state(mcmd);
2899                         mcmd->cmd_to_abort = NULL; /* just in case */
2900                 }
2901                 spin_unlock_irq(&scst_list_lock);
2902         } else {
2903                 int rc;
2904                 rc = scst_mgmt_translate_lun(mcmd);
2905                 if (rc < 0) {
2906                         PRINT_ERROR_PR("Corresponding device for lun %Ld not "
2907                                 "found", (uint64_t)mcmd->lun);
2908                         mcmd->status = SCST_MGMT_STATUS_FAILED;
2909                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2910                 } else if (rc == 0)
2911                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
2912                 else
2913                         res = rc;
2914         }
2915
2916 out:
2917         TRACE_EXIT_RES(res);
2918         return res;
2919 }
2920
2921 /* Returns 0 if the command processing should be continued, <0 otherwise */
2922 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
2923 {
2924         int res, rc;
2925         struct scst_device *dev, *d;
2926         struct scst_tgt_dev *tgt_dev;
2927         int cont, c;
2928         LIST_HEAD(host_devs);
2929
2930         TRACE_ENTRY();
2931
2932         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
2933                 mcmd, mcmd->sess->sess_cmd_count);
2934
2935         down(&scst_mutex);
2936
2937         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2938                 int found = 0;
2939
2940                 spin_lock_bh(&dev->dev_lock);
2941                 __scst_block_dev(dev);
2942                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
2943                 spin_unlock_bh(&dev->dev_lock);
2944
2945                 cont = 0;
2946                 c = 0;
2947                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2948                         dev_tgt_dev_list_entry) 
2949                 {
2950                         cont = 1;
2951                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2952                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
2953                                 c = 1;
2954                         else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
2955                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
2956                 }
2957                 if (cont && !c)
2958                         continue;
2959                 
2960                 if (dev->scsi_dev == NULL)
2961                         continue;
2962
2963                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
2964                         if (dev->scsi_dev->host->host_no ==
2965                                     d->scsi_dev->host->host_no) 
2966                         {
2967                                 found = 1;
2968                                 break;
2969                         }
2970                 }
2971                 if (!found)
2972                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
2973         }
2974
2975         /*
2976          * We assume here that for all commands already sent to the devices,
2977          * the completion callbacks will be called on/after scsi_reset_provider().
2978          */
2979
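        /* Issue one bus reset per unique SCSI host collected in host_devs */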
2980         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
2981                 /* dev->scsi_dev must be non-NULL here */
2982                 TRACE(TRACE_MGMT, "Resetting host %d bus",
2983                       dev->scsi_dev->host->host_no);
2984                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
2985                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
2986                       dev->scsi_dev->host->host_no,
2987                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
2988                 if (rc != SUCCESS) {
2989                         /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
2990                         mcmd->status = SCST_MGMT_STATUS_FAILED;
2991                 }
2992         }
2993
2994         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2995                 if (dev->scsi_dev != NULL)
2996                         dev->scsi_dev->was_reset = 0;
2997         }
2998
2999         up(&scst_mutex);
3000
3001         spin_lock_irq(&scst_list_lock);
3002         tm_dbg_task_mgmt("TARGET RESET");
3003         res = scst_set_mcmd_next_state(mcmd);
3004         spin_unlock_irq(&scst_list_lock);
3005
3006         TRACE_EXIT_RES(res);
3007         return res;
3008 }
3009
3010 /* Returns 0 if the command processing should be continued, <0 otherwise */
3011 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3012 {
3013         int res, rc;
3014         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3015         struct scst_device *dev = tgt_dev->acg_dev->dev;
3016
3017         TRACE_ENTRY();
3018
3019         TRACE(TRACE_MGMT, "Resetting lun %Ld (mcmd %p)",
3020                 (uint64_t)tgt_dev->acg_dev->lun, mcmd);
3021
3022         spin_lock_bh(&dev->dev_lock);
3023         __scst_block_dev(dev);
3024         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3025         spin_unlock_bh(&dev->dev_lock);
3026
3027         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3028         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3029                 goto out_tm_dbg;
3030
3031         if (dev->scsi_dev != NULL) {
3032                 TRACE(TRACE_MGMT, "Resetting SCSI device (host %d)",
3033                       dev->scsi_dev->host->host_no);
3034                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3035                 if (rc != SUCCESS)
3036                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3037                 dev->scsi_dev->was_reset = 0;
3038         }
3039
3040 out_tm_dbg:
3041         spin_lock_irq(&scst_list_lock);
3042         tm_dbg_task_mgmt("LUN RESET");
3043         res = scst_set_mcmd_next_state(mcmd);
3044         spin_unlock_irq(&scst_list_lock);
3045
3046         TRACE_EXIT_RES(res);
3047         return res;
3048 }
3049
3050 /* Returns 0 if the command processing should be continued, <0 otherwise */
3051 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3052         int nexus_loss)
3053 {
3054         int res;
3055         struct scst_session *sess = mcmd->sess;
3056         struct scst_tgt_dev *tgt_dev;
3057
3058         TRACE_ENTRY();
3059
3060         if (nexus_loss) {
3061                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3062                         mcmd);
3063         } else {
3064                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3065                         mcmd);
3066         }
3067
3068         down(&scst_mutex);
3069         list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3070                 sess_tgt_dev_list_entry) 
3071         {
3072                 struct scst_device *dev = tgt_dev->acg_dev->dev;
3073                 int rc;
3074
3075                 spin_lock_bh(&dev->dev_lock);
3076                 __scst_block_dev(dev);
3077                 spin_unlock_bh(&dev->dev_lock);
3078
3079                 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3080                 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3081                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3082
3083                 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3084                 if (nexus_loss)
3085                         scst_reset_tgt_dev(tgt_dev, 1);
3086         }
3087         up(&scst_mutex);
3088
3089         spin_lock_irq(&scst_list_lock);
3090         res = scst_set_mcmd_next_state(mcmd);
3091         spin_unlock_irq(&scst_list_lock);
3092
3093         TRACE_EXIT_RES(res);
3094         return res;
3095 }
3096
3097 /* Returns 0 if the command processing should be continued, <0 otherwise */
3098 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3099         int nexus_loss)
3100 {
3101         int res;
3102         struct scst_tgt *tgt = mcmd->sess->tgt;
3103         struct scst_session *sess;
3104         struct scst_device *dev;
3105         struct scst_tgt_dev *tgt_dev;
3106
3107         TRACE_ENTRY();
3108
3109         if (nexus_loss) {
3110                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3111                         mcmd);
3112         } else {
3113                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3114                         mcmd);
3115         }
3116
3117         down(&scst_mutex);
3118
3119         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3120                 spin_lock_bh(&dev->dev_lock);
3121                 __scst_block_dev(dev);
3122                 spin_unlock_bh(&dev->dev_lock);
3123         }
3124
3125         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3126                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3127                         sess_tgt_dev_list_entry) 
3128                 {
3129                         int rc;
3130
3131                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3132                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3133                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3134
3135                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3136                         if (nexus_loss)
3137                                 scst_reset_tgt_dev(tgt_dev, 1);
3138                 }
3139         }
3140
3141         up(&scst_mutex);
3142
3143         spin_lock_irq(&scst_list_lock);
3144         res = scst_set_mcmd_next_state(mcmd);
3145         spin_unlock_irq(&scst_list_lock);
3146
3147         TRACE_EXIT_RES(res);
3148         return res;
3149 }
3150
3151 /* Returns 0 if the command processing should be continued, <0 otherwise */
3152 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3153 {
3154         int res = 0;
3155
3156         TRACE_ENTRY();
3157
3158         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3159
3160         switch (mcmd->fn) {
3161         case SCST_ABORT_TASK_SET:
3162         case SCST_CLEAR_TASK_SET:
3163                 res = scst_abort_task_set(mcmd);
3164                 break;
3165
3166         case SCST_LUN_RESET:
3167                 res = scst_lun_reset(mcmd);
3168                 break;
3169
3170         case SCST_TARGET_RESET:
3171                 res = scst_target_reset(mcmd);
3172                 break;
3173
3174         case SCST_ABORT_ALL_TASKS_SESS:
3175                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3176                 break;
3177
3178         case SCST_NEXUS_LOSS_SESS:
3179                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3180                 break;
3181
3182         case SCST_ABORT_ALL_TASKS:
3183                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3184                 break;
3185
3186         case SCST_NEXUS_LOSS:
3187                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3188                 break;
3189
3190         case SCST_CLEAR_ACA:
3191                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3192                 /* Nothing to do (yet) */
3193                 break;
3194
3195         default:
3196                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3197                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3198                 break;
3199         }
3200
3201         TRACE_EXIT_RES(res);
3202         return res;
3203 }
3204
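/*
 * Final stage of TM processing: clears SCST_FLAG_TM_ACTIVE, reactivates the
 * next delayed mgmt cmd (if any), notifies the target driver via
 * task_mgmt_fn_done() and unblocks the devices blocked by this TM function.
 */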
3205 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3206 {
3207         struct scst_device *dev;
3208         struct scst_tgt_dev *tgt_dev;
3209
3210         TRACE_ENTRY();
3211
3212         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3213         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3214                 struct scst_mgmt_cmd *m;
3215                 spin_lock_irq(&scst_list_lock);
3216                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3217                                 mgmt_cmd_list_entry);
3218                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3219                         "cmd list", m);
3220                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3221                 spin_unlock_irq(&scst_list_lock);
3222         }
3223
3224         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3225         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3226                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3227
3228         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3229                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3230                       mcmd->sess->tgt->tgtt->name);
3231                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3232                 TRACE_MGMT_DBG("Target %s task_mgmt_fn_done() returned",
3233                       mcmd->sess->tgt->tgtt->name);
3234         }
3235
3236         switch (mcmd->fn) {
3237         case SCST_ABORT_TASK_SET:
3238         case SCST_CLEAR_TASK_SET:
3239         case SCST_LUN_RESET:
3240                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3241                 break;
3242
3243         case SCST_TARGET_RESET:
3244         case SCST_ABORT_ALL_TASKS:
3245         case SCST_NEXUS_LOSS:
3246                 down(&scst_mutex);
3247                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3248                         scst_unblock_dev(dev);
3249                 }
3250                 up(&scst_mutex);
3251                 break;
3252
3253         case SCST_NEXUS_LOSS_SESS:
3254         case SCST_ABORT_ALL_TASKS_SESS:
3255                 down(&scst_mutex);
3256                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3257                                 sess_tgt_dev_list_entry) {
3258                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3259                 }
3260                 up(&scst_mutex);
3261                 break;
3262
3263         case SCST_CLEAR_ACA:
3264         default:
3265                 break;
3266         }
3267
3268         mcmd->tgt_specific = NULL;
3269
3270         TRACE_EXIT();
3271         return;
3272 }
3273
3274 /* Returns >0 if the cmd should be requeued */
3275 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3276 {
3277         int res = 0;
3278
3279         TRACE_ENTRY();
3280
3281         TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3282
3283         while (1) {
3284                 switch (mcmd->state) {
3285                 case SCST_MGMT_CMD_STATE_INIT:
3286                         res = scst_mgmt_cmd_init(mcmd);
3287                         if (res)
3288                                 goto out;
3289                         break;
3290
3291                 case SCST_MGMT_CMD_STATE_READY:
3292                         if (scst_mgmt_cmd_exec(mcmd))
3293                                 goto out;
3294                         break;
3295
3296                 case SCST_MGMT_CMD_STATE_DONE:
3297                         scst_mgmt_cmd_send_done(mcmd);
3298                         break;
3299
3300                 case SCST_MGMT_CMD_STATE_FINISHED:
3301                         goto out_free;
3302
3303 #ifdef EXTRACHECKS
3304                 case SCST_MGMT_CMD_STATE_EXECUTING:
3305                         BUG();
3306 #endif
3307
3308                 default:
3309                         PRINT_ERROR_PR("Unknown state %d of management command",
3310                                     mcmd->state);
3311                         res = -1;
3312                         goto out_free;
3313                 }
3314         }
3315
3316 out:
3317         TRACE_EXIT_RES(res);
3318         return res;
3319
3320 out_free:
3321         scst_free_mgmt_cmd(mcmd, 1);
3322         goto out;
3323 }
3324
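/* Wakeup condition for scst_mgmt_cmd_thread() */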
3325 static inline int test_mgmt_cmd_list(void)
3326 {
3327         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3328                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3329                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3330         return res;
3331 }
3332
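/*
 * Kernel thread processing management commands: moves mcmds from
 * scst_active_mgmt_cmd_list to scst_mgmt_cmd_list and runs their state
 * machines via scst_process_mgmt_cmd(); mcmds for which it returns >0 are
 * put back to the head of the active list to be requeued.
 */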
3333 int scst_mgmt_cmd_thread(void *arg)
3334 {
3335         struct scst_mgmt_cmd *mcmd;
3336
3337         TRACE_ENTRY();
3338
3339         daemonize("scsi_tgt_mc");
3340         recalc_sigpending();
3341         current->flags |= PF_NOFREEZE;
3342
3343         spin_lock_irq(&scst_list_lock);
3344         while (1) {
3345                 wait_queue_t wait;
3346                 init_waitqueue_entry(&wait, current);
3347
3348                 if (!test_mgmt_cmd_list()) {
3349                         add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3350                                                  &wait);
3351                         for (;;) {
3352                                 set_current_state(TASK_INTERRUPTIBLE);
3353                                 if (test_mgmt_cmd_list())
3354                                         break;
3355                                 spin_unlock_irq(&scst_list_lock);
3356                                 schedule();
3357                                 spin_lock_irq(&scst_list_lock);
3358                         }
3359                         set_current_state(TASK_RUNNING);
3360                         remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3361                 }
3362
3363                 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3364                        !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3365                 {
3366                         int rc;
3367                         mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3368                                           typeof(*mcmd), mgmt_cmd_list_entry);
3369                         TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3370                               mcmd);
3371                         list_move_tail(&mcmd->mgmt_cmd_list_entry,
3372                                        &scst_mgmt_cmd_list);
3373                         spin_unlock_irq(&scst_list_lock);
3374                         rc = scst_process_mgmt_cmd(mcmd);
3375                         spin_lock_irq(&scst_list_lock);
3376                         if (rc > 0) {
3377                                 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3378                                         "of active mgmt cmd list", mcmd);
3379                                 list_move(&mcmd->mgmt_cmd_list_entry,
3380                                        &scst_active_mgmt_cmd_list);
3381                         }
3382                 }
3383
3384                 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3385                     list_empty(&scst_active_mgmt_cmd_list)) 
3386                 {
3387                         break;
3388                 }
3389         }
3390         spin_unlock_irq(&scst_list_lock);
3391
3392         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3393                 smp_mb__after_atomic_dec();
3394                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3395                 up(scst_shutdown_mutex);
3396         }
3397
3398         TRACE_EXIT();
3399         return 0;
3400 }
3401
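/*
 * Allocates and minimally initializes a mgmt cmd for the given session.
 * Returns NULL on allocation failure or if the target driver did not
 * provide task_mgmt_fn_done().
 */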
3402 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session *sess,
3403         int fn, int atomic, void *tgt_specific)
3404 {
3405         struct scst_mgmt_cmd *mcmd = NULL;
3406
3407         TRACE_ENTRY();
3408
3409         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3410                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3411                             "(target %s)", sess->tgt->tgtt->name);
3412                 goto out;
3413         }
3414
3415         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3416         if (mcmd == NULL)
3417                 goto out;
3418
3419         mcmd->sess = sess;
3420         mcmd->fn = fn;
3421         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3422         mcmd->tgt_specific = tgt_specific;
3423
3424 out:
3425         TRACE_EXIT();
3426         return mcmd;
3427 }
3428
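/*
 * Queues a newly received mgmt cmd on the active mgmt cmd list and wakes up
 * the mgmt thread; cmds received while the session is still initializing are
 * deferred. Returns 0 on success, -1 if session initialization has failed.
 */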
3429 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3430         struct scst_mgmt_cmd *mcmd)
3431 {
3432         unsigned long flags;
3433         int res = 0;
3434
3435         TRACE_ENTRY();
3436
3437         scst_sess_get(sess);
3438
3439         spin_lock_irqsave(&scst_list_lock, flags);
3440
3441         sess->sess_cmd_count++;
3442
3443 #ifdef EXTRACHECKS
3444         if (unlikely(sess->shutting_down)) {
3445                 PRINT_ERROR_PR("%s",
3446                         "New mgmt cmd while shutting down the session");
3447                 BUG();
3448         }
3449 #endif
3450
3451         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3452                 switch(sess->init_phase) {
3453                 case SCST_SESS_IPH_INITING:
3454                         TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
3455                                 mcmd);
3456                         list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3457                                 &sess->init_deferred_mcmd_list);
3458                         goto out_unlock;
3459                 case SCST_SESS_IPH_SUCCESS:
3460                         break;
3461                 case SCST_SESS_IPH_FAILED:
3462                         res = -1;
3463                         goto out_unlock;
3464                 default:
3465                         BUG();
3466                 }
3467         }
3468
3469         TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3470         list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3471
3472         spin_unlock_irqrestore(&scst_list_lock, flags);
3473
3474         wake_up(&scst_mgmt_cmd_list_waitQ);
3475
3476 out:
3477         TRACE_EXIT();
3478         return res;
3479
3480 out_unlock:
3481         spin_unlock_irqrestore(&scst_list_lock, flags);
3482         goto out;
3483 }
3484
3485 /*
3486  * Must not be called in parallel with scst_unregister_session() for the
3487  * same sess
3488  */
3489 int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
3490                         const uint8_t *lun, int lun_len, int atomic,
3491                         void *tgt_specific)
3492 {
3493         int res = -EFAULT;
3494         struct scst_mgmt_cmd *mcmd = NULL;
3495
3496         TRACE_ENTRY();
3497
3498         if (unlikely(fn == SCST_ABORT_TASK)) {
3499                 PRINT_ERROR_PR("%s() for ABORT TASK called", __FUNCTION__);
3500                 res = -EINVAL;
3501                 goto out;
3502         }
3503
3504         mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_specific);
3505         if (mcmd == NULL)
3506                 goto out;
3507
3508         mcmd->lun = scst_unpack_lun(lun, lun_len);
3509         if (mcmd->lun == (lun_t)-1)
3510                 goto out_free;
3511
3512         TRACE(TRACE_MGMT, "sess=%p, lun=%Ld", sess, (uint64_t)mcmd->lun);
3513
3514         if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
3515                 goto out_free;
3516
3517         res = 0;
3518
3519 out:
3520         TRACE_EXIT_RES(res);
3521         return res;
3522
3523 out_free:
3524         scst_free_mgmt_cmd(mcmd, 0);
3525         mcmd = NULL;
3526         goto out;
3527 }
3528
3529 /*
3530  * Must not be called in parallel with scst_unregister_session() for the
3531  * same sess
3532  */
3533 int scst_rx_mgmt_fn_tag(struct scst_session *sess, int fn, uint32_t tag,
3534                        int atomic, void *tgt_specific)
3535 {
3536         int res = -EFAULT;
3537         struct scst_mgmt_cmd *mcmd = NULL;
3538
3539         TRACE_ENTRY();
3540
3541         if (unlikely(fn != SCST_ABORT_TASK)) {
3542                 PRINT_ERROR_PR("%s(%d) called", __FUNCTION__, fn);
3543                 res = -EINVAL;
3544                 goto out;
3545         }
3546
3547         mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_specific);
3548         if (mcmd == NULL)
3549                 goto out;
3550
3551         mcmd->tag = tag;
3552
3553         TRACE(TRACE_MGMT, "sess=%p, tag=%d", sess, mcmd->tag);
3554
3555         if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
3556                 goto out_free;
3557
3558         res = 0;
3559
3560 out:
3561         TRACE_EXIT_RES(res);
3562         return res;
3563
3564 out_free:
3565         scst_free_mgmt_cmd(mcmd, 0);
3566         mcmd = NULL;
3567         goto out;
3568 }
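
/*
 * Illustrative usage sketch (not part of the original code): a target
 * driver that has received an ABORT TASK TMF in atomic (e.g. interrupt)
 * context could forward it to SCST roughly as follows. Both
 * my_complete_tmf_failed() and my_tm_cookie are hypothetical driver-side
 * names; the cookie is passed back to the driver in task_mgmt_fn_done().
 *
 *	if (scst_rx_mgmt_fn_tag(sess, SCST_ABORT_TASK, tag, 1,
 *			my_tm_cookie) != 0)
 *		my_complete_tmf_failed(my_tm_cookie);
 */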
3569
3570 /* scst_mutex supposed to be held */
3571 static struct scst_acg *scst_find_acg(const char *initiator_name)
3572 {
3573         struct scst_acg *acg, *res = NULL;
3574         struct scst_acn *n;
3575
3576         TRACE_ENTRY();
3577         
3578         list_for_each_entry(acg, &scst_acg_list, scst_acg_list_entry) {
3579                 list_for_each_entry(n, &acg->acn_list, 
3580                         acn_list_entry) 
3581                 {
3582                         if (strcmp(n->name, initiator_name) == 0) {
3583                                 TRACE_DBG("Access control group %s found", 
3584                                         acg->acg_name);
3585                                 res = acg;
3586                                 goto out;
3587                         }
3588                 }
3589         }
3590
3591 out:    
3592         TRACE_EXIT_HRES(res);
3593         return res;
3594 }
3595
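/*
 * Completes initialization of a session: assigns it to its access control
 * group (falling back to scst_default_acg), adds it to the target's session
 * list and allocates its tgt_dev structures.
 */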
3596 static int scst_init_session(struct scst_session *sess)
3597 {
3598         int res = 0;
3599         struct scst_acg *acg;
3600         struct scst_cmd *cmd;
3601         struct scst_mgmt_cmd *mcmd, *tm;
3602         int mwake = 0;
3603
3604         TRACE_ENTRY();
3605         
3606         down(&scst_mutex);
3607
3608         if (sess->initiator_name) {
3609                 acg = scst_find_acg(sess->initiator_name);
3610                 if (acg == NULL) {
3611                         PRINT_INFO_PR("Name %s not found, using default group",
3612                                 sess->initiator_name);
3613                         acg = scst_default_acg;
3614                 }
3615         }
3616         else
3617                 acg = scst_default_acg;
3618
3619         sess->acg = acg;
3620         TRACE_DBG("Assigning session %p to acg %s", sess, acg->acg_name);
3621         list_add_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
3622
3623         TRACE_DBG("Adding sess %p to tgt->sess_list", sess);
3624         list_add_tail(&sess->sess_list_entry, &sess->tgt->sess_list);
3625
3626         res = scst_sess_alloc_tgt_devs(sess);