- Semaphores converted to mutexes
[mirror/scst/.git] / scst / src / scst_targ.c
1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28 #include <linux/kthread.h>
29
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static void scst_cmd_set_sn(struct scst_cmd *cmd);
34 static int __scst_init_cmd(struct scst_cmd *cmd);
35
36 static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
37 {
38         struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
39         unsigned long flags;
40
41         spin_lock_irqsave(&t->tasklet_lock, flags);
42         TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
43                 smp_processor_id());
44         list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
45         spin_unlock_irqrestore(&t->tasklet_lock, flags);
46
47         tasklet_schedule(&t->tasklet);
48 }
49
/* 
 * Must not been called in parallel with scst_unregister_session() for the 
 * same sess
 */
/*
 * Allocate and minimally initialize a new SCST command for session @sess.
 *
 * @lun/@lun_len: SCSI-encoded LUN as received from the initiator
 * @cdb/@cdb_len: the command's CDB bytes
 * @atomic:       non-zero if the caller cannot sleep (selects GFP_ATOMIC)
 *
 * Returns the new cmd, or NULL if allocation failed.  Note that a bad
 * LUN or oversized CDB does NOT fail this call: per the comment below,
 * those errors are detected later in scst_cmd_init_done(), so the target
 * driver gets a proper SCSI response instead of a NULL here.
 */
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
			     const uint8_t *lun, int lun_len,
			     const uint8_t *cdb, int cdb_len, int atomic)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

#ifdef EXTRACHECKS
	/* Receiving a new cmd on a shutting-down session is a caller bug */
	if (unlikely(sess->shutting_down)) {
		PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
		sBUG();
	}
#endif

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (cmd == NULL)
		goto out;

	cmd->sess = sess;
	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;
	cmd->state = SCST_CMD_STATE_INIT_WAIT;

	/* 
	 * For both wrong lun and CDB defer the error reporting for
	 * scst_cmd_init_done()
	 */

	/* scst_unpack_lun() reports failure via its return value, which is
	 * checked in scst_cmd_init_done() */
	cmd->lun = scst_unpack_lun(lun, lun_len);

	/* An oversized CDB is deliberately not copied: cmd->cdb_len stays 0
	 * and is rejected in scst_cmd_init_done() */
	if (cdb_len <= SCST_MAX_CDB_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;
	}

	TRACE_DBG("cmd %p, sess %p", cmd, sess);
	/* The cmd holds a reference on its session until it is freed */
	scst_sess_get(sess);

out:
	TRACE_EXIT();
	return cmd;
}
97
/*
 * First-stage initialization of a newly received cmd.
 *
 * Returns the (possibly downgraded) context in which processing should
 * continue, or -1 if the cmd was handed off to the init thread via
 * scst_init_cmd_list (processing then continues asynchronously).
 * NOTE(review): for negative __scst_init_cmd() results the original
 * @context is returned unchanged — presumably the callee already set up
 * the cmd state for that case; confirm against __scst_init_cmd().
 */
static int scst_init_cmd(struct scst_cmd *cmd, int context)
{
	int rc;

	TRACE_ENTRY();

	/* See the comment in scst_do_job_init() */
	/* To preserve ordering, new cmds must not bypass ones already
	 * queued for the init thread */
	if (unlikely(!list_empty(&scst_init_cmd_list))) {
		TRACE_MGMT_DBG("%s", "init cmd list busy");
		goto out_redirect;
	}
	/* NOTE(review): pairs with ordering in scst_do_job_init() (per the
	 * comment above) — verify the matching write barrier there */
	smp_rmb();

	rc = __scst_init_cmd(cmd);
	if (unlikely(rc > 0))
		goto out_redirect;	/* must be retried by the init thread */
	else if (unlikely(rc != 0))
		goto out;

	/* Small context optimization */
	/* Downgrade atomic contexts to the thread unless the tgt_dev flags
	 * say the next stage for this data direction may run atomically */
	if (((context == SCST_CONTEXT_TASKLET) ||
	     (context == SCST_CONTEXT_DIRECT_ATOMIC)) && 
	    scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_data_direction == SCST_DATA_WRITE) {
			if ( !test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				context = SCST_CONTEXT_THREAD;
		} else {
			if ( !test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				context = SCST_CONTEXT_THREAD;
		}
	}

out:
	TRACE_EXIT_RES(context);
	return context;

out_redirect:
	if (cmd->preprocessing_only) {
		/*
		 * Poor man solution for single threaded targets, where 
		 * blocking receiver at least sometimes means blocking all.
		 */
		sBUG_ON(context != SCST_CONTEXT_DIRECT);
		/* Fail the cmd with BUSY instead of deferring it */
		scst_set_busy(cmd);
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
	} else {
		unsigned long flags;
		spin_lock_irqsave(&scst_init_lock, flags);
		TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
			"%d)", cmd, atomic_read(&scst_cmd_count));
		list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
		/* Aborted cmds make the init thread poll more eagerly */
		if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			scst_init_poll_cnt++;
		spin_unlock_irqrestore(&scst_init_lock, flags);
		wake_up(&scst_init_cmd_list_waitQ);
		context = -1;	/* cmd now owned by the init thread */
	}
	goto out;
}
159
/*
 * Target driver callback: the target has finished receiving a command
 * previously allocated with scst_rx_cmd().  Validates the LUN and CDB,
 * registers the cmd in the session's search list, and pushes it into the
 * SCST processing state machine in the requested execution context.
 *
 * @pref_context: SCST_CONTEXT_* hint for where to continue processing;
 * it may be overridden (tasklet in IRQ, thread for unknown values, or
 * whatever scst_init_cmd() decides).
 */
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
{
	unsigned long flags;
	struct scst_session *sess = cmd->sess;

	TRACE_ENTRY();

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%lld, lun=%Ld, CDB len=%d", cmd->tag, 
		(uint64_t)cmd->lun, cmd->cdb_len);
	TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Recieving CDB",
		cmd->cdb, cmd->cdb_len);

#ifdef EXTRACHECKS
	/* Direct contexts are illegal in hard IRQ context; downgrade to
	 * the tasklet and complain */
	if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
			 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
	{
		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;
	}
#endif

	atomic_inc(&sess->sess_cmd_count);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

	/* Make the cmd findable by tag (e.g. for abort handling) */
	list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);

	/*
	 * While the session is still initializing, cmds are deferred until
	 * init completes; if init failed, they are finished with BUSY.
	 */
	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		switch(sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry, 
				&sess->init_deferred_cmd_list);
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			goto out;
		case SCST_SESS_IPH_FAILED:
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			scst_set_busy(cmd);
			cmd->state = SCST_CMD_STATE_XMIT_RESP;
			goto active;
		default:
			sBUG();
		}
	}

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);

	/* (lun_t)-1 is scst_rx_cmd()'s marker for an unparsable LUN */
	if (unlikely(cmd->lun == (lun_t)-1)) {
		PRINT_ERROR_PR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		goto active;
	}

	/* cdb_len == 0 means scst_rx_cmd() rejected an oversized CDB */
	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR_PR("Wrong CDB len %d, finishing cmd", 0);
		scst_set_cmd_error(cmd,
			   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		goto active;
	}

	cmd->state = SCST_CMD_STATE_INIT;
	/* cmd must be inited here to keep the order */
	pref_context = scst_init_cmd(cmd, pref_context);
	/* < 0: the cmd was queued for the init thread — nothing more to do */
	if (unlikely(pref_context < 0))
		goto out;

active:
	/* Here cmd must not be in any cmd list, no locks */
	switch (pref_context) {
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet(cmd);
		break;

	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, pref_context);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	default:
		PRINT_ERROR_PR("Context %x is undefined, using the thread one",
			pref_context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		/* Queue the cmd for a processing thread and wake one up;
		 * HEAD_OF_QUEUE cmds jump to the front of the list */
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;
	}

out:
	TRACE_EXIT();
	return;
}
269
/*
 * SCST state machine step: parse the cmd's CDB, determine data direction
 * and transfer length, call the dev handler's parse() callback and decide
 * the cmd's next state.
 *
 * Returns one of SCST_CMD_STATE_RES_*:
 *   CONT_SAME   - continue processing the cmd (cmd->state updated)
 *   CONT_NEXT   - stop processing this cmd for now (handler took over)
 *   NEED_THREAD - reschedule to thread context (atomic ctx unusable here)
 */
static int scst_parse_cmd(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;
	struct scst_device *dev = cmd->dev;
	struct scst_info_cdb cdb_info;
	int atomic = scst_cmd_atomic(cmd);
	int orig_bufflen;

	TRACE_ENTRY();

	/* Handler's parse() may sleep unless it declared parse_atomic */
	if (atomic && !dev->handler->parse_atomic) {
		TRACE_DBG("Dev handler %s parse() can not be "
		      "called in atomic context, rescheduling to the thread",
		      dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	cmd->inc_expected_sn_on_done = dev->handler->inc_expected_sn_on_done;

	/* Internal or pre-parsed cmds skip CDB decoding entirely */
	if (cmd->skip_parse || cmd->internal)
		goto call_parse;

	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from CDB.
	 * Additionally, not all transports support supplying the expected
	 * transfer data.
	 */

	if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
			&cdb_info) != 0)) 
	{
		/* Rate-limit the "unknown opcode" message to 10 occurrences
		 * NOTE(review): 't' is file-static-in-function and updated
		 * without locking — racy but only affects log volume */
		static int t;
		if (t < 10) {
			t++;
			PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
				"Should you update scst_scsi_op_table?",
				cmd->cdb[0], dev->handler->name);
		}
		/* Unknown opcode: fall back to the initiator-supplied
		 * expected values if available, else reject the cmd */
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_SCSI, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore (most probably) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR_PR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
				   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
				goto out_xmit;
			}
		} else {
			PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
			     "target %s not supplied expected values. "
			     "Returning INVALID OPCODE.", cmd->cdb[0], 
			     dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
			goto out_xmit;
		}
	} else {
		TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
			"set %s), transfer_len=%d (expected len %d), flags=%d",
			cdb_info.op_name, cdb_info.direction,
			cmd->expected_data_direction,
			scst_cmd_is_expected_set(cmd) ? "yes" : "no",
			cdb_info.transfer_len, cmd->expected_transfer_len,
			cdb_info.flags);

		/* Restore (most probably) lost CDB length */
		cmd->cdb_len = cdb_info.cdb_len;

		cmd->data_direction = cdb_info.direction;
		if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
			cmd->bufflen = cdb_info.transfer_len;
		/* else cmd->bufflen remained as it was inited in 0 */
	}

	/* The control byte is the last byte of the CDB; NACA and LINK
	 * features are not implemented, so reject cmds requesting them */
	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
			    "(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR_PR("Linked commands are not supported "
			    "(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

call_parse:
	/* Remember pre-parse() bufflen so a shrinking target buffer can be
	 * detected below */
	orig_bufflen = cmd->bufflen;

	if (likely(!scst_is_cmd_local(cmd))) {
		TRACE_DBG("Calling dev handler %s parse(%p)",
		      dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd, &cdb_info);
		/* Caution: cmd can be already dead here */
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		switch (state) {
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			TRACE_DBG("Dev handler %s parse() requested thread "
			      "context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		case SCST_CMD_STATE_STOP:
			TRACE_DBG("Dev handler %s parse() requested stop "
				"processing", dev->handler->name);
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			goto out;
		}

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
	}
	else
		state = SCST_CMD_STATE_PREPARE_SPACE;

	/* Never transfer more than the initiator said it expects */
	if (scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_transfer_len < cmd->bufflen) {
			TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
				"cmd->bufflen(%d), using expected_transfer_len "
				"instead", cmd->expected_transfer_len,
				cmd->bufflen);
			cmd->bufflen = cmd->expected_transfer_len;
		}
	}

	/* -1 means "not set by parse()": default data_len to bufflen */
	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	/* A target-supplied buffer smaller than what the cmd needs is a
	 * fatal target driver bug */
	if (cmd->data_buf_alloced && (orig_bufflen > cmd->bufflen)) {
		PRINT_ERROR_PR("Target driver supplied data buffer (size %d), "
			"is less, than required (size %d)", cmd->bufflen,
			orig_bufflen);
		goto out_error;
	}

#ifdef EXTRACHECKS
	/* Sanity-check that direction, bufflen and next state returned by
	 * parse() form a consistent combination */
	if ((state != SCST_CMD_STATE_XMIT_RESP) &&
	    (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
		(state != SCST_CMD_STATE_DEV_PARSE)) ||
	    ((cmd->bufflen != 0) && 
		(cmd->data_direction == SCST_DATA_NONE) &&
		(cmd->status == 0)) ||
	    ((cmd->bufflen == 0) && 
		(cmd->data_direction != SCST_DATA_NONE)) ||
	    ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
		(state > SCST_CMD_STATE_PREPARE_SPACE))))
	{
		PRINT_ERROR_PR("Dev handler %s parse() returned "
			       "invalid cmd data_direction %d, "
			       "bufflen %d or state %d (opcode 0x%x)",
			       dev->handler->name, 
			       cmd->data_direction, cmd->bufflen,
			       state, cmd->cdb[0]);
		goto out_error;
	}
#endif

	/* Only a known-good set of next states is accepted from parse() */
	switch (state) {
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR_PR("Dev handler %s parse() returned "
			     "invalid cmd state %d (opcode %d)", 
			     dev->handler->name, state, cmd->cdb[0]);
		} else {
			PRINT_ERROR_PR("Dev handler %s parse() returned "
				"error %d (opcode %d)", dev->handler->name, 
				state, cmd->cdb[0]);
		}
		goto out_error;
	}

	/* -1 means "not set": READ cmds respond with the full buffer,
	 * everything else with no data */
	if (cmd->resp_data_len == -1) {
		if (cmd->data_direction == SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		else
			 cmd->resp_data_len = 0;
	}
	
out:
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;

out_xmit:
	/* Sense already set: skip execution, go straight to the response */
	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
493
494 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
495 void scst_cmd_mem_work_fn(void *p)
496 #else
497 void scst_cmd_mem_work_fn(struct work_struct *work)
498 #endif
499 {
500         TRACE_ENTRY();
501
502         spin_lock_bh(&scst_cmd_mem_lock);
503
504         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
505         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
506                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
507                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
508         } else {
509                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
510                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
511         }
512         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
513
514         spin_unlock_bh(&scst_cmd_mem_lock);
515
516         TRACE_EXIT();
517         return;
518 }
519
520 int scst_check_mem(struct scst_cmd *cmd)
521 {
522         int res = 0;
523
524         TRACE_ENTRY();
525
526         if (cmd->mem_checked)
527                 goto out;
528
529         spin_lock_bh(&scst_cmd_mem_lock);
530
531         scst_cur_cmd_mem += cmd->bufflen;
532         cmd->mem_checked = 1;
533         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
534                 goto out_unlock;
535
536         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
537                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
538                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
539                 (cmd->sess->initiator_name[0] == '\0') ?
540                   "Anonymous" : cmd->sess->initiator_name,
541                 scst_cur_max_cmd_mem >> 10);
542
543         scst_cur_cmd_mem -= cmd->bufflen;
544         cmd->mem_checked = 0;
545         scst_set_busy(cmd);
546         cmd->state = SCST_CMD_STATE_XMIT_RESP;
547         res = 1;
548
549 out_unlock:
550         spin_unlock_bh(&scst_cmd_mem_lock);
551
552 out:
553         TRACE_EXIT_RES(res);
554         return res;
555 }
556
557 static void scst_low_cur_max_cmd_mem(void)
558 {
559         TRACE_ENTRY();
560
561         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
562                 cancel_delayed_work(&scst_cmd_mem_work);
563                 flush_scheduled_work();
564                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
565         }
566
567         spin_lock_bh(&scst_cmd_mem_lock);
568
569         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
570                                 (scst_cur_cmd_mem >> 2);
571         if (scst_cur_max_cmd_mem < 16*1024*1024)
572                 scst_cur_max_cmd_mem = 16*1024*1024;
573
574         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
575                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
576                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
577                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
578         }
579
580         spin_unlock_bh(&scst_cmd_mem_lock);
581
582         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
583
584         TRACE_EXIT();
585         return;
586 }
587
/*
 * SCST state machine step: allocate (or accept a target-provided) data
 * buffer for the cmd, then advance it to RDY_TO_XFER (writes) or
 * SEND_TO_MIDLEV (everything else), or hand it to the target driver's
 * preprocessing_done() when preprocessing_only is set.
 *
 * Returns SCST_CMD_STATE_RES_CONT_SAME, CONT_NEXT or NEED_THREAD.
 */
static int scst_prepare_space(struct scst_cmd *cmd)
{
	int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;

	TRACE_ENTRY();

	/* No data phase — nothing to allocate */
	if (cmd->data_direction == SCST_DATA_NONE)
		goto prep_done;

	/* Give the target driver first shot at providing the buffer */
	if (cmd->data_buf_tgt_alloc) {
		int orig_bufflen = cmd->bufflen;

		TRACE_MEM("%s", "Custom tgt data buf allocation requested");

		/* alloc_data_buf(): 0 = buffer provided, > 0 = fall back to
		 * SCST allocation, < 0 = error (checked at 'check:') */
		r = cmd->tgtt->alloc_data_buf(cmd);
		if (r > 0)
			goto alloc;
		else if (r == 0) {
			cmd->data_buf_alloced = 1;
			/* Target must not shrink the buffer below need */
			if (unlikely(orig_bufflen < cmd->bufflen)) {
				PRINT_ERROR_PR("Target driver allocated data "
					"buffer (size %d), is less, than "
					"required (size %d)", orig_bufflen,
					cmd->bufflen);
				goto out_error;
			}
		} else
			goto check;
	}

alloc:
	/* Enforce the global command-memory budget before allocating */
	r = scst_check_mem(cmd);
	if (unlikely(r != 0))
		goto out;	/* cmd already set to BUSY/XMIT_RESP */
	else if (!cmd->data_buf_alloced) {
		r = scst_alloc_space(cmd);
	} else {
		TRACE_MEM("%s", "data_buf_alloced set, returning");
	}

check:
	if (r != 0) {
		/* Atomic allocation failure is retried in thread context
		 * before giving up with BUSY/QUEUE FULL */
		if (scst_cmd_atomic(cmd)) {
			TRACE_MEM("%s", "Atomic memory allocation failed, "
			      "rescheduling to the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		} else
			goto out_no_space;
	}

prep_done:
	if (cmd->preprocessing_only) {
		/* preprocessing_done() may sleep unless declared atomic */
		if (scst_cmd_atomic(cmd) && 
		    !cmd->tgtt->preprocessing_done_atomic) {
			TRACE_DBG("%s", "preprocessing_done() can not be "
			      "called in atomic context, rescheduling to "
			      "the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
			TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
				"cmd %p", cmd);
			cmd->state = SCST_CMD_STATE_DEV_DONE;
			res = SCST_CMD_STATE_RES_CONT_SAME;
			goto out;
		}

		/* Hand off to the target driver; processing resumes when it
		 * calls scst_restart_cmd() */
		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;

		TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
		cmd->tgtt->preprocessing_done(cmd);
		TRACE_DBG("%s", "preprocessing_done() returned");
		goto out;

	}

	/* Writes need the data from the initiator first; everything else
	 * can go straight to execution */
	switch (cmd->data_direction) {
	case SCST_DATA_WRITE:
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		break;

	default:
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
		break;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_no_space:
	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
	/* Lower the global memory limit so we stop overcommitting */
	scst_low_cur_max_cmd_mem();
	scst_set_busy(cmd);
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
697
/*
 * Target driver callback: resume a cmd previously handed out via
 * preprocessing_done().  @status is one of SCST_PREPROCESS_STATUS_*;
 * on success the cmd proceeds to the data-transfer or execution phase,
 * otherwise it is finished with the appropriate error.
 *
 * @pref_context: SCST_CONTEXT_* hint for where to continue processing.
 */
void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
{
	TRACE_ENTRY();

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE_DBG("tag=%lld, status=%#x", scst_cmd_get_tag(cmd), status);

#ifdef EXTRACHECKS
	/* Direct contexts are illegal in hard IRQ context; downgrade */
	if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
			 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
	{
		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;
	}
#endif

	switch (status) {
	case SCST_PREPROCESS_STATUS_SUCCESS:
		/* Same direction-based dispatch as scst_prepare_space() */
		switch (cmd->data_direction) {
		case SCST_DATA_WRITE:
			cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
			break;
		default:
			cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
			break;
		}
		/* Assign the ordering sequence number if it was deferred */
		if (cmd->no_sn)
			scst_cmd_set_sn(cmd);
		/* Small context optimization */
		/* Downgrade atomic contexts to the thread unless tgt_dev
		 * flags say the next stage may run atomically */
		if ((pref_context == SCST_CONTEXT_TASKLET) || 
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
			if (cmd->data_direction == SCST_DATA_WRITE) {
				if ( !test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			} else {
				if ( !test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			}
		}
		break;

	case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
		/* Target driver already set the sense — just finish */
		cmd->state = SCST_CMD_STATE_DEV_DONE;
		break;

	case SCST_PREPROCESS_STATUS_ERROR_FATAL:
		/* Fatal: finish the cmd without sending any response */
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_PREPROCESS_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			   SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_DEV_DONE;
		break;

	default:
		PRINT_ERROR_PR("%s() received unknown status %x", __func__,
			status);
		cmd->state = SCST_CMD_STATE_DEV_DONE;
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
768
/* No locks */
/*
 * Queue a cmd that got SCST_TGT_RES_QUEUE_FULL from the target driver
 * onto the tgt's retry list, arming the retry timer if needed.
 *
 * @finished_cmds: snapshot of tgt->finished_cmds taken by the caller
 * BEFORE the QUEUE FULL was returned.
 *
 * Returns 0 if the cmd was queued for a delayed retry, or -1 if at least
 * one cmd finished since the snapshot (so the caller should retry
 * immediately instead of queueing).
 */
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->sess->tgt;
	int res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_cmds++;
	/* NOTE(review): full barrier between the retry_cmds increment and
	 * the finished_cmds read below — presumably pairs with the path
	 * that increments tgt->finished_cmds; confirm at that site */
	smp_mb();
	TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
	      tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
		      "(finished_cmds=%d, tgt->finished_cmds=%d, "
		      "retry_cmds=%d)", finished_cmds,
		      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE(TRACE_RETRY, "Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	/* Arm the retry timer once; it stays armed while cmds are queued */
	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	TRACE_EXIT_RES(res);
	return res;
}
809
/*
 * Notifies the target driver that SCST is ready to receive the cmd's
 * data by calling its rdy_to_xfer() callback.  QUEUE FULL results are
 * handled by queuing the cmd for a timed retry (see
 * scst_queue_retry_cmd()).  Returns an SCST_CMD_STATE_RES_* code
 * telling the state machine how to continue.
 */
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
        int res, rc;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
                goto out_dev_done;
        }

        /* No callback: the target driver needs no data-ready notification */
        if (cmd->tgtt->rdy_to_xfer == NULL) {
                cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                goto out;
        }

        if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
                TRACE_DBG("%s", "rdy_to_xfer() can not be "
                      "called in atomic context, rescheduling to the thread");
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        while (1) {
                /*
                 * Snapshot finished_cmds BEFORE calling the driver, so
                 * scst_queue_retry_cmd() can detect completions that
                 * raced with a QUEUE FULL return.
                 */
                int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

                res = SCST_CMD_STATE_RES_CONT_NEXT;
                cmd->state = SCST_CMD_STATE_DATA_WAIT;

                TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef DEBUG_RETRY
                /* Randomly inject QUEUE FULL to exercise the retry path */
                if (((scst_random() % 100) == 75))
                        rc = SCST_TGT_RES_QUEUE_FULL;
                else
#endif
                        rc = cmd->tgtt->rdy_to_xfer(cmd);
                TRACE_DBG("rdy_to_xfer() returned %d", rc);

                if (likely(rc == SCST_TGT_RES_SUCCESS))
                        goto out;

                /* Restore the previous state */
                cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

                switch (rc) {
                case SCST_TGT_RES_QUEUE_FULL:
                {
                        /* Queued for delayed retry, or retry right away */
                        if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
                                break;
                        else
                                continue;
                }

                case SCST_TGT_RES_NEED_THREAD_CTX:
                {
                        TRACE_DBG("Target driver %s "
                              "rdy_to_xfer() requested thread "
                              "context, rescheduling", cmd->tgtt->name);
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        break;
                }

                default:
                        goto out_error_rc;
                }
                break;
        }

out:
        TRACE_EXIT_HRES(res);
        return res;

out_error_rc:
        if (rc == SCST_TGT_RES_FATAL_ERROR) {
                PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
                     "fatal error", cmd->tgtt->name);
        } else {
                PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
                            "value %d", cmd->tgtt->name, rc);
        }
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
899
/* No locks, but might be in IRQ */
/*
 * Redirects further cmd processing to the requested execution context:
 * directly in the current context, via a per-CPU tasklet, or by queuing
 * the cmd for the processing threads.  If check_retries is set, the
 * target's QUEUE FULL retry list is kicked first.
 */
void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
        int check_retries)
{
        unsigned long flags;

        TRACE_ENTRY();

        TRACE_DBG("Context: %d", context);

        switch(context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                if (check_retries)
                        scst_check_retries(cmd->tgt);
                scst_process_active_cmd(cmd, context);
                break;

        default:
                /* Unknown contexts intentionally fall through to THREAD */
                PRINT_ERROR_PR("Context %x is unknown, using the thread one",
                            context);
                /* go through */
        case SCST_CONTEXT_THREAD:
                if (check_retries)
                        scst_check_retries(cmd->tgt);
                spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
                TRACE_DBG("Adding cmd %p to active cmd list", cmd);
                /* HEAD_OF_QUEUE cmds are processed before all others */
                if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
                        list_add(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                else
                        list_add_tail(&cmd->cmd_list_entry,
                                &cmd->cmd_lists->active_cmd_list);
                wake_up(&cmd->cmd_lists->cmd_list_waitQ);
                spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
                break;

        case SCST_CONTEXT_TASKLET:
                if (check_retries)
                        scst_check_retries(cmd->tgt);
                scst_schedule_tasklet(cmd);
                break;
        }

        TRACE_EXIT();
        return;
}
947
/*
 * Called by the target driver when data receive for a WRITE-direction
 * cmd has finished.  On success the cmd continues to the mid-level; on
 * any error it is moved to DEV_DONE (with hardware-error sense set
 * unless the driver already set its own).  Further processing is then
 * redirected to pref_context.
 */
void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
{
        TRACE_ENTRY();

        TRACE_DBG("Preferred context: %d", pref_context);
        TRACE(TRACE_SCSI, "tag=%lld status=%#x", scst_cmd_get_tag(cmd), status);

#ifdef EXTRACHECKS
        /* Direct contexts are illegal in IRQ: downgrade to tasklet */
        if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
                         (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
        {
                PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
                        "SCST_CONTEXT_TASKLET instead\n", pref_context,
                        cmd->tgtt->name);
                pref_context = SCST_CONTEXT_TASKLET;
        }
#endif

        switch (status) {
        case SCST_RX_STATUS_SUCCESS:
                cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
                /* Small context optimization */
                if ((pref_context == SCST_CONTEXT_TASKLET) || 
                    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
                        if ( !test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC, 
                                        &cmd->tgt_dev->tgt_dev_flags))
                                pref_context = SCST_CONTEXT_THREAD;
                }
                break;

        case SCST_RX_STATUS_ERROR_SENSE_SET:
                /* Sense was already set by the target driver */
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_RX_STATUS_ERROR_FATAL:
                /* Fatal: no response can be sent to the initiator */
                set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
                /* go through */
        case SCST_RX_STATUS_ERROR:
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_hardw_error));
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;

        default:
                PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
                        status);
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;
        }

        scst_proccess_redirect_cmd(cmd, pref_context, 1);

        TRACE_EXIT();
        return;
}
1003
/*
 * Invokes the target driver's pre_exec() callback before the cmd is
 * sent to the mid-level.  Returns 0 when execution may continue, or -1
 * (with *action set to SCST_CMD_STATE_RES_CONT_SAME and cmd->state set
 * to DEV_DONE) when the cmd must be completed with an error.
 */
static int scst_tgt_pre_exec(struct scst_cmd *cmd, int *action)
{
        int res = 0, rc;

        TRACE_ENTRY();

        TRACE_DBG("Calling pre_exec(%p)", cmd);
        rc = cmd->tgtt->pre_exec(cmd);
        TRACE_DBG("pre_exec() returned %d", rc);

        if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
                switch(rc) {
                case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
                        /* Sense was already set by the target driver */
                        cmd->state = SCST_CMD_STATE_DEV_DONE;
                        *action = SCST_CMD_STATE_RES_CONT_SAME;
                        res = -1;
                        break;
                case SCST_PREPROCESS_STATUS_ERROR_FATAL:
                        /* Fatal: no response can be sent to the initiator */
                        set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
                        /* go through */
                case SCST_PREPROCESS_STATUS_ERROR:
                        scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_hardw_error));
                        cmd->state = SCST_CMD_STATE_DEV_DONE;
                        *action = SCST_CMD_STATE_RES_CONT_SAME;
                        res = -1;
                        break;
                default:
                        /* Any other value is a driver bug */
                        sBUG();
                        break;
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}
1040
/*
 * Advances the expected SN for the cmd's tgt_dev (unless the cmd is
 * HEAD_OF_QUEUE, which executes out of SN order) and, if that releases
 * a deferred cmd, puts it on the active cmd list and wakes a
 * processing thread.
 */
static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
{
        struct scst_cmd *c;

        if (likely(cmd->queue_type != SCST_CMD_QUEUE_HEAD_OF_QUEUE))
                scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
        c = scst_check_deferred_commands(cmd->tgt_dev);
        if (c != NULL) {
                unsigned long flags;
                spin_lock_irqsave(&c->cmd_lists->cmd_list_lock, flags);
                TRACE_SN("Adding cmd %p to active cmd list", c);
                list_add_tail(&c->cmd_list_entry,
                        &c->cmd_lists->active_cmd_list);
                wake_up(&c->cmd_lists->cmd_list_waitQ);
                spin_unlock_irqrestore(&c->cmd_lists->cmd_list_lock, flags);
        }
}
1058
/*
 * Common completion processing after the mid-level finished a cmd:
 * decodes the SCSI result word, applies any residual count, copies the
 * sense data and, for read-only exported devices, patches MODE SENSE
 * responses to report write protection.
 */
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
        const uint8_t *rq_sense, int rq_sense_len, int resid)
{
        unsigned char type;

        TRACE_ENTRY();

        if (cmd->inc_expected_sn_on_done)
                scst_inc_check_expected_sn(cmd);

        /* Split the mid-level result word into its component bytes */
        cmd->status = result & 0xff;
        cmd->msg_status = msg_byte(result);
        cmd->host_status = host_byte(result);
        cmd->driver_status = driver_byte(result);
        if (unlikely(resid != 0)) {
#ifdef EXTRACHECKS
                if ((resid < 0) || (resid > cmd->resp_data_len)) {
                        PRINT_ERROR_PR("Wrong resid %d (cmd->resp_data_len=%d)",
                                resid, cmd->resp_data_len);
                } else
#endif
                        scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
        }

        /* 
         * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
         * in init_scst()
         */
        memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
        memset(&cmd->sense_buffer[rq_sense_len], 0,
                sizeof(cmd->sense_buffer) - rq_sense_len);

        TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
              "cmd->msg_status=%x, cmd->host_status=%x, "
              "cmd->driver_status=%x", result, cmd->status, resid,
              cmd->msg_status, cmd->host_status, cmd->driver_status);

        cmd->completed = 1;

        scst_dec_on_dev_cmd(cmd, 0);

        /*
         * For devices exported read-only, force the Write Protect bit
         * in MODE SENSE responses of the writable device types.
         */
        type = cmd->dev->handler->type;
        if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
            cmd->tgt_dev->acg_dev->rd_only_flag &&
            (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
             type == TYPE_TAPE)) {
                int32_t length;
                uint8_t *address;

                length = scst_get_buf_first(cmd, &address);
                TRACE_DBG("length %d", length);
                if (unlikely(length <= 0)) {
                        PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
                                __func__);
                        goto out;
                }
                if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
                        address[2] |= 0x80;   /* Write Protect*/
                }
                else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
                        address[3] |= 0x80;   /* Write Protect*/
                }
                scst_put_buf(cmd, address);
        }

out:
        TRACE_EXIT();
        return;
}
1128
1129 /* For small context optimization */
1130 static inline int scst_optimize_post_exec_context(struct scst_cmd *cmd,
1131         int context)
1132 {
1133         if ((context == SCST_CONTEXT_TASKLET) || 
1134             (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
1135                 if ( !test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC, 
1136                                 &cmd->tgt_dev->tgt_dev_flags))
1137                         context = SCST_CONTEXT_THREAD;
1138         }
1139         return context;
1140 }
1141
1142 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1143 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1144                                             struct scsi_request **req)
1145 {
1146         struct scst_cmd *cmd = NULL;
1147
1148         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1149                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1150
1151         if (cmd == NULL) {
1152                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1153                 if (*req)
1154                         scsi_release_request(*req);
1155         }
1156
1157         return cmd;
1158 }
1159
/*
 * Mid-level completion callback for kernels < 2.6.18 (scsi_request
 * based).  Extracts the scst_cmd, records the result and sense data,
 * clears and releases the scsi_request, then redirects the cmd to the
 * DEV_DONE state.
 */
static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
{
        struct scsi_request *req = NULL;
        struct scst_cmd *cmd;

        TRACE_ENTRY();

        cmd = scst_get_cmd(scsi_cmd, &req);
        if (cmd == NULL)
                goto out;

        scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
                sizeof(req->sr_sense_buffer), scsi_cmd->resid);

        /* Clear out request structure */
        req->sr_use_sg = 0;
        req->sr_sglist_len = 0;
        req->sr_bufflen = 0;
        req->sr_buffer = NULL;
        req->sr_underflow = 0;
        req->sr_request->rq_disk = NULL; /* disown request blk */

        scst_release_request(cmd);

        cmd->state = SCST_CMD_STATE_DEV_DONE;

        scst_proccess_redirect_cmd(cmd,
                scst_optimize_post_exec_context(cmd, scst_get_context()), 0);

out:
        TRACE_EXIT();
        return;
}
1193 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
/*
 * Mid-level completion callback for kernels >= 2.6.18.  Records the
 * result and sense data, then redirects the cmd to the DEV_DONE state.
 */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
{
        struct scst_cmd *cmd;

        TRACE_ENTRY();

        cmd = (struct scst_cmd *)data;
        if (cmd == NULL)
                goto out;

        scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);

        cmd->state = SCST_CMD_STATE_DEV_DONE;

        scst_proccess_redirect_cmd(cmd,
                scst_optimize_post_exec_context(cmd, scst_get_context()), 0);

out:
        TRACE_EXIT();
        return;
}
1215 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1216
/*
 * Completion callback for cmds executed locally by SCST or by a dev
 * handler.  Moves the cmd to next_state (DEV_DONE when
 * SCST_CMD_STATE_DEFAULT is passed) and redirects further processing
 * to an optimized context.
 */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
{
        TRACE_ENTRY();

        scst_dec_on_dev_cmd(cmd, 0);

        if (cmd->inc_expected_sn_on_done)
                scst_inc_check_expected_sn(cmd);

        if (next_state == SCST_CMD_STATE_DEFAULT)
                next_state = SCST_CMD_STATE_DEV_DONE;

#if defined(DEBUG) || defined(TRACING)
        /* Debug-only dump of the executed S/G elements */
        if (next_state == SCST_CMD_STATE_DEV_DONE) {
                if (cmd->sg) {
                        int i;
                        struct scatterlist *sg = cmd->sg;
                        TRACE(TRACE_RECV_TOP, 
                              "Exec'd %d S/G(s) at %p sg[0].page at %p",
                              cmd->sg_cnt, sg, (void*)sg[0].page);
                        for(i = 0; i < cmd->sg_cnt; ++i) {
                                TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
                                        "Exec'd sg", page_address(sg[i].page),
                                        sg[i].length);
                        }
                }
        }
#endif


#ifdef EXTRACHECKS
        /* Only DEV_DONE, XMIT_RESP and FINISHED are legal next states */
        if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
            (next_state != SCST_CMD_STATE_XMIT_RESP) &&
            (next_state != SCST_CMD_STATE_FINISHED)) 
        {
                PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
                            "state %d (opcode %d)", next_state, cmd->cdb[0]);
                scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_hardw_error));
                next_state = SCST_CMD_STATE_DEV_DONE;
        }
#endif
        cmd->state = next_state;

        scst_proccess_redirect_cmd(cmd,
                scst_optimize_post_exec_context(cmd, scst_get_context()), 0);

        TRACE_EXIT();
        return;
}
1267
/*
 * Local implementation of the REPORT LUNS command: walks the session's
 * tgt_dev hash and fills the response buffer (which may be split over
 * several S/G segments) with one 8-byte entry per LUN, then writes the
 * LUN list length into the 8-byte response header.  Always returns
 * SCST_EXEC_COMPLETED and reports the result itself via
 * scst_cmd_done_local().
 */
static int scst_report_luns_local(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_COMPLETED;
        int dev_cnt = 0;
        int buffer_size;
        int i;
        struct scst_tgt_dev *tgt_dev = NULL;
        uint8_t *buffer;
        int offs, overflow = 0;

        TRACE_ENTRY();

        cmd->status = 0;
        cmd->msg_status = 0;
        cmd->host_status = DID_OK;
        cmd->driver_status = 0;

        /* Only SELECT REPORT values 0 and 2 are supported */
        if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
                PRINT_ERROR_PR("Unsupported SELECT REPORT value %x in REPORT "
                        "LUNS command", cmd->cdb[2]);
                goto out_err;
        }

        buffer_size = scst_get_buf_first(cmd, &buffer);
        if (unlikely(buffer_size <= 0))
                goto out_err;

        /* The header plus at least one entry must fit in the first segment */
        if (buffer_size < 16)
                goto out_put_err;

        memset(buffer, 0, buffer_size);
        offs = 8;       /* skip the 8-byte response header */

        /* sess->sess_tgt_dev_list_hash is protected by suspended activity */
        for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
                struct list_head *sess_tgt_dev_list_head =
                        &cmd->sess->sess_tgt_dev_list_hash[i];
                list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
                                sess_tgt_dev_list_entry) {
                        if (!overflow) {
                                /* Advance to the next buffer segment, if needed */
                                if (offs >= buffer_size) {
                                        scst_put_buf(cmd, buffer);
                                        buffer_size = scst_get_buf_next(cmd, &buffer);
                                        if (buffer_size > 0) {
                                                memset(buffer, 0, buffer_size);
                                                offs = 0;
                                        } else {
                                                /* Out of buffer: keep counting LUNs only */
                                                overflow = 1;
                                                goto inc_dev_cnt;
                                        }
                                }
                                if ((buffer_size - offs) < 8) {
                                        PRINT_ERROR_PR("Buffer allocated for REPORT "
                                                "LUNS command doesn't allow to fit 8 "
                                                "byte entry (buffer_size=%d)",
                                                buffer_size);
                                        goto out_put_hw_err;
                                }
                                /*
                                 * NOTE(review): only the low 16 bits of the LUN
                                 * are encoded; LUNs >= 256 would need the SAM-2
                                 * flat-space addressing format -- confirm the
                                 * LUN range is limited elsewhere.
                                 */
                                buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
                                buffer[offs+1] = tgt_dev->lun & 0xff;
                                offs += 8;
                        }
inc_dev_cnt:
                        dev_cnt++;
                }
        }
        if (!overflow)
                scst_put_buf(cmd, buffer);

        /* Set the response header */
        buffer_size = scst_get_buf_first(cmd, &buffer);
        if (unlikely(buffer_size <= 0))
                goto out_err;
        dev_cnt *= 8;   /* LUN list length in bytes */
        buffer[0] = (dev_cnt >> 24) & 0xff;
        buffer[1] = (dev_cnt >> 16) & 0xff;
        buffer[2] = (dev_cnt >> 8) & 0xff;
        buffer[3] = dev_cnt & 0xff;
        scst_put_buf(cmd, buffer);

        /* Shrink the response to the actual data length (list + header) */
        dev_cnt += 8;
        if (dev_cnt < cmd->resp_data_len)
                scst_set_resp_data_len(cmd, dev_cnt);

out_done:
        cmd->completed = 1;

        /* Report the result */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

        TRACE_EXIT_RES(res);
        return res;
        
out_put_err:
        scst_put_buf(cmd, buffer);

out_err:
        scst_set_cmd_error(cmd,
                   SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
        goto out_done;

out_put_hw_err:
        scst_put_buf(cmd, buffer);
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_done;
}
1374
/*
 * Local pre-processing of MODE SELECT / LOG SELECT: blocks the device
 * (it is unblocked later in scst_done_cmd_check()) and delivers any
 * pending Unit Attention first.  Returns an SCST_EXEC_* status.
 */
static int scst_pre_select(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_NOT_COMPLETED;

        TRACE_ENTRY();

        /* Blocking the device may sleep, so it can't be done atomically */
        if (scst_cmd_atomic(cmd)) {
                res = SCST_EXEC_NEED_THREAD;
                goto out;
        }

        scst_block_dev(cmd->dev, 1);
        /* Device will be unblocked in scst_done_cmd_check() */

        if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
                int rc = scst_set_pending_UA(cmd);
                if (rc == 0) {
                        /* UA delivered instead of executing the cmd */
                        res = SCST_EXEC_COMPLETED;
                        cmd->completed = 1;
                        /* Report the result */
                        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
                        goto out;
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
1404
/*
 * Completes cmd with RESERVATION CONFLICT status.  Note: after
 * scst_cmd_done_local() returns, cmd may already have been freed.
 */
static inline void scst_report_reserved(struct scst_cmd *cmd)
{
        TRACE_ENTRY();

        scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
        cmd->completed = 1;
        /* Report the result */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

        TRACE_EXIT();
        return;
}
1417
/*
 * Local handler of RESERVE / RESERVE_10: marks the device reserved
 * against all other sessions, or reports a reservation conflict if it
 * is already reserved by someone else.  3rd-party reservations are not
 * supported.  Returns an SCST_EXEC_* status.
 */
static int scst_reserve_local(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_NOT_COMPLETED;
        struct scst_device *dev;
        struct scst_tgt_dev *tgt_dev_tmp;

        TRACE_ENTRY();

        /* Blocking the device may sleep, so it can't be done atomically */
        if (scst_cmd_atomic(cmd)) {
                res = SCST_EXEC_NEED_THREAD;
                goto out;
        }

        if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
                PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
                     "(lun=%Ld)", (uint64_t)cmd->lun);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
                /*
                 * NOTE(review): unlike the other COMPLETED paths in this
                 * file, no scst_cmd_done_local() is called here -- verify
                 * the caller completes the cmd in this case.
                 */
                cmd->completed = 1;
                res = SCST_EXEC_COMPLETED;
                goto out;
        }

        dev = cmd->dev;
        scst_block_dev(dev, 1);
        /* Device will be unblocked in scst_done_cmd_check() */

        spin_lock_bh(&dev->dev_lock);

        /* Already reserved for another session: conflict */
        if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
                scst_report_reserved(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                res = SCST_EXEC_COMPLETED;
                goto out_unlock;
        }

        /* Mark the device reserved against every other session */
        list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
                            dev_tgt_dev_list_entry) 
        {
                if (cmd->tgt_dev != tgt_dev_tmp)
                        set_bit(SCST_TGT_DEV_RESERVED, 
                                &tgt_dev_tmp->tgt_dev_flags);
        }
        dev->dev_reserved = 1;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        
out:
        TRACE_EXIT_RES(res);
        return res;
}
1470
/*
 * Local handler of RELEASE / RELEASE_10: clears the device reservation
 * for all sessions.  A RELEASE from a session that doesn't hold the
 * reservation is completed locally with GOOD status without touching
 * the reservation.  Returns an SCST_EXEC_* status.
 */
static int scst_release_local(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_NOT_COMPLETED;
        struct scst_tgt_dev *tgt_dev_tmp;
        struct scst_device *dev;

        TRACE_ENTRY();

        dev = cmd->dev;

        scst_block_dev(dev, 1);
        cmd->blocking = 1;
        TRACE_MGMT_DBG("Blocking cmd %p (tag %lld)", cmd, cmd->tag);

        spin_lock_bh(&dev->dev_lock);

        /* 
         * The device could be RELEASED behind us, if RESERVING session 
         * is closed (see scst_free_tgt_dev()), but this actually doesn't 
         * matter, so use lock and no retest for DEV_RESERVED bits again
         */
        if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
                /* Not the reservation holder: complete with GOOD status */
                res = SCST_EXEC_COMPLETED;
                cmd->status = 0;
                cmd->msg_status = 0;
                cmd->host_status = DID_OK;
                cmd->driver_status = 0;
        } else {
                /* Holder (or no reservation): drop it for all sessions */
                list_for_each_entry(tgt_dev_tmp,
                                    &dev->dev_tgt_dev_list,
                                    dev_tgt_dev_list_entry) 
                {
                        clear_bit(SCST_TGT_DEV_RESERVED, 
                                &tgt_dev_tmp->tgt_dev_flags);
                }
                dev->dev_reserved = 0;
        }

        spin_unlock_bh(&dev->dev_lock);

        if (res == SCST_EXEC_COMPLETED) {
                cmd->completed = 1;
                /* Report the result */
                scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        }

        TRACE_EXIT_RES(res);
        return res;
}
1520
/* 
 * The result of cmd execution, if any, should be reported 
 * via scst_cmd_done_local() 
 */
/*
 * Common pre-execution checks run before a cmd is sent to the
 * mid-level: reservation conflicts, bus-reset and pending Unit
 * Attentions, and read-only protection.  Returns
 * SCST_EXEC_NOT_COMPLETED when the cmd may proceed, or
 * SCST_EXEC_COMPLETED when it was finished here.
 */
static int scst_pre_exec(struct scst_cmd *cmd)
{
        int res = SCST_EXEC_NOT_COMPLETED, rc;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

        TRACE_ENTRY();

        /* Reserve check before Unit Attention */
        if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))) {
                /* These commands are permitted despite the reservation */
                if ((cmd->cdb[0] != INQUIRY) && (cmd->cdb[0] != REPORT_LUNS) &&
                    (cmd->cdb[0] != RELEASE) && (cmd->cdb[0] != RELEASE_10) &&
                    (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
                    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
                    (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
                {
                        scst_report_reserved(cmd);
                        res = SCST_EXEC_COMPLETED;
                        goto out;
                }
        }

        /* If we had a internal bus reset, set the command error unit attention */
        if ((cmd->dev->scsi_dev != NULL) &&
            unlikely(cmd->dev->scsi_dev->was_reset)) {
                if (scst_is_ua_command(cmd)) 
                {
                        struct scst_device *dev = cmd->dev;
                        int done = 0;
                        /* Prevent more than 1 cmd to be triggered by was_reset */
                        spin_lock_bh(&dev->dev_lock);
                        barrier(); /* to reread was_reset */
                        if (dev->scsi_dev->was_reset) {
                                TRACE(TRACE_MGMT, "was_reset is %d", 1);
                                scst_set_cmd_error(cmd,
                                           SCST_LOAD_SENSE(scst_sense_reset_UA));
                                /* It looks like it is safe to clear was_reset here */
                                dev->scsi_dev->was_reset = 0;
                                smp_mb();
                                done = 1;
                        }
                        spin_unlock_bh(&dev->dev_lock);

                        if (done)
                                goto out_done;
                }
        }

        /* Deliver a pending Unit Attention, if any */
        if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING, 
                        &cmd->tgt_dev->tgt_dev_flags))) {
                if (scst_is_ua_command(cmd)) 
                {
                        rc = scst_set_pending_UA(cmd);
                        if (rc == 0)
                                goto out_done;
                }
        }

        /* Check READ_ONLY device status */
        if (tgt_dev->acg_dev->rd_only_flag &&
            (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
             cmd->cdb[0] == WRITE_10 ||
             cmd->cdb[0] == WRITE_12 ||
             cmd->cdb[0] == WRITE_16 ||
             cmd->cdb[0] == WRITE_VERIFY ||
             cmd->cdb[0] == WRITE_VERIFY_12 ||
             cmd->cdb[0] == WRITE_VERIFY_16 ||
             (cmd->dev->handler->type == TYPE_TAPE &&
              (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
        {
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_data_protect));
                goto out_done;
        }
out:
        TRACE_EXIT_RES(res);
        return res;

out_done:
        res = SCST_EXEC_COMPLETED;
        cmd->completed = 1;
        /* Report the result */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;
}
1609
1610 /* 
1611  * The result of cmd execution, if any, should be reported 
1612  * via scst_cmd_done_local() 
1613  */
1614 static inline int scst_local_exec(struct scst_cmd *cmd)
1615 {
1616         int res = SCST_EXEC_NOT_COMPLETED;
1617
1618         TRACE_ENTRY();
1619
1620         /*
1621          * Adding new commands here don't forget to update
1622          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1623          */
1624
1625         switch (cmd->cdb[0]) {
1626         case MODE_SELECT:
1627         case MODE_SELECT_10:
1628         case LOG_SELECT:
1629                 res = scst_pre_select(cmd);
1630                 break;
1631         case RESERVE:
1632         case RESERVE_10:
1633                 res = scst_reserve_local(cmd);
1634                 break;
1635         case RELEASE:
1636         case RELEASE_10:
1637                 res = scst_release_local(cmd);
1638                 break;
1639         case REPORT_LUNS:
1640                 res = scst_report_luns_local(cmd);
1641                 break;
1642         }
1643
1644         TRACE_EXIT_RES(res);
1645         return res;
1646 }
1647
/*
 * Execute one command: give scst_pre_exec(), the local (SCST-internal)
 * command handlers and the dev handler's exec() callback a chance first,
 * then pass the command to the SCSI mid-level.
 *
 * Returns SCST_EXEC_COMPLETED, SCST_EXEC_NOT_COMPLETED or
 * SCST_EXEC_NEED_THREAD.  On SCST_EXEC_NEED_THREAD the command state is
 * restored (out_clear) so it can be retried from thread context.
 *
 * NOTE: after any of the exec hooks return, cmd (and its sess/tgt_dev)
 * may already be freed — see the "!!" comments below.
 */
static int scst_do_send_to_midlev(struct scst_cmd *cmd)
{
        int rc = SCST_EXEC_NOT_COMPLETED;

        TRACE_ENTRY();

        /* Check here to let an out of SN cmd be queued w/o context switch */
        if (scst_cmd_atomic(cmd) && !cmd->dev->handler->exec_atomic) {
                TRACE_DBG("Dev handler %s exec() can not be "
                      "called in atomic context, rescheduling to the thread",
                      cmd->dev->handler->name);
                rc = SCST_EXEC_NEED_THREAD;
                goto out;
        }

        /* Mark the command as in-flight before any hook can complete it */
        cmd->sent_to_midlev = 1;
        cmd->state = SCST_CMD_STATE_EXECUTING;
        cmd->scst_cmd_done = scst_cmd_done_local;

        /* Must be visible before the ABORTED re-check below */
        set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
        smp_mb__after_set_bit();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
                goto out_aborted;
        }

        rc = scst_pre_exec(cmd);
        /* !! At this point cmd, sess & tgt_dev can be already freed !! */
        if (rc != SCST_EXEC_NOT_COMPLETED) {
                if (rc == SCST_EXEC_COMPLETED)
                        goto out;
                else if (rc == SCST_EXEC_NEED_THREAD)
                        goto out_clear;
                else
                        goto out_rc_error;
        }

        rc = scst_local_exec(cmd);
        /* !! At this point cmd, sess & tgt_dev can be already freed !! */
        if (rc != SCST_EXEC_NOT_COMPLETED) {
                if (rc == SCST_EXEC_COMPLETED)
                        goto out;
                else if (rc == SCST_EXEC_NEED_THREAD)
                        goto out_clear;
                else
                        goto out_rc_error;
        }

        if (cmd->dev->handler->exec) {
                struct scst_device *dev = cmd->dev;
                TRACE_DBG("Calling dev handler %s exec(%p)",
                      dev->handler->name, cmd);
                TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
                cmd->scst_cmd_done = scst_cmd_done_local;
                rc = dev->handler->exec(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                TRACE_DBG("Dev handler %s exec() returned %d",
                      dev->handler->name, rc);
                if (rc == SCST_EXEC_COMPLETED)
                        goto out;
                else if (rc == SCST_EXEC_NEED_THREAD)
                        goto out_clear;
                else if (rc != SCST_EXEC_NOT_COMPLETED)
                        goto out_rc_error;
        }

        TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
        
        /* Only real SCSI devices may be passed down; virtual devices
         * must have been fully handled by their dev handler above. */
        if (unlikely(cmd->dev->scsi_dev == NULL)) {
                PRINT_ERROR_PR("Command for virtual device must be "
                        "processed by device handler (lun %Ld)!",
                        (uint64_t)cmd->lun);
                goto out_error;
        }

        /* Pre-2.6.18 kernels use the scsi_do_req() path; later ones the
         * scst_exec_req() (scsi_execute_async-based) path. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
        if (unlikely(scst_alloc_request(cmd) != 0)) {
                if (scst_cmd_atomic(cmd)) {
                        rc = SCST_EXEC_NEED_THREAD;
                        goto out_clear;
                } else {
                        PRINT_INFO_PR("%s", "Unable to allocate request, "
                                "sending BUSY status");
                        goto out_busy;
                }
        }
        
        scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
                    (void *)cmd->scsi_req->sr_buffer,
                    cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
                    cmd->retries);
#else
        rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
                        cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
                        cmd->timeout, cmd->retries, cmd, scst_cmd_done,
                        scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
        if (unlikely(rc != 0)) {
                if (scst_cmd_atomic(cmd)) {
                        rc = SCST_EXEC_NEED_THREAD;
                        goto out_clear;
                } else {
                        PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
                        goto out_error;
                }
        }
#endif

        rc = SCST_EXEC_COMPLETED;

out:
        TRACE_EXIT();
        return rc;

out_clear:
        /* Restore the state */
        cmd->sent_to_midlev = 0;
        cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
        goto out;

out_rc_error:
        PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
                    "invalid code %d", cmd->dev->handler->name, rc);
        /* go through */

out_error:
        /* Complete the cmd locally with HARDWARE ERROR sense */
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->completed = 1;
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        rc = SCST_EXEC_COMPLETED;
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
out_busy:
        /* Request allocation failed in process context: report BUSY */
        scst_set_busy(cmd);
        cmd->completed = 1;
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        rc = SCST_EXEC_COMPLETED;
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;
#endif

out_aborted:
        rc = SCST_EXEC_COMPLETED;
        /* Report the result. The cmd is not completed */
        scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
        goto out;
}
1797
/*
 * Advance tgt_dev->expected_sn after a command completes, releasing its
 * SN slot (if any) first.  Optimized lockless fast path: sn_lock is
 * taken only when a slot actually drains to zero.
 *
 * No locks.  Serialization is guaranteed by the SN ordering itself:
 * only one thread at a time can be incrementing expected_sn.
 */
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
{
        /* Commands without a slot (e.g. assigned a plain SN) go straight
         * to the increment */
        if (slot == NULL)
                goto inc;

        /* Optimized for lockless fast path */

        TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
                atomic_read(slot));

        /* Only the thread that drops the slot's refcount to 0 proceeds */
        if (!atomic_dec_and_test(slot))
                goto out;

        TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
                tgt_dev->num_free_sn_slots);
        if (tgt_dev->num_free_sn_slots != ARRAY_SIZE(tgt_dev->sn_slots)) {
                spin_lock_irq(&tgt_dev->sn_lock);
                /* Recheck under the lock — may have changed meanwhile */
                if (tgt_dev->num_free_sn_slots != ARRAY_SIZE(tgt_dev->sn_slots)) {
                        tgt_dev->num_free_sn_slots++;
                        TRACE_SN("Incremented num_free_sn_slots (%d)",
                                tgt_dev->num_free_sn_slots);
                        if (tgt_dev->num_free_sn_slots == 0)
                                tgt_dev->cur_sn_slot = slot;
                }
                spin_unlock_irq(&tgt_dev->sn_lock);
        }

inc:
        /*
         * No locks is needed, because only one thread at time can 
         * be here (serialized by sn). Also it is supposed that there
         * could not be half-incremented halves.
         */
        tgt_dev->expected_sn++;
        smp_mb(); /* write must be before def_cmd_count read */
        TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);

out:
        return;
}
1839
/*
 * Top-level execution entry for one command: enforces per-device command
 * accounting, HEAD OF QUEUE handling and SN (serial number) ordering,
 * then drives scst_do_send_to_midlev() — possibly in a loop, draining
 * deferred commands that became runnable.
 *
 * Returns one of the SCST_CMD_STATE_RES_* codes for the state machine.
 *
 * NOTE: several "!!" points below mark where cmd (or sess/dev/tgt_dev)
 * may already have been freed by a completion racing with us.
 */
static int scst_send_to_midlev(struct scst_cmd *cmd)
{
        int res, rc;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
        struct scst_device *dev = cmd->dev;
        typeof(tgt_dev->expected_sn) expected_sn;
        int count;

        TRACE_ENTRY();

        res = SCST_CMD_STATE_RES_CONT_NEXT;

        /* Give the target driver its pre-exec hook, if registered */
        if (cmd->tgtt->pre_exec != NULL) {
                rc = scst_tgt_pre_exec(cmd, &res);
                if (unlikely(rc != 0))
                        goto out;
        }

        /* Per-device in-flight accounting; non-zero means "blocked, retry
         * later" and the cmd was already queued elsewhere */
        if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
                goto out;

        __scst_get(0); /* protect dev & tgt_dev */

        /* Internal and retried commands bypass SN ordering entirely */
        if (unlikely(cmd->internal || cmd->retry)) {
                rc = scst_do_send_to_midlev(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                if (rc == SCST_EXEC_NEED_THREAD) {
                        TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                              "thread context, rescheduling");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        scst_dec_on_dev_cmd(cmd, 0);
                        goto out_dec_cmd_count;
                } else {
                        sBUG_ON(rc != SCST_EXEC_COMPLETED);
                        goto out_unplug;
                }
        }

        EXTRACHECKS_BUG_ON(cmd->no_sn);

        if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)) {
                /* 
                 * W/o get() there will be a race, when cmd is executed and
                 * destroyed before "goto out_unplug"
                 */
                scst_cmd_get(cmd);
                if (scst_check_hq_cmd(cmd)) {
                        scst_cmd_put(cmd);
                        goto exec;
                } else {
                        scst_dec_on_dev_cmd(cmd, 0);
                        scst_cmd_put(cmd);
                        goto out_unplug;
                }
        }

        expected_sn = tgt_dev->expected_sn;
        /* Optimized for lockless fast path */
        if ((cmd->sn != expected_sn) || unlikely(test_bit(SCST_TGT_DEV_HQ_ACTIVE,
                                                &tgt_dev->tgt_dev_flags))) {
                spin_lock_irq(&tgt_dev->sn_lock);
                /* Publish the deferral intent before rechecking, pairing
                 * with the smp_mb() in scst_inc_expected_sn() */
                tgt_dev->def_cmd_count++;
                smp_mb();
                barrier(); /* to reread expected_sn & hq_cmd_active */
                expected_sn = tgt_dev->expected_sn;
                if ((cmd->sn != expected_sn) || test_bit(SCST_TGT_DEV_HQ_ACTIVE,
                                                &tgt_dev->tgt_dev_flags)) {
                        /* We are under IRQ lock, but dev->dev_lock is BH one */
                        int cmd_blocking = scst_dec_on_dev_cmd(cmd, 1);
                        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                                /* Necessary to allow aborting out of sn cmds */
                                TRACE_MGMT_DBG("Aborting out of sn cmd %p (tag %lld)",
                                        cmd, cmd->tag);
                                tgt_dev->def_cmd_count--;
                                cmd->state = SCST_CMD_STATE_DEV_DONE;
                                res = SCST_CMD_STATE_RES_CONT_SAME;
                        } else {
                                TRACE_SN("Deferring cmd %p (sn=%ld, "
                                        "expected_sn=%ld, hq_cmd_active=%d)", cmd,
                                        cmd->sn, expected_sn, 
                                        test_bit(SCST_TGT_DEV_HQ_ACTIVE,
                                                &tgt_dev->tgt_dev_flags));
                                list_add_tail(&cmd->sn_cmd_list_entry,
                                              &tgt_dev->deferred_cmd_list);
                        }
                        spin_unlock_irq(&tgt_dev->sn_lock);
                        /* !! At this point cmd can be already freed !! */
                        __scst_dec_on_dev_cmd(dev, cmd_blocking);
                        goto out_dec_cmd_count;
                } else {
                        TRACE_SN("Somebody incremented expected_sn %ld, "
                                "continuing", expected_sn);
                        tgt_dev->def_cmd_count--;
                        spin_unlock_irq(&tgt_dev->sn_lock);
                }
        }

exec:
        /* Execute this cmd, then keep draining deferred cmds that became
         * runnable once expected_sn advanced */
        count = 0;
        while(1) {
                /* Capture per-cmd state before exec may free the cmd */
                atomic_t *slot = cmd->sn_slot;
                int hq = (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE);
                int inc_expected_sn_on_done = cmd->inc_expected_sn_on_done;
                rc = scst_do_send_to_midlev(cmd);
                if (rc == SCST_EXEC_NEED_THREAD) {
                        TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                              "thread context, rescheduling");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        if (unlikely(hq)) {
                                TRACE_SN("Rescheduling HQ cmd %p", cmd);
                                spin_lock_irq(&tgt_dev->sn_lock);
                                clear_bit(SCST_TGT_DEV_HQ_ACTIVE,
                                        &tgt_dev->tgt_dev_flags);
                                list_add(&cmd->sn_cmd_list_entry,
                                        &tgt_dev->hq_cmd_list);
                                spin_unlock_irq(&tgt_dev->sn_lock);
                        }
                        scst_dec_on_dev_cmd(cmd, 0);
                        if (count != 0)
                                goto out_unplug;
                        else
                                goto out_dec_cmd_count;
                }
                sBUG_ON(rc != SCST_EXEC_COMPLETED);
                /* !! At this point cmd can be already freed !! */
                count++;
                if ( !inc_expected_sn_on_done && likely(!hq))
                        scst_inc_expected_sn(tgt_dev, slot);
                cmd = scst_check_deferred_commands(tgt_dev);
                if (cmd == NULL)
                        break;
                if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
                        break;
        }

out_unplug:
        /* Kick the block layer so queued requests start being serviced */
        if (dev->scsi_dev != NULL)
                generic_unplug_device(dev->scsi_dev->request_queue);

out_dec_cmd_count:
        __scst_put();
        /* !! At this point sess, dev and tgt_dev can be already freed !! */

out:
        TRACE_EXIT_HRES(res);
        return res;
}
1987
/*
 * Post-completion sense processing: injects a reset UA after a host bus
 * reset, handles the "double UA" retry case and dispatches Unit
 * Attention sense to the other sessions of the device.
 *
 * Returns 1 when the command was re-armed for retry (caller must
 * re-execute it), 0 otherwise.
 *
 * No locks supposed to be held.  NOTE: when dbl_ua_possible is set,
 * dev->dev_lock is acquired below and stays held across the sense
 * processing until one of the unlock points near the end.
 */
static int scst_check_sense(struct scst_cmd *cmd)
{
        int res = 0;
        int sense_valid;
        struct scst_device *dev = cmd->dev;
        int dbl_ua_possible, ua_sent = 0;

        TRACE_ENTRY();

        /* If we had a internal bus reset behind us, set the command error UA */
        if ((dev->scsi_dev != NULL) &&
            unlikely(cmd->host_status == DID_RESET) &&
            scst_is_ua_command(cmd))
        {
                TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
                      dev->scsi_dev->was_reset, cmd->host_status);
                scst_set_cmd_error(cmd,
                   SCST_LOAD_SENSE(scst_sense_reset_UA));
                /* just in case */
                cmd->ua_ignore = 0;
                /* It looks like it is safe to clear was_reset here */
                dev->scsi_dev->was_reset = 0;
                smp_mb();
        }

        sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);

        dbl_ua_possible = dev->dev_double_ua_possible;
        TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
        if (unlikely(dbl_ua_possible)) {
                spin_lock_bh(&dev->dev_lock);
                barrier(); /* to reread dev_double_ua_possible */
                dbl_ua_possible = dev->dev_double_ua_possible;
                if (dbl_ua_possible)
                        ua_sent = dev->dev_reset_ua_sent;
                else
                        /* Flag cleared under us: drop the lock again */
                        spin_unlock_bh(&dev->dev_lock);
        }

        if (sense_valid) {
                TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
                             sizeof(cmd->sense_buffer));
                /* Check Unit Attention Sense Key */
                if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
                        if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
                                if (dbl_ua_possible)
                                {
                                        if (ua_sent) {
                                                TRACE(TRACE_MGMT, "%s", 
                                                        "Double UA detected");
                                                /* Do retry */
                                                TRACE(TRACE_MGMT, "Retrying cmd %p "
                                                        "(tag %lld)", cmd, cmd->tag);
                                                /* Wipe the old result so the cmd
                                                 * can be re-executed cleanly */
                                                cmd->status = 0;
                                                cmd->msg_status = 0;
                                                cmd->host_status = DID_OK;
                                                cmd->driver_status = 0;
                                                memset(cmd->sense_buffer, 0,
                                                        sizeof(cmd->sense_buffer));
                                                cmd->retry = 1;
                                                cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
                                                res = 1;
                                                /* 
                                                 * Dev is still blocked by this cmd, so
                                                 * it's OK to clear SCST_DEV_SERIALIZED
                                                 * here.
                                                 */
                                                dev->dev_double_ua_possible = 0;
                                                dev->dev_serialized = 0;
                                                dev->dev_reset_ua_sent = 0;
                                                goto out_unlock;
                                        } else
                                                dev->dev_reset_ua_sent = 1;
                                }
                        }
                        if (cmd->ua_ignore == 0) {
                                /* Use the lock-free variant when we already
                                 * hold dev_lock (dbl_ua_possible case) */
                                if (unlikely(dbl_ua_possible)) {
                                        __scst_process_UA(dev, cmd,
                                                cmd->sense_buffer,
                                                sizeof(cmd->sense_buffer), 0);
                                } else {
                                        scst_process_UA(dev, cmd,
                                                cmd->sense_buffer,
                                                sizeof(cmd->sense_buffer), 0);
                                }
                        }
                }
        }

        if (unlikely(dbl_ua_possible)) {
                if (ua_sent && scst_is_ua_command(cmd)) {
                        TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
                        dev->dev_double_ua_possible = 0;
                        dev->dev_serialized = 0;
                        dev->dev_reset_ua_sent = 0;
                }
                /* Paired with the spin_lock_bh() taken above */
                spin_unlock_bh(&dev->dev_lock);
        }

out:
        TRACE_EXIT_RES(res);
        return res;

out_unlock:
        spin_unlock_bh(&dev->dev_lock);
        goto out;
}
2096
2097 static int scst_check_auto_sense(struct scst_cmd *cmd)
2098 {
2099         int res = 0;
2100
2101         TRACE_ENTRY();
2102
2103         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2104             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
2105              SCST_NO_SENSE(cmd->sense_buffer)))
2106         {
2107                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2108                       "cmd->status=%x, cmd->msg_status=%x, "
2109                       "cmd->host_status=%x, cmd->driver_status=%x", cmd->status,
2110                       cmd->msg_status, cmd->host_status, cmd->driver_status);
2111                 res = 1;
2112         } else if (unlikely(cmd->host_status)) {
2113                 if ((cmd->host_status == DID_REQUEUE) ||
2114                     (cmd->host_status == DID_IMM_RETRY) ||
2115                     (cmd->host_status == DID_SOFT_ERROR)) {
2116                         scst_set_busy(cmd);
2117                 } else {
2118                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2119                                 "received, returning HARDWARE ERROR instead",
2120                                 cmd->host_status);
2121                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2122                 }
2123         }
2124
2125         TRACE_EXIT_RES(res);
2126         return res;
2127 }
2128
/*
 * Generic post-execution checks common to all commands: auto-sense
 * recovery, sense/UA processing, result fix-ups for read-only LUNs and
 * INQUIRY, and RESERVE/MODE SELECT/LOG SELECT bookkeeping.
 *
 * Returns 1 when the command's processing was redirected (the new state
 * code is stored in *pres), 0 when the caller should proceed normally.
 */
static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
{
        int res = 0, rc;
        unsigned char type;

        TRACE_ENTRY();

        if (unlikely(cmd->cdb[0] == REQUEST_SENSE)) {
                /* Complete an internally generated REQUEST SENSE */
                if (cmd->internal)
                        cmd = scst_complete_request_sense(cmd);
        } else if (unlikely(scst_check_auto_sense(cmd))) {
                /* CHECK CONDITION arrived without sense: fetch it ourselves */
                PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
                            "without sense data (opcode 0x%x), issuing "
                            "REQUEST SENSE", cmd->cdb[0]);
                rc = scst_prepare_request_sense(cmd);
                if (rc > 0) {
                        *pres = rc;
                        res = 1;
                        goto out;
                } else {
                        PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
                                    "returning HARDWARE ERROR");
                        scst_set_cmd_error(cmd,
                                SCST_LOAD_SENSE(scst_sense_hardw_error));
                }
        } else if (scst_check_sense(cmd)) {
                /* Double UA retry was armed: re-run the cmd state machine */
                *pres = SCST_CMD_STATE_RES_CONT_SAME;
                res = 1;
                goto out;
        }

        /* For read-only LUNs report Write Protect in MODE SENSE data */
        type = cmd->dev->handler->type;
        if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
            cmd->tgt_dev->acg_dev->rd_only_flag &&
            (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
             type == TYPE_TAPE))
        {
                int32_t length;
                uint8_t *address;

                length = scst_get_buf_first(cmd, &address);
                if (length <= 0)
                        goto out;
                if (length > 2 && cmd->cdb[0] == MODE_SENSE)
                        address[2] |= 0x80;   /* Write Protect*/
                else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
                        address[3] |= 0x80;   /* Write Protect*/
                scst_put_buf(cmd, address);
        }

        /* 
         * Check and clear NormACA option for the device, if necessary,
         * since we don't support ACA
         */
        if ((cmd->cdb[0] == INQUIRY) &&
            !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
            (cmd->resp_data_len > SCST_INQ_BYTE3))
        {
                uint8_t *buffer;
                int buflen;

                /* ToDo: all pages ?? */
                buflen = scst_get_buf_first(cmd, &buffer);
                if (buflen > 0) {
                        if (buflen > SCST_INQ_BYTE3) {
#ifdef EXTRACHECKS
                                if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
                                        PRINT_INFO_PR("NormACA set for device: "
                                            "lun=%Ld, type 0x%02x", 
                                            (uint64_t)cmd->lun, buffer[0]);
                                }
#endif
                                buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
                        } else
                                scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_hardw_error));

                        scst_put_buf(cmd, buffer);
                }
        }

        /* Undo our reservation bookkeeping if the real RESERVE failed */
        if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
                if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
                                                &cmd->tgt_dev->tgt_dev_flags)) {
                        struct scst_tgt_dev *tgt_dev_tmp;
                        TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
                              (uint64_t)cmd->lun, cmd->status);
                        TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
                                     sizeof(cmd->sense_buffer));
                        /* Clearing the reservation */
                        list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
                                            dev_tgt_dev_list_entry) {
                                clear_bit(SCST_TGT_DEV_RESERVED, 
                                        &tgt_dev_tmp->tgt_dev_flags);
                        }
                        cmd->dev->dev_reserved = 0;
                }
                scst_unblock_dev(cmd->dev);
        }

        /* MODE/LOG SELECT may have changed device parameters: notify the
         * other initiators with a "parameters changed" UA */
        if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
                     (cmd->cdb[0] == MODE_SELECT_10) ||
                     (cmd->cdb[0] == LOG_SELECT)))
        {
                if (cmd->status == 0) {
                        TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
                                "setting the SELECT UA (lun=%Ld)", 
                                (uint64_t)cmd->lun);
                        spin_lock_bh(&scst_temp_UA_lock);
                        if (cmd->cdb[0] == LOG_SELECT) {
                                /* 2a/02: LOG PARAMETERS CHANGED */
                                scst_set_sense(scst_temp_UA,
                                        sizeof(scst_temp_UA),
                                        UNIT_ATTENTION, 0x2a, 0x02);
                        } else {
                                /* 2a/01: MODE PARAMETERS CHANGED */
                                scst_set_sense(scst_temp_UA,
                                        sizeof(scst_temp_UA),
                                        UNIT_ATTENTION, 0x2a, 0x01);
                        }
                        scst_process_UA(cmd->dev, cmd, scst_temp_UA,
                                sizeof(scst_temp_UA), 1);
                        spin_unlock_bh(&scst_temp_UA_lock);
                }
                scst_unblock_dev(cmd->dev);
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
2258
2259 static int scst_dev_done(struct scst_cmd *cmd)
2260 {
2261         int res = SCST_CMD_STATE_RES_CONT_SAME;
2262         int state;
2263         int atomic = scst_cmd_atomic(cmd);
2264
2265         TRACE_ENTRY();
2266
2267         if (atomic && !cmd->dev->handler->dev_done_atomic) 
2268         {
2269                 TRACE_DBG("Dev handler %s dev_done() can not be "
2270                       "called in atomic context, rescheduling to the thread",
2271                       cmd->dev->handler->name);
2272                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2273                 goto out;
2274         }
2275
2276         if (scst_done_cmd_check(cmd, &res))
2277                 goto out;
2278
2279         state = SCST_CMD_STATE_XMIT_RESP;
2280         if (likely(!scst_is_cmd_local(cmd)) && 
2281             likely(cmd->dev->handler->dev_done != NULL))
2282         {
2283                 int rc;
2284                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2285                       cmd->dev->handler->name, cmd);
2286                 rc = cmd->dev->handler->dev_done(cmd);
2287                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2288                       cmd->dev->handler->name, rc);
2289                 if (rc != SCST_CMD_STATE_DEFAULT)
2290                         state = rc;
2291         }
2292
2293         switch (state) {
2294         case SCST_CMD_STATE_DEV_PARSE:
2295         case SCST_CMD_STATE_PREPARE_SPACE:
2296         case SCST_CMD_STATE_RDY_TO_XFER:
2297         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2298         case SCST_CMD_STATE_DEV_DONE:
2299         case SCST_CMD_STATE_XMIT_RESP:
2300         case SCST_CMD_STATE_FINISHED:
2301                 cmd->state = state;
2302                 res = SCST_CMD_STATE_RES_CONT_SAME;
2303                 break;
2304
2305         case SCST_CMD_STATE_NEED_THREAD_CTX:
2306                 TRACE_DBG("Dev handler %s dev_done() requested "
2307                       "thread context, rescheduling",
2308                       cmd->dev->handler->name);
2309                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2310                 break;
2311
2312         default:
2313                 if (state >= 0) {
2314                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2315                                 "invalid cmd state %d", 
2316                                 cmd->dev->handler->name, state);
2317                 } else {
2318                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2319                                 "error %d", cmd->dev->handler->name, 
2320                                 state);
2321                 }
2322                 scst_set_cmd_error(cmd,
2323                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2324                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2325                 res = SCST_CMD_STATE_RES_CONT_SAME;
2326                 break;
2327         }
2328
2329 out:
2330         TRACE_EXIT_HRES(res);
2331         return res;
2332 }
2333
/*
 * Deliver the response for cmd to the target driver via its
 * xmit_response() callback, handling atomic-context rescheduling,
 * aborted/no-response commands and QUEUE_FULL retries.
 *
 * Returns one of the SCST_CMD_STATE_RES_* codes telling the state
 * machine how to continue.  On SCST_TGT_RES_SUCCESS the command is
 * owned by the target driver and may be freed at any moment.
 */
static int scst_xmit_response(struct scst_cmd *cmd)
{
	int res, rc;
	int atomic = scst_cmd_atomic(cmd);

	TRACE_ENTRY();

	/*
	 * Check here also in order to avoid unnecessary delays of other
	 * commands.
	 */
	if (unlikely(!cmd->sent_to_midlev) && (cmd->tgt_dev != NULL)) {
		TRACE_SN("cmd %p was not sent to mid-lev (sn %ld)",
			cmd, cmd->sn);
		scst_unblock_deferred(cmd->tgt_dev, cmd);
		cmd->sent_to_midlev = 1;
	}

	/* Target drivers that can't transmit in atomic context get a thread */
	if (atomic && !cmd->tgtt->xmit_response_atomic) {
		TRACE_DBG("%s", "xmit_response() can not be "
		      "called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	/*
	 * If we don't remove cmd from the search list here, before
	 * submitting it for transmittion, we will have a race, when for
	 * some reason cmd's release is delayed after transmittion and
	 * initiator sends cmd with the same tag => it is possible that
	 * a wrong cmd will be found by find() functions.
	 */
	spin_lock_irq(&cmd->sess->sess_list_lock);
	list_del(&cmd->search_cmd_list_entry);
	spin_unlock_irq(&cmd->sess->sess_list_lock);

	/* Mark XMITTING before the flag checks below to close races with TM */
	set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
			TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
				"(tag %lld), returning TASK ABORTED", cmd, cmd->tag);
			scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
		}
	}

	/* Initiator doesn't want a response: finish without transmitting */
	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %lld), skipping",
			cmd, cmd->tag);
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

#ifdef DEBUG_TM
	if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		/*
		 * NOTE(review): this atomic re-check duplicates the one at
		 * the top of the function, which already bailed out to a
		 * thread, so this branch looks unreachable — confirm.
		 */
		if (atomic && !cmd->tgtt->xmit_response_atomic) {
			TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}
		TRACE_MGMT_DBG("Delaying cmd %p (tag %lld) for 1 second",
			cmd, cmd->tag);
		schedule_timeout_uninterruptible(HZ);
	}
#endif

	/* Retry loop: re-issue xmit_response() after QUEUE_FULL backoff */
	while (1) {
		/* Sampled before xmit so scst_queue_retry_cmd() can detect
		 * completions that happened in between */
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_XMIT_WAIT;

		TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(DEBUG) || defined(TRACING)
		if (cmd->sg) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE(TRACE_SEND_BOT,
			      "Xmitting %d S/G(s) at %p sg[0].page at %p",
			      cmd->sg_cnt, sg, (void*)sg[0].page);
			for(i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_SEND_BOT,
				    "Xmitting sg", page_address(sg[i].page),
				    sg[i].length);
			}
		}
#endif

#ifdef DEBUG_RETRY
		/* Fault injection: pretend the target queue is full ~1% of the time */
		if (((scst_random() % 100) == 77))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->xmit_response(cmd);
		TRACE_DBG("xmit_response() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_XMIT_RESP;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
		{
			/* 0 => cmd queued for retry, leave the loop;
			 * else a completion raced us, try xmit again now */
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				continue;
		}

		case SCST_TGT_RES_NEED_THREAD_CTX:
		{
			TRACE_DBG("Target driver %s xmit_response() "
			      "requested thread context, rescheduling",
			      cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			break;
		}

		default:
			goto out_error;
		}
		break;
	}

out:
	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	/* Fatal or unknown driver return code: fail the cmd with HW error */
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
2481
/*
 * Called by the target driver when it has finished transmitting the
 * response: advances the command to FINISHED and redirects it back into
 * the processing machinery in a context chosen by scst_get_context().
 * The command must be in XMIT_WAIT state.
 */
void scst_tgt_cmd_done(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);

	TRACE_EXIT();
	return;
}
2494
2495 static int scst_finish_cmd(struct scst_cmd *cmd)
2496 {
2497         int res;
2498
2499         TRACE_ENTRY();
2500
2501         if (cmd->mem_checked) {
2502                 spin_lock_bh(&scst_cmd_mem_lock);
2503                 scst_cur_cmd_mem -= cmd->bufflen;
2504                 spin_unlock_bh(&scst_cmd_mem_lock);
2505         }
2506
2507         atomic_dec(&cmd->sess->sess_cmd_count);
2508
2509         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2510                 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
2511                         "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
2512                         atomic_read(&scst_cmd_count));
2513         }
2514
2515         scst_cmd_put(cmd);
2516
2517         res = SCST_CMD_STATE_RES_CONT_NEXT;
2518
2519         TRACE_EXIT_HRES(res);
2520         return res;
2521 }
2522
2523 /*
2524  * No locks, but it must be externally serialized (see comment for
2525  * scst_cmd_init_done() in scsi_tgt.h)
2526  */
/*
 * Assign the execution serial number (SN) for cmd according to its SCSI
 * queue type, using the tgt_dev's rotating SN-slot counters.
 *
 * SIMPLE/UNTAGGED commands share the current slot; ORDERED commands
 * consume a slot and force a new SN; HEAD_OF_QUEUE commands bypass SN
 * ordering entirely and go onto the HQ list.  Clears cmd->no_sn on exit.
 */
static void scst_cmd_set_sn(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	unsigned long flags;

	/* Some commands are implicitly head-of-queue per SAM; promote them */
	if (scst_is_implicit_hq(cmd)) {
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "Implicit HQ cmd %p", cmd);
		cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	}

	/* Optimized for lockless fast path */

	scst_check_debug_sn(cmd);

	switch(cmd->queue_type) {
	case SCST_CMD_QUEUE_SIMPLE:
	case SCST_CMD_QUEUE_UNTAGGED:
		if (likely(tgt_dev->num_free_sn_slots >= 0)) {
			/* First cmd in this slot bumps the current SN */
			if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
				tgt_dev->curr_sn++;
				TRACE_SN("Incremented curr_sn %ld",
					tgt_dev->curr_sn);
			}
			cmd->sn_slot = tgt_dev->cur_sn_slot;
			cmd->sn = tgt_dev->curr_sn;
			tgt_dev->prev_cmd_ordered = 0;
		} else {
			/* All slots busy: fall back to ORDERED semantics */
			TRACE(TRACE_MINOR, "%s", "Not enough SN slots");
			goto ordered;
		}
		break;

	case SCST_CMD_QUEUE_ORDERED:
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "ORDERED cmd %p "
			"(op %x)", cmd, cmd->cdb[0]);
ordered:
		/* Advance to the next free slot only once per run of
		 * ORDERED commands (prev_cmd_ordered tracks this) */
		if (!tgt_dev->prev_cmd_ordered) {
			spin_lock_irqsave(&tgt_dev->sn_lock, flags);
			tgt_dev->num_free_sn_slots--;
			smp_mb();
			if ((tgt_dev->num_free_sn_slots >= 0) &&
			    (atomic_read(tgt_dev->cur_sn_slot) > 0)) {
				/* Rotate to the next empty slot, wrapping
				 * around the sn_slots array */
				do {
					tgt_dev->cur_sn_slot++;
					if (tgt_dev->cur_sn_slot == 
						tgt_dev->sn_slots +
						ARRAY_SIZE(tgt_dev->sn_slots))
					    tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
				} while(atomic_read(tgt_dev->cur_sn_slot) != 0);
				TRACE_SN("New cur SN slot %zd",
					tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
			} else
				tgt_dev->num_free_sn_slots++;
			spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		}
		tgt_dev->prev_cmd_ordered = 1;
		tgt_dev->curr_sn++;
		cmd->sn = tgt_dev->curr_sn;
		break;

	case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "HQ cmd %p "
			"(op %x)", cmd, cmd->cdb[0]);
		spin_lock_irqsave(&tgt_dev->sn_lock, flags);
		/* Add in the head as required by SAM */
		list_add(&cmd->sn_cmd_list_entry, &tgt_dev->hq_cmd_list);
		spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		break;

	default:
		PRINT_ERROR_PR("Unsupported queue type %d, treating it as "
			"ORDERED", cmd->queue_type);
		cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
		goto ordered;
	}

	TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
		"num_free_sn_slots %d, prev_cmd_ordered %ld, "
		"cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
		atomic_read(tgt_dev->cur_sn_slot), 
		tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
		tgt_dev->cur_sn_slot-tgt_dev->sn_slots);

	cmd->no_sn = 0;
	return;
}
2613
2614 /*
2615  * Returns 0 on success, > 0 when we need to wait for unblock,
2616  * < 0 if there is no device (lun) or device type handler.
2617  *
2618  * No locks, but might be on IRQ, protection is done by the
2619  * suspended activity.
2620  */
2621 static int scst_translate_lun(struct scst_cmd *cmd)
2622 {
2623         struct scst_tgt_dev *tgt_dev = NULL;
2624         int res;
2625
2626         TRACE_ENTRY();
2627
2628         __scst_get(1);
2629
2630         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2631                 struct list_head *sess_tgt_dev_list_head =
2632                         &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
2633                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2634                         (uint64_t)cmd->lun);
2635                 res = -1;
2636                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
2637                                 sess_tgt_dev_list_entry) {
2638                         if (tgt_dev->lun == cmd->lun) {
2639                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2640
2641                                 if (unlikely(tgt_dev->dev->handler == NULL)) {
2642                                         PRINT_INFO_PR("Dev handler for device "
2643                                           "%Ld is NULL, the device will not be "
2644                                           "visible remotely", (uint64_t)cmd->lun);
2645                                         break;
2646                                 }
2647                                 
2648                                 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
2649                                 cmd->tgt_dev = tgt_dev;
2650                                 cmd->dev = tgt_dev->dev;
2651
2652                                 res = 0;
2653                                 break;
2654                         }
2655                 }
2656                 if (res != 0) {
2657                         TRACE(TRACE_MINOR, "tgt_dev for lun %Ld not found, command to "
2658                                 "unexisting LU?", (uint64_t)cmd->lun);
2659                         __scst_put();
2660                 }
2661         } else {
2662                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
2663                 __scst_put();
2664                 res = 1;
2665         }
2666
2667         TRACE_EXIT_RES(res);
2668         return res;
2669 }
2670
2671 /*
2672  * No locks, but might be on IRQ
2673  *
2674  * Returns 0 on success, > 0 when we need to wait for unblock,
2675  * < 0 if there is no device (lun) or device type handler.
2676  */
2677 static int __scst_init_cmd(struct scst_cmd *cmd)
2678 {
2679         int res = 0;
2680
2681         TRACE_ENTRY();
2682
2683         res = scst_translate_lun(cmd);
2684         if (likely(res == 0)) {
2685                 int cnt;
2686                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2687                 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
2688                 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
2689                         TRACE(TRACE_RETRY, "Too many pending commands in "
2690                                 "session, returning BUSY to initiator \"%s\"",
2691                                 (cmd->sess->initiator_name[0] == '\0') ?
2692                                   "Anonymous" : cmd->sess->initiator_name);
2693                         goto out_busy;
2694                 }
2695                 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
2696                 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
2697                         TRACE(TRACE_RETRY, "Too many pending device commands, "
2698                                 "returning BUSY to initiator \"%s\"",
2699                                 (cmd->sess->initiator_name[0] == '\0') ?
2700                                   "Anonymous" : cmd->sess->initiator_name);
2701                         goto out_busy;
2702                 }
2703                 if (!cmd->no_sn)
2704                         scst_cmd_set_sn(cmd);
2705         } else if (res < 0) {
2706                 TRACE_DBG("Finishing cmd %p", cmd);
2707                 scst_set_cmd_error(cmd,
2708                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2709                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2710         } else
2711                 goto out;
2712
2713 out:
2714         TRACE_EXIT_RES(res);
2715         return res;
2716
2717 out_busy:
2718         scst_set_busy(cmd);
2719         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2720         goto out;
2721 }
2722
2723 /* Called under scst_init_lock and IRQs disabled */
/*
 * Drains scst_init_cmd_list: initializes each queued command via
 * __scst_init_cmd() (or routes aborted ones straight to XMIT_RESP)
 * and moves it onto its active cmd list, waking the processing threads.
 *
 * Caller holds scst_init_lock with IRQs disabled (see comment above);
 * the lock is dropped and retaken around the init call, and the
 * whole scan restarts after every processed command since the list
 * may have changed while unlocked.
 */
static void scst_do_job_init(void)
{
	struct scst_cmd *cmd;
	int susp;

	TRACE_ENTRY();

restart:
	/* While suspended, only aborted commands are processed */
	susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	if (scst_init_poll_cnt > 0)
		scst_init_poll_cnt--;

	list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
		int rc;
		if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			continue;
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			/* __scst_init_cmd() may sleep/spin elsewhere: drop
			 * the init lock (re-enabling IRQs) around it */
			spin_unlock_irq(&scst_init_lock);
			rc = __scst_init_cmd(cmd);
			spin_lock_irq(&scst_init_lock);
			if (rc > 0) {
				TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, restarting");
				goto restart;
			}
		} else {
			TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %lld)",
				cmd, cmd->tag);
			cmd->state = SCST_CMD_STATE_XMIT_RESP;
		}

		/*
		 * Deleting cmd from init cmd list after __scst_init_cmd()
		 * is necessary to keep the check in scst_init_cmd() correct
		 * to preserve the commands order.
		 *
		 * We don't care about the race, when init cmd list is empty
		 * and one command detected that it just was not empty, so
		 * it's inserting to it, but another command at the same time
		 * seeing init cmd list empty and goes directly, because it
		 * could affect only commands from the same initiator to the
		 * same tgt_dev, but init_cmd_done() doesn't guarantee the order
		 * in case of simultaneous such calls anyway.
		 */
		TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		/* Plain (non-irq) lock ops below: IRQs stay disabled
		 * across the init-lock -> cmd-list-lock handover */
		spin_unlock(&scst_init_lock);

		spin_lock(&cmd->cmd_lists->cmd_list_lock);
		TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
		/* HQ commands jump the queue as required by SAM */
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock(&cmd->cmd_lists->cmd_list_lock);

		spin_lock(&scst_init_lock);
		goto restart;
	}

	/* Re-scan if the suspended flag flipped while we were working */
	if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		goto restart;

	TRACE_EXIT();
	return;
}
2792
2793 static inline int test_init_cmd_list(void)
2794 {
2795         int res = (!list_empty(&scst_init_cmd_list) &&
2796                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2797                   unlikely(kthread_should_stop()) ||
2798                   (scst_init_poll_cnt > 0);
2799         return res;
2800 }
2801
/*
 * Kernel thread that initializes incoming commands: sleeps on
 * scst_init_cmd_list_waitQ until test_init_cmd_list() fires, then runs
 * scst_do_job_init().  Exits on kthread_stop().
 */
int scst_init_cmd_thread(void *arg)
{
	TRACE_ENTRY();

	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&scst_init_lock);
	while(!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		/* Open-coded interruptible wait: condition is re-checked
		 * under scst_init_lock to avoid missed wakeups */
		if (!test_init_cmd_list()) {
			add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_init_cmd_list())
					break;
				spin_unlock_irq(&scst_init_lock);
				schedule();
				spin_lock_irq(&scst_init_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
		}
		scst_do_job_init();
	}
	spin_unlock_irq(&scst_init_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be
	 * on the module unload, so scst_init_cmd_list must be empty.
	 */
	sBUG_ON(!list_empty(&scst_init_cmd_list));

	TRACE_EXIT();
	return 0;
}
2840
2841 /* Called with no locks held */
/*
 * Core command state machine: dispatches cmd to the handler for its
 * current state and keeps going while the handlers return
 * RES_CONT_SAME.  On RES_NEED_THREAD the command is pushed back onto
 * the head of its active cmd list for a thread to pick up; on
 * RES_CONT_NEXT the command has been handed off (and may be freed).
 *
 * Called with no locks held; must not run in hard-IRQ context.
 * context selects atomic vs. direct processing for the handlers.
 */
void scst_process_active_cmd(struct scst_cmd *cmd, int context)
{
	int res;

	TRACE_ENTRY();

	EXTRACHECKS_BUG_ON(in_irq());

	cmd->atomic = (context == SCST_CONTEXT_DIRECT_ATOMIC);

	do {
		switch (cmd->state) {
		case SCST_CMD_STATE_DEV_PARSE:
			res = scst_parse_cmd(cmd);
			break;

		case SCST_CMD_STATE_PREPARE_SPACE:
			res = scst_prepare_space(cmd);
			break;

		case SCST_CMD_STATE_RDY_TO_XFER:
			res = scst_rdy_to_xfer(cmd);
			break;

		case SCST_CMD_STATE_SEND_TO_MIDLEV:
			/* TM debug hook may delay the command artificially */
			if (tm_dbg_check_cmd(cmd) != 0) {
				res = SCST_CMD_STATE_RES_CONT_NEXT;
				TRACE_MGMT_DBG("Skipping cmd %p (tag %lld), "
					"because of TM DBG delay", cmd,
					cmd->tag);
				break;
			}
			res = scst_send_to_midlev(cmd);
			/* !! At this point cmd, sess & tgt_dev can be already freed !! */
			break;

		case SCST_CMD_STATE_DEV_DONE:
			res = scst_dev_done(cmd);
			break;

		case SCST_CMD_STATE_XMIT_RESP:
			res = scst_xmit_response(cmd);
			break;

		case SCST_CMD_STATE_FINISHED:
			res = scst_finish_cmd(cmd);
			break;

		default:
			PRINT_ERROR_PR("cmd (%p) in state %d, but shouldn't be",
			       cmd, cmd->state);
			sBUG();
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			break;
		}
	} while(res == SCST_CMD_STATE_RES_CONT_SAME);

	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		/* None */
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		/* Requeue at the head so the thread resumes this cmd first */
		spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
		switch (cmd->state) {
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_SEND_TO_MIDLEV:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Adding cmd %p to head of active cmd list", cmd);
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
			break;
#ifdef EXTRACHECKS
		/* not very valid commands */
		case SCST_CMD_STATE_DEFAULT:
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
				"useful list (left on scst cmd list)", cmd, 
				cmd->state);
			/* Don't sBUG() with the spinlock held */
			spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
			sBUG();
			spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
			break;
#endif
		default:
			break;
		}
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
	} else
		sBUG();

	TRACE_EXIT();
	return;
}
2938
2939 /* Called under cmd_list_lock and IRQs disabled */
2940 static void scst_do_job_active(struct list_head *cmd_list,
2941         spinlock_t *cmd_list_lock, int context)
2942 {
2943         TRACE_ENTRY();
2944
2945 #ifdef EXTRACHECKS
2946         WARN_ON((context != SCST_CONTEXT_DIRECT_ATOMIC) && 
2947                 (context != SCST_CONTEXT_DIRECT));
2948 #endif
2949
2950         while (!list_empty(cmd_list)) {
2951                 struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
2952                                         cmd_list_entry);
2953                 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
2954                 list_del(&cmd->cmd_list_entry);
2955                 spin_unlock_irq(cmd_list_lock);
2956                 scst_process_active_cmd(cmd, context);
2957                 spin_lock_irq(cmd_list_lock);
2958         }
2959
2960         TRACE_EXIT();
2961         return;
2962 }
2963
2964 static inline int test_cmd_lists(struct scst_cmd_lists *p_cmd_lists)
2965 {
2966         int res = !list_empty(&p_cmd_lists->active_cmd_list) ||
2967             unlikely(kthread_should_stop()) ||
2968             tm_dbg_is_release();
2969         return res;
2970 }
2971
/*
 * Kernel thread processing one scst_cmd_lists instance: sleeps on its
 * waitQ until test_cmd_lists() fires, handles TM-debug releases, then
 * drains the active list via scst_do_job_active() in DIRECT context.
 * Exits on kthread_stop().
 */
int scst_cmd_thread(void *arg)
{
	struct scst_cmd_lists *p_cmd_lists = (struct scst_cmd_lists*)arg;

	TRACE_ENTRY();

#if 0
	set_user_nice(current, 10);
#endif
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&p_cmd_lists->cmd_list_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		/* Open-coded interruptible wait: condition is re-checked
		 * under cmd_list_lock to avoid missed wakeups */
		if (!test_cmd_lists(p_cmd_lists)) {
			add_wait_queue_exclusive(&p_cmd_lists->cmd_list_waitQ,
				&wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_cmd_lists(p_cmd_lists))
					break;
				spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
				schedule();
				spin_lock_irq(&p_cmd_lists->cmd_list_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&p_cmd_lists->cmd_list_waitQ, &wait);
		}

		if (tm_dbg_is_release()) {
			spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
			tm_dbg_check_released_cmds();
			spin_lock_irq(&p_cmd_lists->cmd_list_lock);
		}

		scst_do_job_active(&p_cmd_lists->active_cmd_list,
			&p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT);
	}
	spin_unlock_irq(&p_cmd_lists->cmd_list_lock);

#ifdef EXTRACHECKS
	/*
	 * If kthread_should_stop() is true, we are guaranteed to be either
	 * on the module unload, or there must be at least one other thread to
	 * process the commands lists.
	 */
	if (p_cmd_lists == &scst_main_cmd_lists) {
		sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
			 !list_empty(&scst_main_cmd_lists.active_cmd_list));
	}
#endif

	TRACE_EXIT();
	return 0;
}
3029
/*
 * Tasklet handler: drains this CPU's tasklet command list in
 * DIRECT_ATOMIC context.  p is the per-CPU struct scst_tasklet,
 * as queued by scst_schedule_tasklet().
 */
void scst_cmd_tasklet(long p)
{
	struct scst_tasklet *t = (struct scst_tasklet*)p;

	TRACE_ENTRY();

	spin_lock_irq(&t->tasklet_lock);
	scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock,
		SCST_CONTEXT_DIRECT_ATOMIC);
	spin_unlock_irq(&t->tasklet_lock);

	TRACE_EXIT();
	return;
}
3044
3045 /*
3046  * Returns 0 on success, < 0 if there is no device handler or
3047  * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
3048  * No locks, protection is done by the suspended activity.
3049  */
/*
 * Resolves the LUN of a task-management command to its tgt_dev and
 * stores it in mcmd->mcmd_tgt_dev.
 *
 * Returns 0 on success, < 0 if there is no device handler or
 * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
 * No locks, protection is done by the suspended activity.
 *
 * NOTE(review): the __scst_put() balancing at the end assumes
 * mcmd->mcmd_tgt_dev is NULL on entry — confirm against callers.
 */
static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	struct list_head *sess_tgt_dev_list_head;
	int res = -1;

	TRACE_ENTRY();

	TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
	      (uint64_t)mcmd->lun);

	/* Pin SCST activity; released again on every failure path */
	__scst_get(1);

	if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
		     !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		__scst_put();
		res = 1;
		goto out;
	}

	sess_tgt_dev_list_head =
		&mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
	list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
			sess_tgt_dev_list_entry) {
		if (tgt_dev->lun == mcmd->lun) {
			TRACE_DBG("tgt_dev %p found", tgt_dev);
			mcmd->mcmd_tgt_dev = tgt_dev;
			res = 0;
			break;
		}
	}
	/* Lookup failed: drop the activity reference taken above */
	if (mcmd->mcmd_tgt_dev == NULL)
		__scst_put();

out:
	TRACE_EXIT_HRES(res);
	return res;
}
3089
3090 /* No locks */
/*
 * Called when cmd, which a task-management command mcmd was waiting on,
 * completes.  Decrements mcmd->cmd_wait_count; once it reaches zero,
 * moves mcmd to DONE state and (if mcmd itself completed) queues it on
 * the active mgmt cmd list, waking the mgmt thread.
 *
 * No locks held on entry; takes scst_mcmd_lock internally.
 */
void scst_complete_cmd_mgmt(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd)
{
	TRACE_ENTRY();

	spin_lock_irq(&scst_mcmd_lock);

	TRACE_MGMT_DBG("cmd %p completed (tag %lld, mcmd %p, "
		"mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
		mcmd->cmd_wait_count);

	/* Detach the cmd from the mgmt cmd that was tracking it */
	cmd->mgmt_cmnd = NULL;

	if (cmd->completed)
		mcmd->completed_cmd_count++;

	mcmd->cmd_wait_count--;
	if (mcmd->cmd_wait_count > 0) {
		/* Still waiting for other affected commands */
		TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
			mcmd->cmd_wait_count);
		goto out_unlock;
	}

	mcmd->state = SCST_MGMT_CMD_STATE_DONE;

	if (mcmd->completed) {
		TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list",
			mcmd);
		list_add_tail(&mcmd->mgmt_cmd_list_entry,
			&scst_active_mgmt_cmd_list);
	}

	spin_unlock_irq(&scst_mcmd_lock);

	/* Wake the mgmt thread outside the lock */
	wake_up(&scst_mgmt_cmd_list_waitQ);

out:
	TRACE_EXIT();
	return;

out_unlock:
	spin_unlock_irq(&scst_mcmd_lock);
	goto out;
}
3134
3135 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
3136         struct scst_tgt_dev *tgt_dev, int set_status)
3137 {
3138         int res = SCST_DEV_TM_NOT_COMPLETED;
3139         struct scst_dev_type *h = tgt_dev->dev->handler;
3140
3141         if (h->task_mgmt_fn) {
3142                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
3143                         h->name, mcmd->fn);
3144                 EXTRACHECKS_BUG_ON(in_irq());
3145                 res = h->task_mgmt_fn(mcmd, tgt_dev);
3146                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
3147                       h->name, res);
3148                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
3149                         mcmd->status = res;
3150         }
3151         return res;
3152 }
3153
3154 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
3155 {
3156         switch(mgmt_fn) {
3157                 case SCST_ABORT_TASK:
3158                 case SCST_ABORT_TASK_SET:
3159                 case SCST_CLEAR_TASK_SET:
3160                         return 1;
3161                 default:
3162                         return 0;
3163         }
3164 }
3165
/* 
 * Might be called under sess_list_lock and IRQ off + BHs also off.
 * Returns nothing; a deferred ABORT is tracked via mcmd->cmd_wait_count
 * and completed later by scst_complete_cmd_mgmt().
 */
void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
        int other_ini, int call_dev_task_mgmt_fn)
{
        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Aborting cmd %p (tag %lld)", cmd, cmd->tag);

        if (other_ini) {
                /* Abort originated from another initiator's TM request */
                set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
                smp_mb__after_set_bit();
        }
        set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
        /* Make the ABORTED flag globally visible before the checks below */
        smp_mb__after_set_bit();

        if (cmd->tgt_dev == NULL) {
                /*
                 * Cmd not yet assigned to a device: bump the poll counter and
                 * wake the init thread so it re-examines the aborted cmd.
                 */
                unsigned long flags;
                spin_lock_irqsave(&scst_init_lock, flags);
                scst_init_poll_cnt++;
                spin_unlock_irqrestore(&scst_init_lock, flags);
                wake_up(&scst_init_cmd_list_waitQ);
        }

        if (call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL)) {
                /* The handler callback may sleep, so IRQs must be enabled */
                EXTRACHECKS_BUG_ON(irqs_disabled());
                scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
        }

        if (mcmd) {
                unsigned long flags;
                /*
                 * Delay the response until the command's finish in
                 * order to guarantee that "no further responses from
                 * the task are sent to the SCSI initiator port" after
                 * response from the TM function is sent (SAM). Plus,
                 * we must wait here to be sure that we won't receive
                 * double commands with the same tag.
                 */
                TRACE(TRACE_MGMT, "cmd %p (tag %lld) being executed/"
                        "xmitted (state %d), deferring ABORT...", cmd,
                        cmd->tag, cmd->state);
#ifdef EXTRACHECKS
                if (cmd->mgmt_cmnd) {
                        printk(KERN_ALERT "cmd %p (tag %lld, state %d) "
                                "has non-NULL mgmt_cmnd %p!!! Current "
                                "mcmd %p\n", cmd, cmd->tag, cmd->state,
                                cmd->mgmt_cmnd, mcmd);
                }
#endif
                sBUG_ON(cmd->mgmt_cmnd);
                spin_lock_irqsave(&scst_mcmd_lock, flags);
                /* Decremented by scst_complete_cmd_mgmt() when cmd finishes */
                mcmd->cmd_wait_count++;
                spin_unlock_irqrestore(&scst_mcmd_lock, flags);
                /* cmd can't die here or sess_list_lock already taken */
                cmd->mgmt_cmnd = mcmd;
        }

        tm_dbg_release_cmd(cmd);

        TRACE_EXIT();
        return;
}
3231
3232 /* No locks */
3233 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
3234 {
3235         int res;
3236         spin_lock_irq(&scst_mcmd_lock);
3237         if (mcmd->cmd_wait_count != 0) {
3238                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
3239                         "wait", mcmd->cmd_wait_count);
3240                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
3241                 res = -1;
3242         } else {
3243                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3244                 res = 0;
3245         }
3246         mcmd->completed = 1;
3247         spin_unlock_irq(&scst_mcmd_lock);
3248         return res;
3249 }
3250
3251 static int __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd)
3252 {
3253         int res;
3254         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3255                 TRACE_MGMT_DBG("Adding aborted blocked cmd %p to active cmd "
3256                         "list", cmd);
3257                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3258                 list_add_tail(&cmd->cmd_list_entry,
3259                         &cmd->cmd_lists->active_cmd_list);
3260                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3261                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3262                 res = 1;
3263         } else
3264                 res = 0;
3265         return res;
3266 }
3267
static void scst_unblock_aborted_cmds(int scst_mutex_held)
{
        struct scst_device *dev;

        TRACE_ENTRY();

        /* Caller may already hold scst_mutex; take it only if it doesn't */
        if (!scst_mutex_held)
                mutex_lock(&scst_mutex);

        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                struct scst_cmd *cmd, *tcmd;
                struct scst_tgt_dev *tgt_dev;
                /*
                 * Pass 1: requeue aborted cmds sitting on the device's
                 * blocked list. IRQs are disabled around the scan —
                 * presumably because cmd_list_lock (taken inside
                 * __scst_check_unblock_aborted_cmd()) can also be taken
                 * from IRQ context; TODO confirm.
                 */
                spin_lock_bh(&dev->dev_lock);
                local_irq_disable();
                list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                        blocked_cmd_list_entry) {
                        if (__scst_check_unblock_aborted_cmd(cmd))
                                list_del(&cmd->blocked_cmd_list_entry);
                }
                local_irq_enable();
                spin_unlock_bh(&dev->dev_lock);

                /*
                 * Pass 2: same for aborted cmds deferred on the per-tgt_dev
                 * SN (sequence number) lists.
                 */
                local_irq_disable();
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                                         dev_tgt_dev_list_entry) {
                        spin_lock(&tgt_dev->sn_lock);
                        list_for_each_entry_safe(cmd, tcmd,
                                        &tgt_dev->deferred_cmd_list,
                                        sn_cmd_list_entry) {
                                if (__scst_check_unblock_aborted_cmd(cmd)) {
                                        TRACE_MGMT_DBG("Deleting aborted SN "
                                                "cmd %p from SN list", cmd);
                                        tgt_dev->def_cmd_count--;
                                        list_del(&cmd->sn_cmd_list_entry);
                                }
                        }
                        spin_unlock(&tgt_dev->sn_lock);
                }
                local_irq_enable();
        }

        if (!scst_mutex_held)
                mutex_unlock(&scst_mutex);

        TRACE_EXIT();
        return;
}
3315
/* Aborts all cmds of tgt_dev's session addressed to it; returns nothing */
static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
{
        struct scst_cmd *cmd;
        struct scst_session *sess = tgt_dev->sess;

        TRACE_ENTRY();

        spin_lock_irq(&sess->sess_list_lock);

        TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
        list_for_each_entry(cmd, &sess->search_cmd_list, 
                        search_cmd_list_entry) {
                /*
                 * Match cmds already routed to this tgt_dev, plus cmds still
                 * in init (tgt_dev == NULL) addressed to the same LUN.
                 */
                if ((cmd->tgt_dev == tgt_dev) ||
                    ((cmd->tgt_dev == NULL) && 
                     (cmd->lun == tgt_dev->lun)))
                        scst_abort_cmd(cmd, mcmd, other_ini, 0);
        }
        spin_unlock_irq(&sess->sess_list_lock);

        /* Requeue any freshly-aborted cmds off blocked/deferred lists */
        scst_unblock_aborted_cmds(scst_mutex_held);

        TRACE_EXIT();
        return;
}
3342
3343 /* Returns 0 if the command processing should be continued, <0 otherwise */
3344 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
3345 {
3346         int res;
3347         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3348         struct scst_device *dev = tgt_dev->dev;
3349
3350         TRACE(TRACE_MGMT, "Aborting task set (lun=%Ld, mcmd=%p)",
3351                 tgt_dev->lun, mcmd);
3352
3353         spin_lock_bh(&dev->dev_lock);
3354         __scst_block_dev(dev);
3355         spin_unlock_bh(&dev->dev_lock);
3356
3357         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
3358         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3359
3360         res = scst_set_mcmd_next_state(mcmd);
3361
3362         TRACE_EXIT_RES(res);
3363         return res;
3364 }
3365
3366 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3367 {
3368         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags) && !mcmd->active) {
3369                 TRACE_MGMT_DBG("Adding mgmt cmd %p to delayed mgmt cmd list",
3370                         mcmd);
3371                 spin_lock_irq(&scst_mcmd_lock);
3372                 list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3373                         &scst_delayed_mgmt_cmd_list);
3374                 spin_unlock_irq(&scst_mcmd_lock);
3375                 return -1;
3376         } else {
3377                 mcmd->active = 1;
3378                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3379                 return 0;
3380         }
3381 }
3382
3383 /* Returns 0 if the command processing should be continued, 
3384  * >0, if it should be requeued, <0 otherwise */
static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
{
        int res = 0;

        TRACE_ENTRY();

        /* Defer if another TM activity is already in progress */
        res = scst_check_delay_mgmt_cmd(mcmd);
        if (res != 0)
                goto out;

        if (mcmd->fn == SCST_ABORT_TASK) {
                /* Tag-based abort: locate the single cmd by its tag */
                struct scst_session *sess = mcmd->sess;
                struct scst_cmd *cmd;

                spin_lock_irq(&sess->sess_list_lock);
                cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
                if (cmd == NULL) {
                        TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
                                "tag %lld not found", mcmd->tag);
                        mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
                        mcmd->state = SCST_MGMT_CMD_STATE_DONE;
                        spin_unlock_irq(&sess->sess_list_lock);
                        goto out;
                }
                /* Hold a ref so cmd can't be freed once the lock is dropped */
                scst_cmd_get(cmd);
                spin_unlock_irq(&sess->sess_list_lock);
                TRACE(TRACE_MGMT, "Cmd %p for tag %lld (sn %ld) found, "
                        "aborting it", cmd, mcmd->tag, cmd->sn);
                mcmd->cmd_to_abort = cmd;
                scst_abort_cmd(cmd, mcmd, 0, 1);
                scst_unblock_aborted_cmds(0);
                res = scst_set_mcmd_next_state(mcmd);
                mcmd->cmd_to_abort = NULL; /* just in case */
                scst_cmd_put(cmd);
        } else {
                int rc;
                /* All other TM fns are LUN-based: resolve LUN to tgt_dev */
                rc = scst_mgmt_translate_lun(mcmd);
                if (rc < 0) {
                        PRINT_ERROR_PR("Corresponding device for lun %Ld not "
                                "found", (uint64_t)mcmd->lun);
                        mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
                        mcmd->state = SCST_MGMT_CMD_STATE_DONE;
                } else if (rc == 0)
                        mcmd->state = SCST_MGMT_CMD_STATE_READY;
                else
                        res = rc; /* > 0: activity suspended, requeue */
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
3437
3438 /* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
{
        int res, rc;
        struct scst_device *dev, *d;
        struct scst_tgt_dev *tgt_dev;
        int cont, c;
        LIST_HEAD(host_devs);

        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
                mcmd, atomic_read(&mcmd->sess->sess_cmd_count));

        mutex_lock(&scst_mutex);

        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                int found = 0;

                /* Block the device and run the generic reset processing */
                spin_lock_bh(&dev->dev_lock);
                __scst_block_dev(dev);
                scst_process_reset(dev, mcmd->sess, NULL, mcmd);
                spin_unlock_bh(&dev->dev_lock);

                /*
                 * Offer the reset to every tgt_dev's handler.
                 * cont: dev has at least one tgt_dev;
                 * c: at least one handler did NOT complete the TM fn itself.
                 */
                cont = 0;
                c = 0;
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                        dev_tgt_dev_list_entry) 
                {
                        cont = 1;
                        rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
                        if (rc == SCST_DEV_TM_NOT_COMPLETED) 
                                c = 1;
                        else if ((rc < 0) &&
                                 (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
                                mcmd->status = rc;
                }
                /* Every handler fully handled the reset: no SCSI-level reset */
                if (cont && !c)
                        continue;
                
                if (dev->scsi_dev == NULL)
                        continue;

                /* Collect one representative device per SCSI host so each
                 * bus is reset only once below */
                list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
                        if (dev->scsi_dev->host->host_no ==
                                    d->scsi_dev->host->host_no) 
                        {
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        list_add_tail(&dev->reset_dev_list_entry, &host_devs);
        }

        /*
         * We suppose here that for all commands that already on devices
         * on/after scsi_reset_provider() completion callbacks will be called.
         */

        list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
                /* dev->scsi_dev must be non-NULL here */
                TRACE(TRACE_MGMT, "Resetting host %d bus ",
                      dev->scsi_dev->host->host_no);
                rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
                TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
                      dev->scsi_dev->host->host_no,
                      (rc == SUCCESS) ? "SUCCESS" : "FAILED");
                if ((rc != SUCCESS) &&
                    (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
                        /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                }
        }

        /* Clear was_reset on all devices — presumably to suppress duplicate
         * handling of the resets we just issued ourselves; TODO confirm */
        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                if (dev->scsi_dev != NULL)
                        dev->scsi_dev->was_reset = 0;
        }

        mutex_unlock(&scst_mutex);

        tm_dbg_task_mgmt("TARGET RESET", 0);
        res = scst_set_mcmd_next_state(mcmd);

        TRACE_EXIT_RES(res);
        return res;
}
3526
3527 /* Returns 0 if the command processing should be continued, <0 otherwise */
3528 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3529 {
3530         int res, rc;
3531         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3532         struct scst_device *dev = tgt_dev->dev;
3533
3534         TRACE_ENTRY();
3535
3536         TRACE(TRACE_MGMT, "Resetting lun %Ld (mcmd %p)", tgt_dev->lun, mcmd);
3537
3538         spin_lock_bh(&dev->dev_lock);
3539         __scst_block_dev(dev);
3540         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3541         spin_unlock_bh(&dev->dev_lock);
3542
3543         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3544         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3545                 goto out_tm_dbg;
3546
3547         if (dev->scsi_dev != NULL) {
3548                 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3549                       dev->scsi_dev->host->host_no);
3550                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3551                 if ((rc != SUCCESS) && (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
3552                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3553                 dev->scsi_dev->was_reset = 0;
3554         }
3555