1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28 #include <linux/kthread.h>
29 #include <linux/delay.h>
30
31 #include "scsi_tgt.h"
32 #include "scst_priv.h"
33
34 static void scst_cmd_set_sn(struct scst_cmd *cmd);
35 static int __scst_init_cmd(struct scst_cmd *cmd);
36
37 static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
38 {
39         struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
40         unsigned long flags;
41
42         spin_lock_irqsave(&t->tasklet_lock, flags);
43         TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
44                 smp_processor_id());
45         list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
46         spin_unlock_irqrestore(&t->tasklet_lock, flags);
47
48         tasklet_schedule(&t->tasklet);
49 }
50
51 /* 
52  * Must not be called in parallel with scst_unregister_session() for the 
53  * same sess
54  */
55 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
56                              const uint8_t *lun, int lun_len,
57                              const uint8_t *cdb, int cdb_len, int atomic)
58 {
59         struct scst_cmd *cmd;
60
61         TRACE_ENTRY();
62
63 #ifdef EXTRACHECKS
64         if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
65                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
66                 sBUG();
67         }
68 #endif
69
70         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
71         if (cmd == NULL)
72                 goto out;
73
74         cmd->sess = sess;
75         cmd->tgt = sess->tgt;
76         cmd->tgtt = sess->tgt->tgtt;
77         cmd->state = SCST_CMD_STATE_INIT_WAIT;
78
79         /*
80          * For both a wrong LUN and a wrong CDB, defer the error reporting
81          * to scst_cmd_init_done()
82          */
83
84         cmd->lun = scst_unpack_lun(lun, lun_len);
85
86         if (cdb_len <= SCST_MAX_CDB_SIZE) {
87                 memcpy(cmd->cdb, cdb, cdb_len);
88                 cmd->cdb_len = cdb_len;
89         }
90
91         TRACE_DBG("cmd %p, sess %p", cmd, sess);
92         scst_sess_get(sess);
93
94 out:
95         TRACE_EXIT();
96         return cmd;
97 }
98
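/*
 * Initializes the command via __scst_init_cmd(), unless the global init
 * list is busy.  Returns the context in which processing should continue,
 * possibly downgraded to the thread context, or a negative value if the
 * command was deferred to the init thread (preprocessing-only commands
 * are completed with BUSY status instead).
 */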
99 static int scst_init_cmd(struct scst_cmd *cmd, int context)
100 {
101         int rc;
102
103         TRACE_ENTRY();
104
105         /* See the comment in scst_do_job_init() */
106         if (unlikely(!list_empty(&scst_init_cmd_list))) {
107                 TRACE_MGMT_DBG("%s", "init cmd list busy");
108                 goto out_redirect;
109         }
110         smp_rmb();
111
112         rc = __scst_init_cmd(cmd);
113         if (unlikely(rc > 0))
114                 goto out_redirect;
115         else if (unlikely(rc != 0))
116                 goto out;
117
118         /* Small context optimization */
119         if (((context == SCST_CONTEXT_TASKLET) ||
120              (context == SCST_CONTEXT_DIRECT_ATOMIC)) && 
121             scst_cmd_is_expected_set(cmd)) {
122                 if (cmd->expected_data_direction == SCST_DATA_WRITE) {
123                         if ( !test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
124                                         &cmd->tgt_dev->tgt_dev_flags))
125                                 context = SCST_CONTEXT_THREAD;
126                 } else {
127                         if ( !test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
128                                         &cmd->tgt_dev->tgt_dev_flags))
129                                 context = SCST_CONTEXT_THREAD;
130                 }
131         }
132
133 out:
134         TRACE_EXIT_RES(context);
135         return context;
136
137 out_redirect:
138         if (cmd->preprocessing_only) {
139                 /*
140                  * Poor man's solution for single-threaded targets, where
141                  * blocking the receiver at least sometimes means blocking all.
142                  */
143                 sBUG_ON(context != SCST_CONTEXT_DIRECT);
144                 scst_set_busy(cmd);
145                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
146                 /* Protect the initiator from being flooded with BUSY commands */
147                 if (!in_interrupt() && !in_atomic())
148                         msleep(50);
149                 else
150                         WARN_ON_ONCE(1);
151         } else {
152                 unsigned long flags;
153                 spin_lock_irqsave(&scst_init_lock, flags);
154                 TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
155                         "%d)", cmd, atomic_read(&scst_cmd_count));
156                 list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
157                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
158                         scst_init_poll_cnt++;
159                 spin_unlock_irqrestore(&scst_init_lock, flags);
160                 wake_up(&scst_init_cmd_list_waitQ);
161                 context = -1;
162         }
163         goto out;
164 }
165
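/*
 * Entry point for target drivers: called when reception of the command's
 * LUN and CDB has finished.  Performs LUN/CDB sanity checks, defers the
 * command if the session is still initializing, then runs scst_init_cmd()
 * and continues processing in the resulting context (tasklet, direct call
 * or worker thread).
 */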
166 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
167 {
168         unsigned long flags;
169         struct scst_session *sess = cmd->sess;
170
171         TRACE_ENTRY();
172
173         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
174         TRACE(TRACE_SCSI, "tag=%llu, lun=%Ld, CDB len=%d", cmd->tag, 
175                 (uint64_t)cmd->lun, cmd->cdb_len);
176         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
177                 cmd->cdb, cmd->cdb_len);
178
179 #ifdef EXTRACHECKS
180         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
181                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
182         {
183                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
184                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
185                         cmd->tgtt->name);
186                 pref_context = SCST_CONTEXT_TASKLET;
187         }
188 #endif
189
190         atomic_inc(&sess->sess_cmd_count);
191
192         spin_lock_irqsave(&sess->sess_list_lock, flags);
193
194         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
195
196         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
197                 switch(sess->init_phase) {
198                 case SCST_SESS_IPH_SUCCESS:
199                         break;
200                 case SCST_SESS_IPH_INITING:
201                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
202                         list_add_tail(&cmd->cmd_list_entry, 
203                                 &sess->init_deferred_cmd_list);
204                         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
205                         goto out;
206                 case SCST_SESS_IPH_FAILED:
207                         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
208                         scst_set_busy(cmd);
209                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
210                         goto active;
211                 default:
212                         sBUG();
213                 }
214         }
215
216         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
217
218         if (unlikely(cmd->lun == (lun_t)-1)) {
219                 PRINT_ERROR_PR("Wrong LUN %d, finishing cmd", -1);
220                 scst_set_cmd_error(cmd,
221                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
222                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
223                 goto active;
224         }
225
226         if (unlikely(cmd->cdb_len == 0)) {
227                 PRINT_ERROR_PR("Wrong CDB len %d, finishing cmd", 0);
228                 scst_set_cmd_error(cmd,
229                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
230                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
231                 goto active;
232         }
233
234         cmd->state = SCST_CMD_STATE_INIT;
235         /* The cmd must be initialized here to preserve the ordering */
236         pref_context = scst_init_cmd(cmd, pref_context);
237         if (unlikely(pref_context < 0))
238                 goto out;
239
240 active:
241         /* Here cmd must not be in any cmd list, no locks */
242         switch (pref_context) {
243         case SCST_CONTEXT_TASKLET:
244                 scst_schedule_tasklet(cmd);
245                 break;
246
247         case SCST_CONTEXT_DIRECT:
248         case SCST_CONTEXT_DIRECT_ATOMIC:
249                 scst_process_active_cmd(cmd, pref_context);
250                 /* For *NEED_THREAD wake_up() is already done */
251                 break;
252
253         default:
254                 PRINT_ERROR_PR("Context %x is undefined, using the thread one",
255                         pref_context);
256                 /* go through */
257         case SCST_CONTEXT_THREAD:
258                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
259                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
260                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
261                         list_add(&cmd->cmd_list_entry,
262                                 &cmd->cmd_lists->active_cmd_list);
263                 else
264                         list_add_tail(&cmd->cmd_list_entry,
265                                 &cmd->cmd_lists->active_cmd_list);
266                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
267                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
268                 break;
269         }
270
271 out:
272         TRACE_EXIT();
273         return;
274 }
275
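/*
 * Translates the CDB into data direction, transfer length and CDB length
 * (falling back to the initiator-supplied expected values for unknown
 * opcodes), lets the dev handler's parse() adjust the result, validates it
 * and sets the next command state.
 */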
276 static int scst_parse_cmd(struct scst_cmd *cmd)
277 {
278         int res = SCST_CMD_STATE_RES_CONT_SAME;
279         int state;
280         struct scst_device *dev = cmd->dev;
281         struct scst_info_cdb cdb_info;
282         int atomic = scst_cmd_atomic(cmd);
283         int orig_bufflen;
284
285         TRACE_ENTRY();
286
287         if (atomic && !dev->handler->parse_atomic) {
288                 TRACE_DBG("Dev handler %s parse() can not be "
289                       "called in atomic context, rescheduling to the thread",
290                       dev->handler->name);
291                 res = SCST_CMD_STATE_RES_NEED_THREAD;
292                 goto out;
293         }
294
295         cmd->inc_expected_sn_on_done = dev->handler->inc_expected_sn_on_done;
296
297         if (cmd->skip_parse || cmd->internal)
298                 goto call_parse;
299
300         /*
301          * Expected transfer data supplied by the SCSI transport via the
302          * target driver are untrusted, so we prefer to fetch them from the
303          * CDB. Additionally, not all transports support supplying the
304          * expected transfer data.
305          */
306
307         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
308                         &cdb_info) != 0)) 
309         {
310                 static int t;
311                 if (t < 10) {
312                         t++;
313                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
314                                 "Should you update scst_scsi_op_table?",
315                                 cmd->cdb[0], dev->handler->name);
316                 }
317                 if (scst_cmd_is_expected_set(cmd)) {
318                         TRACE(TRACE_SCSI, "Using initiator supplied values: "
319                                 "direction %d, transfer_len %d",
320                                 cmd->expected_data_direction,
321                                 cmd->expected_transfer_len);
322                         cmd->data_direction = cmd->expected_data_direction;
323                         cmd->bufflen = cmd->expected_transfer_len;
324                         /* Restore (most probably) lost CDB length */
325                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
326                         if (cmd->cdb_len == -1) {
327                                 PRINT_ERROR_PR("Unable to get CDB length for "
328                                         "opcode 0x%02x. Returning INVALID "
329                                         "OPCODE", cmd->cdb[0]);
330                                 scst_set_cmd_error(cmd,
331                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
332                                 goto out_xmit;
333                         }
334                 } else {
335                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
336                              "target %s did not supply expected values. "
337                              "Returning INVALID OPCODE.", cmd->cdb[0],
338                              dev->handler->name, cmd->tgtt->name);
339                         scst_set_cmd_error(cmd,
340                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
341                         goto out_xmit;
342                 }
343         } else {
344                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
345                         "set %s), transfer_len=%d (expected len %d), flags=%d",
346                         cdb_info.op_name, cdb_info.direction,
347                         cmd->expected_data_direction,
348                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
349                         cdb_info.transfer_len, cmd->expected_transfer_len,
350                         cdb_info.flags);
351
352                 /* Restore (most probably) lost CDB length */
353                 cmd->cdb_len = cdb_info.cdb_len;
354
355                 cmd->data_direction = cdb_info.direction;
356                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
357                         cmd->bufflen = cdb_info.transfer_len;
358                 /* else cmd->bufflen remains 0, as it was initialized */
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
362                 PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
370                 PRINT_ERROR_PR("Linked commands are not supported "
371                             "(opcode 0x%02x)", cmd->cdb[0]);
372                 scst_set_cmd_error(cmd,
373                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
374                 goto out_xmit;
375         }
376
377 call_parse:
378         orig_bufflen = cmd->bufflen;
379
380         if (likely(!scst_is_cmd_local(cmd))) {
381                 TRACE_DBG("Calling dev handler %s parse(%p)",
382                       dev->handler->name, cmd);
383                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
384                 state = dev->handler->parse(cmd, &cdb_info);
385                 /* Caution: cmd can be already dead here */
386                 TRACE_DBG("Dev handler %s parse() returned %d",
387                         dev->handler->name, state);
388
389                 switch (state) {
390                 case SCST_CMD_STATE_NEED_THREAD_CTX:
391                         TRACE_DBG("Dev handler %s parse() requested thread "
392                               "context, rescheduling", dev->handler->name);
393                         res = SCST_CMD_STATE_RES_NEED_THREAD;
394                         goto out;
395
396                 case SCST_CMD_STATE_STOP:
397                         TRACE_DBG("Dev handler %s parse() requested stop "
398                                 "processing", dev->handler->name);
399                         res = SCST_CMD_STATE_RES_CONT_NEXT;
400                         goto out;
401                 }
402
403                 if (state == SCST_CMD_STATE_DEFAULT)
404                         state = SCST_CMD_STATE_PREPARE_SPACE;
405         }
406         else
407                 state = SCST_CMD_STATE_PREPARE_SPACE;
408
409         if (scst_cmd_is_expected_set(cmd)) {
410                 if (cmd->expected_transfer_len < cmd->bufflen) {
411                         TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
412                                 "cmd->bufflen(%d), using expected_transfer_len "
413                                 "instead", cmd->expected_transfer_len,
414                                 cmd->bufflen);
415                         cmd->bufflen = cmd->expected_transfer_len;
416                 }
417         }
418
419         if (cmd->data_len == -1)
420                 cmd->data_len = cmd->bufflen;
421
422         if (cmd->data_buf_alloced && (orig_bufflen > cmd->bufflen)) {
423                 PRINT_ERROR_PR("Target driver supplied data buffer (size %d) "
424                         "is less than required (size %d)", cmd->bufflen,
425                         orig_bufflen);
426                 goto out_error;
427         }
428
429 #ifdef EXTRACHECKS
430         if ((state != SCST_CMD_STATE_XMIT_RESP) &&
431             (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
432                 (state != SCST_CMD_STATE_DEV_PARSE)) ||
433             ((cmd->bufflen != 0) && 
434                 (cmd->data_direction == SCST_DATA_NONE) &&
435                 (cmd->status == 0)) ||
436             ((cmd->bufflen == 0) && 
437                 (cmd->data_direction != SCST_DATA_NONE)) ||
438             ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
439                 (state > SCST_CMD_STATE_PREPARE_SPACE))))
440         {
441                 PRINT_ERROR_PR("Dev handler %s parse() returned "
442                                "invalid cmd data_direction %d, "
443                                "bufflen %d or state %d (opcode 0x%x)",
444                                dev->handler->name, 
445                                cmd->data_direction, cmd->bufflen,
446                                state, cmd->cdb[0]);
447                 goto out_error;
448         }
449 #endif
450
451         switch (state) {
452         case SCST_CMD_STATE_PREPARE_SPACE:
453         case SCST_CMD_STATE_DEV_PARSE:
454         case SCST_CMD_STATE_RDY_TO_XFER:
455         case SCST_CMD_STATE_PRE_EXEC:
456         case SCST_CMD_STATE_SEND_TO_MIDLEV:
457         case SCST_CMD_STATE_DEV_DONE:
458         case SCST_CMD_STATE_XMIT_RESP:
459         case SCST_CMD_STATE_FINISHED:
460                 cmd->state = state;
461                 res = SCST_CMD_STATE_RES_CONT_SAME;
462                 break;
463
464         default:
465                 if (state >= 0) {
466                         PRINT_ERROR_PR("Dev handler %s parse() returned "
467                              "invalid cmd state %d (opcode %d)", 
468                              dev->handler->name, state, cmd->cdb[0]);
469                 } else {
470                         PRINT_ERROR_PR("Dev handler %s parse() returned "
471                                 "error %d (opcode %d)", dev->handler->name, 
472                                 state, cmd->cdb[0]);
473                 }
474                 goto out_error;
475         }
476
477         if (cmd->resp_data_len == -1) {
478                 if (cmd->data_direction == SCST_DATA_READ)
479                         cmd->resp_data_len = cmd->bufflen;
480                 else
481                          cmd->resp_data_len = 0;
482         }
483         
484 out:
485         TRACE_EXIT_HRES(res);
486         return res;
487
488 out_error:
489         /* dev_done() will be called as part of the regular cmd's finish */
490         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
491         cmd->state = SCST_CMD_STATE_DEV_DONE;
492         res = SCST_CMD_STATE_RES_CONT_SAME;
493         goto out;
494
495 out_xmit:
496         cmd->state = SCST_CMD_STATE_XMIT_RESP;
497         res = SCST_CMD_STATE_RES_CONT_SAME;
498         goto out;
499 }
500
501
502
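/*
 * Allocates the data buffer for the command, either via the target
 * driver's alloc_data_buf() or via scst_alloc_space(), then picks the next
 * state: RDY_TO_XFER for writes, PRE_EXEC otherwise.  For
 * preprocessing-only commands it calls preprocessing_done() instead;
 * processing is resumed later through scst_restart_cmd().
 */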
503 static int scst_prepare_space(struct scst_cmd *cmd)
504 {
505         int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
506
507         TRACE_ENTRY();
508
509         if (cmd->data_direction == SCST_DATA_NONE)
510                 goto prep_done;
511
512         if (cmd->data_buf_tgt_alloc) {
513                 int orig_bufflen = cmd->bufflen;
514
515                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
516
517                 r = cmd->tgtt->alloc_data_buf(cmd);
518                 if (r > 0)
519                         goto alloc;
520                 else if (r == 0) {
521                         cmd->data_buf_alloced = 1;
522                         if (unlikely(orig_bufflen < cmd->bufflen)) {
523                                 PRINT_ERROR_PR("Target driver allocated data "
524                                         "buffer (size %d) is less than "
525                                         "required (size %d)", orig_bufflen,
526                                         cmd->bufflen);
527                                 goto out_error;
528                         }
529                 } else
530                         goto check;
531         }
532
533 alloc:
534         if (!cmd->data_buf_alloced) {
535                 r = scst_alloc_space(cmd);
536         } else {
537                 TRACE_MEM("%s", "data_buf_alloced set, returning");
538         }
539         
540 check:
541         if (r != 0) {
542                 if (scst_cmd_atomic(cmd)) {
543                         TRACE_MEM("%s", "Atomic memory allocation failed, "
544                               "rescheduling to the thread");
545                         res = SCST_CMD_STATE_RES_NEED_THREAD;
546                         goto out;
547                 } else
548                         goto out_no_space;
549         }
550
551 prep_done:
552         if (cmd->preprocessing_only) {
553                 if (scst_cmd_atomic(cmd) && 
554                     !cmd->tgtt->preprocessing_done_atomic) {
555                         TRACE_DBG("%s", "preprocessing_done() can not be "
556                               "called in atomic context, rescheduling to "
557                               "the thread");
558                         res = SCST_CMD_STATE_RES_NEED_THREAD;
559                         goto out;
560                 }
561
562                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
563                         TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
564                                 "cmd %p", cmd);
565                         cmd->state = SCST_CMD_STATE_DEV_DONE;
566                         res = SCST_CMD_STATE_RES_CONT_SAME;
567                         goto out;
568                 }
569
570                 res = SCST_CMD_STATE_RES_CONT_NEXT;
571                 cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;
572
573                 TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
574                 cmd->tgtt->preprocessing_done(cmd);
575                 TRACE_DBG("%s", "preprocessing_done() returned");
576                 goto out;
577
578         }
579
580         switch (cmd->data_direction) {
581         case SCST_DATA_WRITE:
582                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
583                 break;
584
585         default:
586                 cmd->state = SCST_CMD_STATE_PRE_EXEC;
587                 break;
588         }
589
590 out:
591         TRACE_EXIT_HRES(res);
592         return res;
593
594 out_no_space:
595         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
596                 "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
597         scst_set_busy(cmd);
598         cmd->state = SCST_CMD_STATE_DEV_DONE;
599         res = SCST_CMD_STATE_RES_CONT_SAME;
600         goto out;
601
602 out_error:
603         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
604         cmd->state = SCST_CMD_STATE_DEV_DONE;
605         res = SCST_CMD_STATE_RES_CONT_SAME;
606         goto out;
607 }
608
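/*
 * Entry point for target drivers: resumes a command for which
 * preprocessing_done() was called.  Maps the supplied status to the next
 * state (RDY_TO_XFER/PRE_EXEC on success, DEV_DONE on errors) and
 * redirects the command into the chosen context.
 */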
609 void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
610 {
611         TRACE_ENTRY();
612
613         TRACE_DBG("Preferred context: %d", pref_context);
614         TRACE_DBG("tag=%llu, status=%#x", scst_cmd_get_tag(cmd), status);
615
616 #ifdef EXTRACHECKS
617         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
618                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
619         {
620                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
621                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
622                         cmd->tgtt->name);
623                 pref_context = SCST_CONTEXT_TASKLET;
624         }
625 #endif
626
627         switch (status) {
628         case SCST_PREPROCESS_STATUS_SUCCESS:
629                 switch (cmd->data_direction) {
630                 case SCST_DATA_WRITE:
631                         cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
632                         break;
633                 default:
634                         cmd->state = SCST_CMD_STATE_PRE_EXEC;
635                         break;
636                 }
637                 if (cmd->set_sn_on_restart_cmd)
638                         scst_cmd_set_sn(cmd);
639                 /* Small context optimization */
640                 if ((pref_context == SCST_CONTEXT_TASKLET) || 
641                     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
642                         if (cmd->data_direction == SCST_DATA_WRITE) {
643                                 if ( !test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
644                                                 &cmd->tgt_dev->tgt_dev_flags))
645                                         pref_context = SCST_CONTEXT_THREAD;
646                         } else {
647                                 if ( !test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
648                                                 &cmd->tgt_dev->tgt_dev_flags))
649                                         pref_context = SCST_CONTEXT_THREAD;
650                         }
651                 }
652                 break;
653
654         case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
655                 cmd->state = SCST_CMD_STATE_DEV_DONE;
656                 break;
657
658         case SCST_PREPROCESS_STATUS_ERROR_FATAL:
659                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
660                 /* go through */
661         case SCST_PREPROCESS_STATUS_ERROR:
662                 scst_set_cmd_error(cmd,
663                            SCST_LOAD_SENSE(scst_sense_hardw_error));
664                 cmd->state = SCST_CMD_STATE_DEV_DONE;
665                 break;
666
667         default:
668                 PRINT_ERROR_PR("%s() received unknown status %x", __func__,
669                         status);
670                 cmd->state = SCST_CMD_STATE_DEV_DONE;
671                 break;
672         }
673
674         scst_proccess_redirect_cmd(cmd, pref_context, 1);
675
676         TRACE_EXIT();
677         return;
678 }
679
680 /* No locks */
681 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
682 {
683         struct scst_tgt *tgt = cmd->sess->tgt;
684         int res = 0;
685         unsigned long flags;
686
687         TRACE_ENTRY();
688
689         spin_lock_irqsave(&tgt->tgt_lock, flags);
690         tgt->retry_cmds++;
691         smp_mb();
692         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
693               tgt->retry_cmds);
694         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
695                 /* At least one cmd finished, so try again */
696                 tgt->retry_cmds--;
697                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
698                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
699                       "retry_cmds=%d)", finished_cmds,
700                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
701                 res = -1;
702                 goto out_unlock_tgt;
703         }
704
705         TRACE(TRACE_RETRY, "Adding cmd %p to retry cmd list", cmd);
706         list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
707
708         if (!tgt->retry_timer_active) {
709                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
710                 add_timer(&tgt->retry_timer);
711                 tgt->retry_timer_active = 1;
712         }
713
714 out_unlock_tgt:
715         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
716
717         TRACE_EXIT_RES(res);
718         return res;
719 }
720
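/*
 * Asks the target driver to start fetching write data by calling its
 * rdy_to_xfer() callback.  QUEUE FULL results are queued for retry,
 * NEED_THREAD_CTX is handled by rescheduling to the thread context, and
 * fatal or unknown return values complete the command with a hardware
 * error.
 */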
721 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
722 {
723         int res, rc;
724         int atomic = scst_cmd_atomic(cmd);
725
726         TRACE_ENTRY();
727
728         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
729                 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
730                 goto out_dev_done;
731         }
732
733         if (cmd->tgtt->rdy_to_xfer == NULL) {
734                 cmd->state = SCST_CMD_STATE_PRE_EXEC;
735                 res = SCST_CMD_STATE_RES_CONT_SAME;
736                 goto out;
737         }
738
739         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
740                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
741                       "called in atomic context, rescheduling to the thread");
742                 res = SCST_CMD_STATE_RES_NEED_THREAD;
743                 goto out;
744         }
745
746         while (1) {
747                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
748
749                 res = SCST_CMD_STATE_RES_CONT_NEXT;
750                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
751
752                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
753 #ifdef DEBUG_RETRY
754                 if (((scst_random() % 100) == 75))
755                         rc = SCST_TGT_RES_QUEUE_FULL;
756                 else
757 #endif
758                         rc = cmd->tgtt->rdy_to_xfer(cmd);
759                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
760
761                 if (likely(rc == SCST_TGT_RES_SUCCESS))
762                         goto out;
763
764                 /* Restore the previous state */
765                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
766
767                 switch (rc) {
768                 case SCST_TGT_RES_QUEUE_FULL:
769                 {
770                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
771                                 break;
772                         else
773                                 continue;
774                 }
775
776                 case SCST_TGT_RES_NEED_THREAD_CTX:
777                 {
778                         TRACE_DBG("Target driver %s "
779                               "rdy_to_xfer() requested thread "
780                               "context, rescheduling", cmd->tgtt->name);
781                         res = SCST_CMD_STATE_RES_NEED_THREAD;
782                         break;
783                 }
784
785                 default:
786                         goto out_error_rc;
787                 }
788                 break;
789         }
790
791 out:
792         TRACE_EXIT_HRES(res);
793         return res;
794
795 out_error_rc:
796         if (rc == SCST_TGT_RES_FATAL_ERROR) {
797                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
798                      "fatal error", cmd->tgtt->name);
799         } else {
800                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
801                             "value %d", cmd->tgtt->name, rc);
802         }
803         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
804
805 out_dev_done:
806         cmd->state = SCST_CMD_STATE_DEV_DONE;
807         res = SCST_CMD_STATE_RES_CONT_SAME;
808         goto out;
809 }
810
811 /* No locks, but might be in IRQ */
812 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
813         int check_retries)
814 {
815         unsigned long flags;
816
817         TRACE_ENTRY();
818
819         TRACE_DBG("Context: %d", context);
820
821         switch(context) {
822         case SCST_CONTEXT_DIRECT:
823         case SCST_CONTEXT_DIRECT_ATOMIC:
824                 if (check_retries)
825                         scst_check_retries(cmd->tgt);
826                 scst_process_active_cmd(cmd, context);
827                 break;
828
829         default:
830                 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
831                             context);
832                 /* go through */
833         case SCST_CONTEXT_THREAD:
834                 if (check_retries)
835                         scst_check_retries(cmd->tgt);
836                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
837                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
838                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
839                         list_add(&cmd->cmd_list_entry,
840                                 &cmd->cmd_lists->active_cmd_list);
841                 else
842                         list_add_tail(&cmd->cmd_list_entry,
843                                 &cmd->cmd_lists->active_cmd_list);
844                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
845                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
846                 break;
847
848         case SCST_CONTEXT_TASKLET:
849                 if (check_retries)
850                         scst_check_retries(cmd->tgt);
851                 scst_schedule_tasklet(cmd);
852                 break;
853         }
854
855         TRACE_EXIT();
856         return;
857 }
858
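/*
 * Entry point for target drivers: called when the write data for the
 * command has been received (or reception failed).  Sets the next state
 * according to the status and redirects the command into the chosen
 * context.
 */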
859 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
860 {
861         TRACE_ENTRY();
862
863         TRACE_DBG("Preferred context: %d", pref_context);
864         TRACE(TRACE_SCSI, "tag=%llu status=%#x", scst_cmd_get_tag(cmd), status);
865
866 #ifdef EXTRACHECKS
867         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
868                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
869         {
870                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
871                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
872                         cmd->tgtt->name);
873                 pref_context = SCST_CONTEXT_TASKLET;
874         }
875 #endif
876
877         switch (status) {
878         case SCST_RX_STATUS_SUCCESS:
879                 cmd->state = SCST_CMD_STATE_PRE_EXEC;
880                 /* Small context optimization */
881                 if ((pref_context == SCST_CONTEXT_TASKLET) || 
882                     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
883                         if ( !test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC, 
884                                         &cmd->tgt_dev->tgt_dev_flags))
885                                 pref_context = SCST_CONTEXT_THREAD;
886                 }
887                 break;
888
889         case SCST_RX_STATUS_ERROR_SENSE_SET:
890                 cmd->state = SCST_CMD_STATE_DEV_DONE;
891                 break;
892
893         case SCST_RX_STATUS_ERROR_FATAL:
894                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
895                 /* go through */
896         case SCST_RX_STATUS_ERROR:
897                 scst_set_cmd_error(cmd,
898                            SCST_LOAD_SENSE(scst_sense_hardw_error));
899                 cmd->state = SCST_CMD_STATE_DEV_DONE;
900                 break;
901
902         default:
903                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
904                         status);
905                 cmd->state = SCST_CMD_STATE_DEV_DONE;
906                 break;
907         }
908
909         scst_proccess_redirect_cmd(cmd, pref_context, 1);
910
911         TRACE_EXIT();
912         return;
913 }
914
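/*
 * Gives the target driver a last look at the command before execution via
 * its optional pre_exec() callback and maps the returned status to the
 * next state.
 */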
915 static int scst_tgt_pre_exec(struct scst_cmd *cmd)
916 {
917         int rc;
918
919         TRACE_ENTRY();
920
921         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
922
923         if (cmd->tgtt->pre_exec == NULL)
924                 goto out;
925
926         TRACE_DBG("Calling pre_exec(%p)", cmd);
927         rc = cmd->tgtt->pre_exec(cmd);
928         TRACE_DBG("pre_exec() returned %d", rc);
929
930         if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
931                 switch(rc) {
932                 case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
933                         cmd->state = SCST_CMD_STATE_DEV_DONE;
934                         break;
935                 case SCST_PREPROCESS_STATUS_ERROR_FATAL:
936                         set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
937                         /* go through */
938                 case SCST_PREPROCESS_STATUS_ERROR:
939                         scst_set_cmd_error(cmd,
940                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
941                         cmd->state = SCST_CMD_STATE_DEV_DONE;
942                         break;
943                 default:
944                         sBUG();
945                         break;
946                 }
947         }
948
949 out:
950         TRACE_EXIT();
951         return SCST_CMD_STATE_RES_CONT_SAME;
952 }
953
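/*
 * Advances the expected SN for the tgt_dev and, if that unblocks a
 * deferred command, moves it to the active list and wakes up the
 * processing thread.
 */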
954 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
955 {
956         struct scst_cmd *c;
957
958         if (likely(cmd->sn_set))
959                 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
960
961         c = scst_check_deferred_commands(cmd->tgt_dev);
962         if (c != NULL) {
963                 unsigned long flags;
964                 spin_lock_irqsave(&c->cmd_lists->cmd_list_lock, flags);
965                 TRACE_SN("Adding cmd %p to active cmd list", c);
966                 list_add_tail(&c->cmd_list_entry,
967                         &c->cmd_lists->active_cmd_list);
968                 wake_up(&c->cmd_lists->cmd_list_waitQ);
969                 spin_unlock_irqrestore(&c->cmd_lists->cmd_list_lock, flags);
970         }
971 }
972
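/*
 * Common completion handler for commands executed by the SCSI mid-level:
 * stores the status, sense and residual in the cmd and, for read-only
 * devices, forces the Write Protect bit in MODE SENSE responses.
 */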
973 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
974         const uint8_t *rq_sense, int rq_sense_len, int resid)
975 {
976         unsigned char type;
977
978         TRACE_ENTRY();
979
980         if (cmd->inc_expected_sn_on_done)
981                 scst_inc_check_expected_sn(cmd);
982
983         cmd->status = result & 0xff;
984         cmd->msg_status = msg_byte(result);
985         cmd->host_status = host_byte(result);
986         cmd->driver_status = driver_byte(result);
987         if (unlikely(resid != 0)) {
988 #ifdef EXTRACHECKS
989                 if ((resid < 0) || (resid > cmd->resp_data_len)) {
990                         PRINT_ERROR_PR("Wrong resid %d (cmd->resp_data_len=%d)",
991                                 resid, cmd->resp_data_len);
992                 } else
993 #endif
994                         scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
995         }
996
997         /* 
998          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
999          * in init_scst()
1000          */
1001         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
1002         memset(&cmd->sense_buffer[rq_sense_len], 0,
1003                 sizeof(cmd->sense_buffer) - rq_sense_len);
1004
1005         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
1006               "cmd->msg_status=%x, cmd->host_status=%x, "
1007               "cmd->driver_status=%x", result, cmd->status, resid,
1008               cmd->msg_status, cmd->host_status, cmd->driver_status);
1009
1010         cmd->completed = 1;
1011
1012         if (likely(cmd->host_status != DID_RESET) &&
1013             likely(!SCST_SENSE_VALID(cmd->sense_buffer)))
1014                 scst_dec_on_dev_cmd(cmd);
1015
1016         type = cmd->dev->handler->type;
1017         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1018             cmd->tgt_dev->acg_dev->rd_only_flag &&
1019             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1020              type == TYPE_TAPE)) {
1021                 int32_t length;
1022                 uint8_t *address;
1023
1024                 length = scst_get_buf_first(cmd, &address);
1025                 TRACE_DBG("length %d", length);
1026                 if (unlikely(length <= 0)) {
1027                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1028                                 __func__);
1029                         goto out;
1030                 }
1031                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1032                         address[2] |= 0x80;   /* Write Protect*/
1033                 }
1034                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1035                         address[3] |= 0x80;   /* Write Protect*/
1036                 }
1037                 scst_put_buf(cmd, address);
1038         }
1039
1040 out:
1041         TRACE_EXIT();
1042         return;
1043 }
1044
1045 /* For small context optimization */
1046 static inline int scst_optimize_post_exec_context(struct scst_cmd *cmd,
1047         int context)
1048 {
1049         if ((context == SCST_CONTEXT_TASKLET) || 
1050             (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
1051                 if ( !test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC, 
1052                                 &cmd->tgt_dev->tgt_dev_flags))
1053                         context = SCST_CONTEXT_THREAD;
1054         }
1055         return context;
1056 }
1057
1058 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1059 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1060                                             struct scsi_request **req)
1061 {
1062         struct scst_cmd *cmd = NULL;
1063
1064         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1065                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1066
1067         if (cmd == NULL) {
1068                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1069                 if (*req)
1070                         scsi_release_request(*req);
1071         }
1072
1073         return cmd;
1074 }
1075
1076 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1077 {
1078         struct scsi_request *req = NULL;
1079         struct scst_cmd *cmd;
1080
1081         TRACE_ENTRY();
1082
1083         cmd = scst_get_cmd(scsi_cmd, &req);
1084         if (cmd == NULL)
1085                 goto out;
1086
1087         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1088                 sizeof(req->sr_sense_buffer), scsi_cmd->resid);
1089
1090         /* Clear out request structure */
1091         req->sr_use_sg = 0;
1092         req->sr_sglist_len = 0;
1093         req->sr_bufflen = 0;
1094         req->sr_buffer = NULL;
1095         req->sr_underflow = 0;
1096         req->sr_request->rq_disk = NULL; /* disown request blk */
1097
1098         scst_release_request(cmd);
1099
1100         cmd->state = SCST_CMD_STATE_DEV_DONE;
1101
1102         scst_proccess_redirect_cmd(cmd,
1103                 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1104
1105 out:
1106         TRACE_EXIT();
1107         return;
1108 }
1109 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1110 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1111 {
1112         struct scst_cmd *cmd;
1113
1114         TRACE_ENTRY();
1115
1116         cmd = (struct scst_cmd *)data;
1117         if (cmd == NULL)
1118                 goto out;
1119
1120         scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);
1121
1122         cmd->state = SCST_CMD_STATE_DEV_DONE;
1123
1124         scst_proccess_redirect_cmd(cmd,
1125                 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1126
1127 out:
1128         TRACE_EXIT();
1129         return;
1130 }
1131 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1132
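/*
 * Completion path for commands handled internally by SCST (REPORT LUNS,
 * RESERVE, RELEASE and similar): validates the requested next state and
 * redirects the command into an appropriate context.
 */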
1133 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1134 {
1135         TRACE_ENTRY();
1136
1137         if (likely(!SCST_SENSE_VALID(cmd->sense_buffer)))
1138                 scst_dec_on_dev_cmd(cmd);
1139
1140         if (cmd->inc_expected_sn_on_done)
1141                 scst_inc_check_expected_sn(cmd);
1142
1143         if (next_state == SCST_CMD_STATE_DEFAULT)
1144                 next_state = SCST_CMD_STATE_DEV_DONE;
1145
1146 #if defined(DEBUG) || defined(TRACING)
1147         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1148                 if (cmd->sg) {
1149                         int i;
1150                         struct scatterlist *sg = cmd->sg;
1151                         TRACE(TRACE_RECV_TOP, 
1152                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1153                               cmd->sg_cnt, sg, (void*)sg[0].page);
1154                         for(i = 0; i < cmd->sg_cnt; ++i) {
1155                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1156                                         "Exec'd sg", page_address(sg[i].page),
1157                                         sg[i].length);
1158                         }
1159                 }
1160         }
1161 #endif
1162
1163
1164 #ifdef EXTRACHECKS
1165         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1166             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1167             (next_state != SCST_CMD_STATE_FINISHED)) 
1168         {
1169                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1170                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1171                 scst_set_cmd_error(cmd,
1172                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1173                 next_state = SCST_CMD_STATE_DEV_DONE;
1174         }
1175 #endif
1176         cmd->state = next_state;
1177
1178         scst_proccess_redirect_cmd(cmd,
1179                 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1180
1181         TRACE_EXIT();
1182         return;
1183 }
1184
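/*
 * Internal handler for REPORT LUNS: walks the session's tgt_dev hash,
 * fills the response with one 8-byte LUN entry per device and finally
 * writes the LUN list length into the response header.
 */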
1185 static int scst_report_luns_local(struct scst_cmd *cmd)
1186 {
1187         int res = SCST_EXEC_COMPLETED, rc;
1188         int dev_cnt = 0;
1189         int buffer_size;
1190         int i;
1191         struct scst_tgt_dev *tgt_dev = NULL;
1192         uint8_t *buffer;
1193         int offs, overflow = 0;
1194
1195         TRACE_ENTRY();
1196
1197         rc = scst_check_local_events(cmd);
1198         if (unlikely(rc != 0))
1199                 goto out_done;
1200
1201         cmd->status = 0;
1202         cmd->msg_status = 0;
1203         cmd->host_status = DID_OK;
1204         cmd->driver_status = 0;
1205
1206         if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
1207                 PRINT_ERROR_PR("Unsupported SELECT REPORT value %x in REPORT "
1208                         "LUNS command", cmd->cdb[2]);
1209                 goto out_err;
1210         }
1211
1212         buffer_size = scst_get_buf_first(cmd, &buffer);
1213         if (unlikely(buffer_size <= 0))
1214                 goto out_err;
1215
1216         if (buffer_size < 16)
1217                 goto out_put_err;
1218
1219         memset(buffer, 0, buffer_size);
1220         offs = 8;
1221
1222         /* sess->sess_tgt_dev_list_hash is protected by suspended activity */
1223         for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1224                 struct list_head *sess_tgt_dev_list_head =
1225                         &cmd->sess->sess_tgt_dev_list_hash[i];
1226                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
1227                                 sess_tgt_dev_list_entry) {
1228                         if (!overflow) {
1229                                 if (offs >= buffer_size) {
1230                                         scst_put_buf(cmd, buffer);
1231                                         buffer_size = scst_get_buf_next(cmd, &buffer);
1232                                         if (buffer_size > 0) {
1233                                                 memset(buffer, 0, buffer_size);
1234                                                 offs = 0;
1235                                         } else {
1236                                                 overflow = 1;
1237                                                 goto inc_dev_cnt;
1238                                         }
1239                                 }
1240                                 if ((buffer_size - offs) < 8) {
1241                                         PRINT_ERROR_PR("Buffer allocated for REPORT "
1242                                                 "LUNS command is too small to fit an "
1243                                                 "8-byte entry (buffer_size=%d)",
1244                                                 buffer_size);
1245                                         goto out_put_hw_err;
1246                                 }
1247                                 buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
1248                                 buffer[offs+1] = tgt_dev->lun & 0xff;
1249                                 offs += 8;
1250                         }
1251 inc_dev_cnt:
1252                         dev_cnt++;
1253                 }
1254         }
1255         if (!overflow)
1256                 scst_put_buf(cmd, buffer);
1257
1258         /* Set the response header */
1259         buffer_size = scst_get_buf_first(cmd, &buffer);
1260         if (unlikely(buffer_size <= 0))
1261                 goto out_err;
1262         dev_cnt *= 8;
1263         buffer[0] = (dev_cnt >> 24) & 0xff;
1264         buffer[1] = (dev_cnt >> 16) & 0xff;
1265         buffer[2] = (dev_cnt >> 8) & 0xff;
1266         buffer[3] = dev_cnt & 0xff;
1267         scst_put_buf(cmd, buffer);
1268
1269         dev_cnt += 8;
1270         if (dev_cnt < cmd->resp_data_len)
1271                 scst_set_resp_data_len(cmd, dev_cnt);
1272
1273 out_compl:
1274         cmd->completed = 1;
1275
1276 out_done:
1277         /* Report the result */
1278         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1279
1280         TRACE_EXIT_RES(res);
1281         return res;
1282         
1283 out_put_err:
1284         scst_put_buf(cmd, buffer);
1285
1286 out_err:
1287         scst_set_cmd_error(cmd,
1288                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1289         goto out_compl;
1290
1291 out_put_hw_err:
1292         scst_put_buf(cmd, buffer);
1293         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1294         goto out_compl;
1295 }
1296
1297 static int scst_pre_select(struct scst_cmd *cmd)
1298 {
1299         int res = SCST_EXEC_NOT_COMPLETED;
1300
1301         TRACE_ENTRY();
1302
1303         if (scst_cmd_atomic(cmd)) {
1304                 res = SCST_EXEC_NEED_THREAD;
1305                 goto out;
1306         }
1307
1308         scst_block_dev_cmd(cmd, 1);
1309
1310         /* The check for local events will be done when the cmd is executed */
1311
1312 out:
1313         TRACE_EXIT_RES(res);
1314         return res;
1315 }
1316
1317 static inline void scst_report_reserved(struct scst_cmd *cmd)
1318 {
1319         TRACE_ENTRY();
1320
1321         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1322         cmd->completed = 1;
1323         /* Report the result */
1324         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1325
1326         TRACE_EXIT();
1327         return;
1328 }
1329
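/*
 * Internal handler for RESERVE: reports a reservation conflict if another
 * session already holds the reservation, otherwise marks the device as
 * reserved for all other tgt_devs.
 */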
1330 static int scst_reserve_local(struct scst_cmd *cmd)
1331 {
1332         int res = SCST_EXEC_NOT_COMPLETED, rc;
1333         struct scst_device *dev;
1334         struct scst_tgt_dev *tgt_dev_tmp;
1335
1336         TRACE_ENTRY();
1337
1338         if (scst_cmd_atomic(cmd)) {
1339                 res = SCST_EXEC_NEED_THREAD;
1340                 goto out;
1341         }
1342
1343         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1344                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1345                      "(lun=%Ld)", (uint64_t)cmd->lun);
1346                 scst_set_cmd_error(cmd,
1347                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1348                 cmd->completed = 1;
1349                 res = SCST_EXEC_COMPLETED;
1350                 goto out;
1351         }
1352
1353         dev = cmd->dev;
1354
1355         scst_block_dev_cmd(cmd, 1);
1356
1357         rc = scst_check_local_events(cmd);
1358         if (unlikely(rc != 0))
1359                 goto out_done;
1360
1361         spin_lock_bh(&dev->dev_lock);
1362
1363         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1364                 scst_report_reserved(cmd);
1365                 /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1366                 res = SCST_EXEC_COMPLETED;
1367                 goto out_unlock;
1368         }
1369
1370         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1371                             dev_tgt_dev_list_entry) 
1372         {
1373                 if (cmd->tgt_dev != tgt_dev_tmp)
1374                         set_bit(SCST_TGT_DEV_RESERVED, 
1375                                 &tgt_dev_tmp->tgt_dev_flags);
1376         }
1377         dev->dev_reserved = 1;
1378
1379 out_unlock:
1380         spin_unlock_bh(&dev->dev_lock);
1381         
1382 out:
1383         TRACE_EXIT_RES(res);
1384         return res;
1385
1386 out_done:
1387         res = SCST_EXEC_COMPLETED;
1388         /* Report the result */
1389         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1390         goto out;
1391 }
1392
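/*
 * Internal handler for RELEASE: clears the reservation bits on all
 * tgt_devs of the device, unless the reservation is held by another
 * session, in which case the command just completes with GOOD status.
 */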
1393 static int scst_release_local(struct scst_cmd *cmd)
1394 {
1395         int res = SCST_EXEC_NOT_COMPLETED, rc;
1396         struct scst_tgt_dev *tgt_dev_tmp;
1397         struct scst_device *dev;
1398
1399         TRACE_ENTRY();
1400
1401         if (scst_cmd_atomic(cmd)) {
1402                 res = SCST_EXEC_NEED_THREAD;
1403                 goto out;
1404         }
1405
1406         dev = cmd->dev;
1407
1408         scst_block_dev_cmd(cmd, 1);
1409
1410         rc = scst_check_local_events(cmd);
1411         if (unlikely(rc != 0))
1412                 goto out_done;
1413
1414         spin_lock_bh(&dev->dev_lock);
1415
1416         /*
1417          * The device could be RELEASED behind us if the RESERVING session
1418          * is closed (see scst_free_tgt_dev()), but this actually doesn't
1419          * matter, so take the lock and don't retest the DEV_RESERVED bits
1420          */
1421         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1422                 res = SCST_EXEC_COMPLETED;
1423                 cmd->status = 0;
1424                 cmd->msg_status = 0;
1425                 cmd->host_status = DID_OK;
1426                 cmd->driver_status = 0;
1427         } else {
1428                 list_for_each_entry(tgt_dev_tmp,
1429                                     &dev->dev_tgt_dev_list,
1430                                     dev_tgt_dev_list_entry) {
1431                         clear_bit(SCST_TGT_DEV_RESERVED, 
1432                                 &tgt_dev_tmp->tgt_dev_flags);
1433                 }
1434                 dev->dev_reserved = 0;
1435         }
1436
1437         spin_unlock_bh(&dev->dev_lock);
1438
1439         if (res == SCST_EXEC_COMPLETED)
1440                 goto out_done;
1441
1442 out:
1443         TRACE_EXIT_RES(res);
1444         return res;
1445
1446 out_done:
1447         res = SCST_EXEC_COMPLETED;
1448         /* Report the result */
1449         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1450         goto out;
1451 }
1452
1453 /* No locks, no IRQ or IRQ-safe context allowed */
1454 int scst_check_local_events(struct scst_cmd *cmd)
1455 {
1456         int res, rc;
1457         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1458
1459         TRACE_ENTRY();
1460
1461         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1462                 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
1463                 goto out_uncomplete;
1464         }
1465
1466         /* The reservation check must come before the Unit Attention check */
1467         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))) {
1468                 if ((cmd->cdb[0] != INQUIRY) && (cmd->cdb[0] != REPORT_LUNS) &&
1469                     (cmd->cdb[0] != RELEASE) && (cmd->cdb[0] != RELEASE_10) &&
1470                     (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1471                     (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1472                     (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
1473                 {
1474                         scst_report_reserved(cmd);
1475                         goto out_complete;
1476                 }
1477         }
1478
1479         /* If we had an internal bus reset, set the command error unit attention */
1480         if ((cmd->dev->scsi_dev != NULL) &&
1481             unlikely(cmd->dev->scsi_dev->was_reset)) {
1482                 if (scst_is_ua_command(cmd)) 
1483                 {
1484                         struct scst_device *dev = cmd->dev;
1485                         int done = 0;
1486                         /* Prevent more than 1 cmd from being triggered by was_reset */
1487                         spin_lock_bh(&dev->dev_lock);
1488                         barrier(); /* to reread was_reset */
1489                         if (dev->scsi_dev->was_reset) {
1490                                 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1491                                 scst_set_cmd_error(cmd,
1492                                            SCST_LOAD_SENSE(scst_sense_reset_UA));
1493                                 /* It looks like it is safe to clear was_reset here */
1494                                 dev->scsi_dev->was_reset = 0;
1495                                 smp_mb();
1496                                 done = 1;
1497                         }
1498                         spin_unlock_bh(&dev->dev_lock);
1499
1500                         if (done)
1501                                 goto out_complete;
1502                 }
1503         }
1504
1505         if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING, 
1506                         &cmd->tgt_dev->tgt_dev_flags))) {
1507                 if (scst_is_ua_command(cmd)) 
1508                 {
1509                         rc = scst_set_pending_UA(cmd);
1510                         if (rc == 0)
1511                                 goto out_complete;
1512                 }
1513         }
1514
1515         res = 0;
1516
1517 out:
1518         TRACE_EXIT_RES(res);
1519         return res;
1520
1521 out_complete:
1522         res = 1;
1523         cmd->completed = 1;
1524         goto out;
1525
1526 out_uncomplete:
1527         res = -1;
1528         goto out;
1529 }
1530
1531 /* 
1532  * The result of cmd execution, if any, should be reported 
1533  * via scst_cmd_done_local() 
1534  */
1535 static int scst_pre_exec(struct scst_cmd *cmd)
1536 {
1537         int res = SCST_EXEC_NOT_COMPLETED;
1538         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1539
1540         TRACE_ENTRY();
1541
1542         /* Check READ_ONLY device status */
1543         if (tgt_dev->acg_dev->rd_only_flag &&
1544             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1545              cmd->cdb[0] == WRITE_10 ||
1546              cmd->cdb[0] == WRITE_12 ||
1547              cmd->cdb[0] == WRITE_16 ||
1548              cmd->cdb[0] == WRITE_VERIFY ||
1549              cmd->cdb[0] == WRITE_VERIFY_12 ||
1550              cmd->cdb[0] == WRITE_VERIFY_16 ||
1551              (cmd->dev->handler->type == TYPE_TAPE &&
1552               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1553         {
1554                 scst_set_cmd_error(cmd,
1555                            SCST_LOAD_SENSE(scst_sense_data_protect));
1556                 goto out_done;
1557         }
1558
1559 out:
1560         TRACE_EXIT_RES(res);
1561         return res;
1562
1563 out_done:
1564         res = SCST_EXEC_COMPLETED;
1565         cmd->completed = 1;
1566         /* Report the result */
1567         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1568         goto out;
1569 }
1570
1571 /* 
1572  * The result of cmd execution, if any, should be reported 
1573  * via scst_cmd_done_local() 
1574  */
1575 static inline int scst_local_exec(struct scst_cmd *cmd)
1576 {
1577         int res = SCST_EXEC_NOT_COMPLETED;
1578
1579         TRACE_ENTRY();
1580
1581         /*
1582          * When adding new commands here, don't forget to update
1583          * scst_is_cmd_local() in scsi_tgt.h, if necessary.
1584          */
1585
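             /*
              * Opcodes not listed in the switch below leave res as
              * SCST_EXEC_NOT_COMPLETED, so the caller passes the cmd on to
              * the dev handler's exec() and/or the SCSI mid-level.
              */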
1586         switch (cmd->cdb[0]) {
1587         case MODE_SELECT:
1588         case MODE_SELECT_10:
1589         case LOG_SELECT:
1590                 res = scst_pre_select(cmd);
1591                 break;
1592         case RESERVE:
1593         case RESERVE_10:
1594                 res = scst_reserve_local(cmd);
1595                 break;
1596         case RELEASE:
1597         case RELEASE_10:
1598                 res = scst_release_local(cmd);
1599                 break;
1600         case REPORT_LUNS:
1601                 res = scst_report_luns_local(cmd);
1602                 break;
1603         }
1604
1605         TRACE_EXIT_RES(res);
1606         return res;
1607 }
1608
1609 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1610 {
1611         int rc = SCST_EXEC_NOT_COMPLETED;
1612
1613         TRACE_ENTRY();
1614
1615         /* Check here to let an out-of-SN cmd be queued w/o a context switch */
1616         if (scst_cmd_atomic(cmd) && !cmd->dev->handler->exec_atomic) {
1617                 TRACE_DBG("Dev handler %s exec() can not be "
1618                       "called in atomic context, rescheduling to the thread",
1619                       cmd->dev->handler->name);
1620                 rc = SCST_EXEC_NEED_THREAD;
1621                 goto out;
1622         }
1623
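             /*
              * Execution pipeline (as implemented below): scst_pre_exec()
              * checks (e.g. READ_ONLY protection), then scst_local_exec()
              * for CDBs handled by SCST itself, then the dev handler's
              * exec() callback, and finally the SCSI mid-level for commands
              * backed by a real scsi_device.
              */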
1624         cmd->sent_to_midlev = 1;
1625         cmd->state = SCST_CMD_STATE_EXECUTING;
1626         cmd->scst_cmd_done = scst_cmd_done_local;
1627
1628         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1629         smp_mb__after_set_bit();
1630
1631         rc = scst_pre_exec(cmd);
1632         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1633         if (rc != SCST_EXEC_NOT_COMPLETED) {
1634                 if (rc == SCST_EXEC_COMPLETED)
1635                         goto out;
1636                 else if (rc == SCST_EXEC_NEED_THREAD)
1637                         goto out_clear;
1638                 else
1639                         goto out_rc_error;
1640         }
1641
1642         rc = scst_local_exec(cmd);
1643         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1644         if (rc != SCST_EXEC_NOT_COMPLETED) {
1645                 if (rc == SCST_EXEC_COMPLETED)
1646                         goto out;
1647                 else if (rc == SCST_EXEC_NEED_THREAD)
1648                         goto out_clear;
1649                 else
1650                         goto out_rc_error;
1651         }
1652
1653         if (cmd->dev->handler->exec) {
1654                 struct scst_device *dev = cmd->dev;
1655                 TRACE_DBG("Calling dev handler %s exec(%p)",
1656                       dev->handler->name, cmd);
1657                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1658                 cmd->scst_cmd_done = scst_cmd_done_local;
1659                 rc = dev->handler->exec(cmd);
1660                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1661                 TRACE_DBG("Dev handler %s exec() returned %d",
1662                       dev->handler->name, rc);
1663                 if (rc == SCST_EXEC_COMPLETED)
1664                         goto out;
1665                 else if (rc == SCST_EXEC_NEED_THREAD)
1666                         goto out_clear;
1667                 else if (rc != SCST_EXEC_NOT_COMPLETED)
1668                         goto out_rc_error;
1669         }
1670
1671         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1672         
1673         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1674                 PRINT_ERROR_PR("Command for virtual device must be "
1675                         "processed by device handler (lun %Ld)!",
1676                         (uint64_t)cmd->lun);
1677                 goto out_error;
1678         }
1679
1680         rc = scst_check_local_events(cmd);
1681         if (unlikely(rc != 0))
1682                 goto out_done;
1683
1684 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1685         if (unlikely(scst_alloc_request(cmd) != 0)) {
1686                 if (scst_cmd_atomic(cmd)) {
1687                         rc = SCST_EXEC_NEED_THREAD;
1688                         goto out_clear;
1689                 } else {
1690                         PRINT_INFO_PR("%s", "Unable to allocate request, "
1691                                 "sending BUSY status");
1692                         goto out_busy;
1693                 }
1694         }
1695         
1696         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1697                     (void *)cmd->scsi_req->sr_buffer,
1698                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1699                     cmd->retries);
1700 #else
1701         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1702                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1703                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1704                         scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
1705         if (unlikely(rc != 0)) {
1706                 if (scst_cmd_atomic(cmd)) {
1707                         rc = SCST_EXEC_NEED_THREAD;
1708                         goto out_clear;
1709                 } else {
1710                         PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1711                         goto out_error;
1712                 }
1713         }
1714 #endif
1715
1716         rc = SCST_EXEC_COMPLETED;
1717
1718 out:
1719         TRACE_EXIT();
1720         return rc;
1721
1722 out_clear:
1723         /* Restore the state */
1724         cmd->sent_to_midlev = 0;
1725         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1726         goto out;
1727
1728 out_rc_error:
1729         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1730                     "invalid code %d", cmd->dev->handler->name, rc);
1731         /* fall through */
1732
1733 out_error:
1734         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1735
1736 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1737 out_busy:
1738         scst_set_busy(cmd);
1739         cmd->completed = 1;
1740         /* fall through */
1741 #endif
1742
1743 out_done:
1744         rc = SCST_EXEC_COMPLETED;
1745         /* Report the result. The cmd is not completed */
1746         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1747         goto out;
1748 }
1749
1750 /* No locks */
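     /*
      * Note (inferred from scst_cmd_set_sn() and scst_send_to_midlev() in
      * this file): SIMPLE/UNTAGGED cmds share the current SN and account
      * themselves in the corresponding sn_slots[] counter, so expected_sn
      * is advanced only once the whole slot drains to zero; callers with
      * no slot (slot == NULL) advance expected_sn directly.
      */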
1751 void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
1752 {
1753         if (slot == NULL)
1754                 goto inc;
1755
1756         /* Optimized for lockless fast path */
1757
1758         TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
1759                 atomic_read(slot));
1760
1761         if (!atomic_dec_and_test(slot))
1762                 goto out;
1763
1764         TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
1765                 tgt_dev->num_free_sn_slots);
1766         if (tgt_dev->num_free_sn_slots != ARRAY_SIZE(tgt_dev->sn_slots)) {
1767                 spin_lock_irq(&tgt_dev->sn_lock);
1768                 if (tgt_dev->num_free_sn_slots != ARRAY_SIZE(tgt_dev->sn_slots)) {
1769                         tgt_dev->num_free_sn_slots++;
1770                         TRACE_SN("Incremented num_free_sn_slots (%d)",
1771                                 tgt_dev->num_free_sn_slots);
1772                         if (tgt_dev->num_free_sn_slots == 0)
1773                                 tgt_dev->cur_sn_slot = slot;
1774                 }
1775                 spin_unlock_irq(&tgt_dev->sn_lock);
1776         }
1777
1778 inc:
1779         /*
1780          * No locking is needed, because only one thread at a time can
1781          * be here (serialized by SN). It is also assumed that the
1782          * increment cannot be observed half-done.
1783          */
1784         tgt_dev->expected_sn++;
1785         smp_mb(); /* write must be before def_cmd_count read */
1786         TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);
1787
1788 out:
1789         return;
1790 }
1791
1792 static int scst_send_to_midlev(struct scst_cmd *cmd)
1793 {
1794         int res, rc;
1795         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1796         struct scst_device *dev = cmd->dev;
1797         typeof(tgt_dev->expected_sn) expected_sn;
1798         int count;
1799
1800         TRACE_ENTRY();
1801
1802         res = SCST_CMD_STATE_RES_CONT_NEXT;
1803
1804         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1805                 goto out;
1806
1807         __scst_get(0); /* protect dev & tgt_dev */
1808
1809         if (unlikely(cmd->internal || cmd->retry)) {
1810                 rc = scst_do_send_to_midlev(cmd);
1811                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1812                 if (rc == SCST_EXEC_NEED_THREAD) {
1813                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1814                               "thread context, rescheduling");
1815                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1816                         scst_dec_on_dev_cmd(cmd);
1817                         goto out_put;
1818                 } else {
1819                         sBUG_ON(rc != SCST_EXEC_COMPLETED);
1820                         goto out_unplug;
1821                 }
1822         }
1823
1824         if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
1825                 goto exec;
1826
1827         sBUG_ON(!cmd->sn_set);
1828
1829         expected_sn = tgt_dev->expected_sn;
1830         /* Optimized for lockless fast path */
1831         if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
1832                 spin_lock_irq(&tgt_dev->sn_lock);
1833                 tgt_dev->def_cmd_count++;
1834                 smp_mb();
1835                 barrier(); /* to reread expected_sn & hq_cmd_count */
1836                 expected_sn = tgt_dev->expected_sn;
1837                 if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
1838                         /* We are under an IRQ-disabling lock, but dev->dev_lock is a BH one */
1839                         int cmd_blocking = scst_pre_dec_on_dev_cmd(cmd);
1840                         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1841                                 /* Necessary to allow aborting out-of-SN cmds */
1842                                 TRACE_MGMT_DBG("Aborting out of sn cmd %p (tag %llu)",
1843                                         cmd, cmd->tag);
1844                                 tgt_dev->def_cmd_count--;
1845                                 cmd->state = SCST_CMD_STATE_DEV_DONE;
1846                                 res = SCST_CMD_STATE_RES_CONT_SAME;
1847                         } else {
1848                                 TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
1849                                         "expected_sn=%ld)", cmd, cmd->sn,
1850                                         cmd->sn_set, expected_sn);
1851                                 list_add_tail(&cmd->sn_cmd_list_entry,
1852                                               &tgt_dev->deferred_cmd_list);
1853                         }
1854                         spin_unlock_irq(&tgt_dev->sn_lock);
1855                         /* !! At this point cmd can be already freed !! */
1856                         __scst_dec_on_dev_cmd(dev, cmd_blocking);
1857                         goto out_put;
1858                 } else {
1859                         TRACE_SN("Somebody incremented expected_sn %ld, "
1860                                 "continuing", expected_sn);
1861                         tgt_dev->def_cmd_count--;
1862                         spin_unlock_irq(&tgt_dev->sn_lock);
1863                 }
1864         }
1865
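         /*
          * Execute the current cmd and then keep draining: after each
          * successfully submitted cmd, advance the expected SN (unless the
          * cmd has inc_expected_sn_on_done set or carries no SN) and pick
          * up any deferred cmd that has become runnable.
          */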
1866 exec:
1867         count = 0;
1868         while(1) {
1869                 atomic_t *slot = cmd->sn_slot;
1870                 int inc_expected_sn = !cmd->inc_expected_sn_on_done &&
1871                                       cmd->sn_set;
1872                 rc = scst_do_send_to_midlev(cmd);
1873                 if (rc == SCST_EXEC_NEED_THREAD) {
1874                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1875                               "thread context, rescheduling");
1876                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1877                         scst_dec_on_dev_cmd(cmd);
1878                         if (count != 0)
1879                                 goto out_unplug;
1880                         else
1881                                 goto out_put;
1882                 }
1883                 sBUG_ON(rc != SCST_EXEC_COMPLETED);
1884                 /* !! At this point cmd can be already freed !! */
1885                 count++;
1886                 if (inc_expected_sn)
1887                         scst_inc_expected_sn(tgt_dev, slot);
1888                 cmd = scst_check_deferred_commands(tgt_dev);
1889                 if (cmd == NULL)
1890                         break;
1891                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1892                         break;
1893         }
1894
1895 out_unplug:
1896         if (dev->scsi_dev != NULL)
1897                 generic_unplug_device(dev->scsi_dev->request_queue);
1898
1899 out_put:
1900         __scst_put();
1901         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1902
1903 out:
1904         TRACE_EXIT_HRES(res);
1905         return res;
1906 }
1907
1908 /* No locks supposed to be held */
1909 static int scst_check_sense(struct scst_cmd *cmd)
1910 {
1911         int res = 0;
1912         int sense_valid;
1913         struct scst_device *dev = cmd->dev;
1914         int dbl_ua_possible, ua_sent = 0;
1915
1916         TRACE_ENTRY();
1917
1918         /* If we had an internal bus reset behind us, set the command error UA */
1919         if ((dev->scsi_dev != NULL) &&
1920             unlikely(cmd->host_status == DID_RESET) &&
1921             scst_is_ua_command(cmd))
1922         {
1923                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
1924                       dev->scsi_dev->was_reset, cmd->host_status);
1925                 scst_set_cmd_error(cmd,
1926                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1927                 /* just in case */
1928                 cmd->ua_ignore = 0;
1929                 /* It looks like it is safe to clear was_reset here */
1930                 dev->scsi_dev->was_reset = 0;
1931                 smp_mb();
1932         }
1933
1934         sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
1935
1936         dbl_ua_possible = dev->dev_double_ua_possible;
1937         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
1938         if (unlikely(dbl_ua_possible)) {
1939                 spin_lock_bh(&dev->dev_lock);
1940                 barrier(); /* to reread dev_double_ua_possible */
1941                 dbl_ua_possible = dev->dev_double_ua_possible;
1942                 if (dbl_ua_possible)
1943                         ua_sent = dev->dev_reset_ua_sent;
1944                 else
1945                         spin_unlock_bh(&dev->dev_lock);
1946         }
1947
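             /*
              * Note: if dbl_ua_possible is still set here, dev->dev_lock was
              * taken above and stays held across the sense processing below;
              * it is released either at out_unlock (retry path) or at the
              * end of the function.
              */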
1948         if (unlikely(sense_valid)) {
1949                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1950                         sizeof(cmd->sense_buffer));
1951                 /* Check Unit Attention Sense Key */
1952                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
1953                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
1954                                 if (dbl_ua_possible) {
1955                                         if (ua_sent) {
1956                                                 TRACE(TRACE_MGMT, "%s", 
1957                                                         "Double UA detected");
1958                                                 /* Do retry */
1959                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
1960                                                         "(tag %llu)", cmd, cmd->tag);
1961                                                 cmd->status = 0;
1962                                                 cmd->msg_status = 0;
1963                                                 cmd->host_status = DID_OK;
1964                                                 cmd->driver_status = 0;
1965                                                 memset(cmd->sense_buffer, 0,
1966                                                         sizeof(cmd->sense_buffer));
1967                                                 cmd->retry = 1;
1968                                                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1969                                                 res = 1;
1970                                                 /* 
1971                                                  * Dev is still blocked by this cmd, so
1972                                                  * it's OK to clear SCST_DEV_SERIALIZED
1973                                                  * here.
1974                                                  */
1975                                                 dev->dev_double_ua_possible = 0;
1976                                                 dev->dev_serialized = 0;
1977                                                 dev->dev_reset_ua_sent = 0;
1978                                                 goto out_unlock;
1979                                         } else
1980                                                 dev->dev_reset_ua_sent = 1;
1981                                 }
1982                         }
1983                         if (cmd->ua_ignore == 0) {
1984                                 if (unlikely(dbl_ua_possible)) {
1985                                         __scst_process_UA(dev, cmd,
1986                                                 cmd->sense_buffer,
1987                                                 sizeof(cmd->sense_buffer), 0);
1988                                 } else {
1989                                         scst_process_UA(dev, cmd,
1990                                                 cmd->sense_buffer,
1991                                                 sizeof(cmd->sense_buffer), 0);
1992                                 }
1993                         }
1994                 }
1995         }
1996
1997         if (unlikely(dbl_ua_possible)) {
1998                 if (ua_sent && scst_is_ua_command(cmd)) {
1999                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
2000                         dev->dev_double_ua_possible = 0;
2001                         dev->dev_serialized = 0;
2002                         dev->dev_reset_ua_sent = 0;
2003                 }
2004                 spin_unlock_bh(&dev->dev_lock);
2005         }
2006
2007 out:
2008         TRACE_EXIT_RES(res);
2009         return res;
2010
2011 out_unlock:
2012         spin_unlock_bh(&dev->dev_lock);
2013         goto out;
2014 }
2015
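     /*
      * Returns 1 if the cmd finished with CHECK CONDITION but carries no
      * valid sense, i.e. the caller should issue REQUEST SENSE (see
      * scst_done_cmd_check()); otherwise returns 0, mapping transient host
      * errors to BUSY and other host errors to HARDWARE ERROR sense.
      */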
2016 static int scst_check_auto_sense(struct scst_cmd *cmd)
2017 {
2018         int res = 0;
2019
2020         TRACE_ENTRY();
2021
2022         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2023             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
2024              SCST_NO_SENSE(cmd->sense_buffer)))
2025         {
2026                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2027                       "cmd->status=%x, cmd->msg_status=%x, "
2028                       "cmd->host_status=%x, cmd->driver_status=%x", cmd->status,
2029                       cmd->msg_status, cmd->host_status, cmd->driver_status);
2030                 res = 1;
2031         } else if (unlikely(cmd->host_status)) {
2032                 if ((cmd->host_status == DID_REQUEUE) ||
2033                     (cmd->host_status == DID_IMM_RETRY) ||
2034                     (cmd->host_status == DID_SOFT_ERROR)) {
2035                         scst_set_busy(cmd);
2036                 } else {
2037                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2038                                 "received, returning HARDWARE ERROR instead",
2039                                 cmd->host_status);
2040                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2041                 }
2042         }
2043
2044         TRACE_EXIT_RES(res);
2045         return res;
2046 }
2047
2048 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
2049 {
2050         int res = 0, rc;
2051         unsigned char type;
2052
2053         TRACE_ENTRY();
2054
2055         if (unlikely(cmd->cdb[0] == REQUEST_SENSE)) {
2056                 if (cmd->internal)
2057                         cmd = scst_complete_request_sense(cmd);
2058         } else if (unlikely(scst_check_auto_sense(cmd))) {
2059                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
2060                             "without sense data (opcode 0x%x), issuing "
2061                             "REQUEST SENSE", cmd->cdb[0]);
2062                 rc = scst_prepare_request_sense(cmd);
2063                 if (rc > 0) {
2064                         *pres = rc;
2065                         res = 1;
2066                         goto out;
2067                 } else {
2068                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
2069                                     "returning HARDWARE ERROR");
2070                         scst_set_cmd_error(cmd,
2071                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
2072                 }
2073         } else if (scst_check_sense(cmd)) {
2074                 *pres = SCST_CMD_STATE_RES_CONT_SAME;
2075                 res = 1;
2076                 goto out;
2077         }
2078
2079         type = cmd->dev->handler->type;
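             /*
              * For read-only mappings, set the WP bit in the device-specific
              * parameter byte of the MODE SENSE response (byte 2 for the
              * 6-byte, byte 3 for the 10-byte variant of the header).
              */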
2080         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
2081             cmd->tgt_dev->acg_dev->rd_only_flag &&
2082             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
2083              type == TYPE_TAPE))
2084         {
2085                 int32_t length;
2086                 uint8_t *address;
2087
2088                 length = scst_get_buf_first(cmd, &address);
2089                 if (length <= 0)
2090                         goto out;
2091                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2092                         address[2] |= 0x80;   /* Write Protect */
2093                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2094                         address[3] |= 0x80;   /* Write Protect */
2095                 scst_put_buf(cmd, address);
2096         }
2097
2098         /* 
2099          * Check and clear NormACA option for the device, if necessary,
2100          * since we don't support ACA
2101          */
2102         if ((cmd->cdb[0] == INQUIRY) &&
2103             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
2104             (cmd->resp_data_len > SCST_INQ_BYTE3))
2105         {
2106                 uint8_t *buffer;
2107                 int buflen;
2108
2109                 /* ToDo: all pages ?? */
2110                 buflen = scst_get_buf_first(cmd, &buffer);
2111                 if (buflen > 0) {
2112                         if (buflen > SCST_INQ_BYTE3) {
2113 #ifdef EXTRACHECKS
2114                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2115                                         PRINT_INFO_PR("NormACA set for device: "
2116                                             "lun=%Ld, type 0x%02x", 
2117                                             (uint64_t)cmd->lun, buffer[0]);
2118                                 }
2119 #endif
2120                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2121                         } else
2122                                 scst_set_cmd_error(cmd,
2123                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
2124
2125                         scst_put_buf(cmd, buffer);
2126                 }
2127         }
2128
2129         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
2130                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
2131                                                 &cmd->tgt_dev->tgt_dev_flags)) {
2132                         struct scst_tgt_dev *tgt_dev_tmp;
2133                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
2134                               (uint64_t)cmd->lun, cmd->status);
2135                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
2136                                      sizeof(cmd->sense_buffer));
2137                         /* Clearing the reservation */
2138                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
2139                                             dev_tgt_dev_list_entry) {
2140                                 clear_bit(SCST_TGT_DEV_RESERVED, 
2141                                         &tgt_dev_tmp->tgt_dev_flags);
2142                         }
2143                         cmd->dev->dev_reserved = 0;
2144                 }
2145         }
2146         
2147         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
2148                      (cmd->cdb[0] == MODE_SELECT_10) ||
2149                      (cmd->cdb[0] == LOG_SELECT)))
2150         {
2151                 if (cmd->status == 0) {
2152                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2153                                 "setting the SELECT UA (lun=%Ld)", 
2154                                 (uint64_t)cmd->lun);
2155                         spin_lock_bh(&scst_temp_UA_lock);
2156                         if (cmd->cdb[0] == LOG_SELECT) {
2157                                 scst_set_sense(scst_temp_UA,
2158                                         sizeof(scst_temp_UA),
2159                                         UNIT_ATTENTION, 0x2a, 0x02);
2160                         } else {
2161                                 scst_set_sense(scst_temp_UA,
2162                                         sizeof(scst_temp_UA),
2163                                         UNIT_ATTENTION, 0x2a, 0x01);
2164                         }
2165                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2166                                 sizeof(scst_temp_UA), 1);
2167                         spin_unlock_bh(&scst_temp_UA_lock);
2168                 }
2169         }
2170
2171 out:
2172         TRACE_EXIT_RES(res);
2173         return res;
2174 }
2175
2176 static int scst_dev_done(struct scst_cmd *cmd)
2177 {
2178         int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2179         int state;
2180         int atomic = scst_cmd_atomic(cmd);
2181
2182         TRACE_ENTRY();
2183
2184         if (atomic && !cmd->dev->handler->dev_done_atomic) 
2185         {
2186                 TRACE_DBG("Dev handler %s dev_done() can not be "
2187                       "called in atomic context, rescheduling to the thread",
2188                       cmd->dev->handler->name);
2189                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2190                 goto out;
2191         }
2192
2193         rc = scst_done_cmd_check(cmd, &res);
2194
2195         if (cmd->needs_unblocking)
2196                 scst_unblock_dev_cmd(cmd);
2197
2198         if (unlikely(cmd->dec_on_dev_needed))
2199                 scst_dec_on_dev_cmd(cmd);
2200
2201         if (rc)
2202                 goto out;
2203
2204         state = SCST_CMD_STATE_XMIT_RESP;
2205         if (likely(!scst_is_cmd_local(cmd)) && 
2206             likely(cmd->dev->handler->dev_done != NULL))
2207         {
2208                 int rc;
2209                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2210                       cmd->dev->handler->name, cmd);
2211                 rc = cmd->dev->handler->dev_done(cmd);
2212                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2213                       cmd->dev->handler->name, rc);
2214                 if (rc != SCST_CMD_STATE_DEFAULT)
2215                         state = rc;
2216         }
2217
2218         switch (state) {
2219         case SCST_CMD_STATE_XMIT_RESP:
2220         case SCST_CMD_STATE_DEV_PARSE:
2221         case SCST_CMD_STATE_PREPARE_SPACE:
2222         case SCST_CMD_STATE_RDY_TO_XFER:
2223         case SCST_CMD_STATE_PRE_EXEC:
2224         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2225         case SCST_CMD_STATE_DEV_DONE:
2226         case SCST_CMD_STATE_FINISHED:
2227                 cmd->state = state;
2228                 res = SCST_CMD_STATE_RES_CONT_SAME;
2229                 break;
2230
2231         case SCST_CMD_STATE_NEED_THREAD_CTX:
2232                 TRACE_DBG("Dev handler %s dev_done() requested "
2233                       "thread context, rescheduling",
2234                       cmd->dev->handler->name);
2235                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2236                 break;
2237
2238         default:
2239                 if (state >= 0) {
2240                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2241                                 "invalid cmd state %d", 
2242                                 cmd->dev->handler->name, state);
2243                 } else {
2244                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2245                                 "error %d", cmd->dev->handler->name, 
2246                                 state);
2247                 }
2248                 scst_set_cmd_error(cmd,
2249                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2250                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2251                 res = SCST_CMD_STATE_RES_CONT_SAME;
2252                 break;
2253         }
2254
2255 out:
2256         TRACE_EXIT_HRES(res);
2257         return res;
2258 }
2259
2260 static int scst_xmit_response(struct scst_cmd *cmd)
2261 {
2262         int res, rc;
2263         int atomic = scst_cmd_atomic(cmd);
2264
2265         TRACE_ENTRY();
2266
2267         /*
2268          * Check here also in order to avoid unnecessary delays of other
2269          * commands.
2270          */
2271         if (cmd->tgt_dev != NULL) {
2272                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)) {
2273                         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2274
2275                         spin_lock_irq(&tgt_dev->sn_lock);
2276                         tgt_dev->hq_cmd_count--;
2277                         spin_unlock_irq(&tgt_dev->sn_lock);
2278
2279                         EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
2280
2281                         /*
2282                          * There is no problem with checking hq_cmd_count without
2283                          * the lock. In the worst case we will only get an unneeded
2284                          * run of the deferred commands.
2285                          */
2286                         if (tgt_dev->hq_cmd_count == 0) {
2287                                 struct scst_cmd *c =
2288                                         scst_check_deferred_commands(tgt_dev);
2289                                 if (c != NULL) {
2290                                         spin_lock_irq(&c->cmd_lists->cmd_list_lock);
2291                                         TRACE_SN("Adding cmd %p to active cmd list", c);
2292                                         list_add_tail(&c->cmd_list_entry,
2293                                                 &c->cmd_lists->active_cmd_list);
2294                                         wake_up(&c->cmd_lists->cmd_list_waitQ);
2295                                         spin_unlock_irq(&c->cmd_lists->cmd_list_lock);
2296                                 }
2297                         }
2298                 }
2299
2300                 if (unlikely(!cmd->sent_to_midlev)) {
2301                         TRACE_SN("cmd %p was not sent to mid-lev (sn %ld, set %d)",
2302                                 cmd, cmd->sn, cmd->sn_set);
2303                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2304                         cmd->sent_to_midlev = 1;
2305                 }
2306         }
2307
2308         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2309                 TRACE_DBG("%s", "xmit_response() can not be "
2310                       "called in atomic context, rescheduling to the thread");
2311                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2312                 goto out;
2313         }
2314
2315         /*
2316          * If we don't remove cmd from the search list here, before
2317          * submitting it for transmission, we will have a race: if for
2318          * some reason the cmd's release is delayed after transmission and
2319          * the initiator sends a cmd with the same tag, the find()
2320          * functions could return the wrong cmd.
2321          */
2322         spin_lock_irq(&cmd->sess->sess_list_lock);
2323         list_del(&cmd->search_cmd_list_entry);
2324         spin_unlock_irq(&cmd->sess->sess_list_lock);
2325
2326         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2327         smp_mb__after_set_bit();
2328
2329         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2330                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2331                         if (cmd->completed) {
2332                                 /* It's completed and it's OK to return its result */
2333                                 clear_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2334                                 clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2335                         } else {
2336                                 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd "
2337                                         "%p (tag %llu), returning TASK ABORTED",
2338                                         cmd, cmd->tag);
2339                                 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2340                         }
2341                 }
2342         }
2343
2344         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2345                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), skipping",
2346                         cmd, cmd->tag);
2347                 cmd->state = SCST_CMD_STATE_FINISHED;
2348                 res = SCST_CMD_STATE_RES_CONT_SAME;
2349                 goto out;
2350         }
2351
2352 #ifdef DEBUG_TM
2353         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2354                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2355                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2356                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2357                         goto out;
2358                 }
2359                 TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
2360                         cmd, cmd->tag);
2361                 schedule_timeout_uninterruptible(HZ);
2362         }
2363 #endif
2364
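             /*
              * finished_cmds is snapshotted before calling xmit_response(),
              * apparently so that scst_queue_retry_cmd() can tell whether any
              * command finished in the meantime when handling QUEUE_FULL (see
              * the SCST_TGT_RES_QUEUE_FULL case below).
              */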
2365         while (1) {
2366                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2367
2368                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2369                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2370
2371                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2372
2373 #if defined(DEBUG) || defined(TRACING)
2374                 if (cmd->sg) {
2375                         int i;
2376                         struct scatterlist *sg = cmd->sg;
2377                         TRACE(TRACE_SEND_BOT,
2378                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2379                               cmd->sg_cnt, sg, (void*)sg[0].page);
2380                         for(i = 0; i < cmd->sg_cnt; ++i) {
2381                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2382                                     "Xmitting sg", page_address(sg[i].page),
2383                                     sg[i].length);
2384                         }
2385                 }
2386 #endif
2387
2388 #ifdef DEBUG_RETRY
2389                 if (((scst_random() % 100) == 77))
2390                         rc = SCST_TGT_RES_QUEUE_FULL;
2391                 else
2392 #endif
2393                         rc = cmd->tgtt->xmit_response(cmd);
2394                 TRACE_DBG("xmit_response() returned %d", rc);
2395
2396                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2397                         goto out;
2398
2399                 /* Restore the previous state */
2400                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2401
2402                 switch (rc) {
2403                 case SCST_TGT_RES_QUEUE_FULL:
2404                 {
2405                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2406                                 break;
2407                         else
2408                                 continue;
2409                 }
2410
2411                 case SCST_TGT_RES_NEED_THREAD_CTX:
2412                 {
2413                         TRACE_DBG("Target driver %s xmit_response() "
2414                               "requested thread context, rescheduling",
2415                               cmd->tgtt->name);
2416                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2417                         break;
2418                 }
2419
2420                 default:
2421                         goto out_error;
2422                 }
2423                 break;
2424         }
2425
2426 out:
2427         /* Caution: cmd can already be dead here */
2428         TRACE_EXIT_HRES(res);
2429         return res;
2430
2431 out_error:
2432         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2433                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2434                         "fatal error", cmd->tgtt->name);
2435         } else {
2436                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2437                         "invalid value %d", cmd->tgtt->name, rc);
2438         }
2439         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2440         cmd->state = SCST_CMD_STATE_FINISHED;
2441         res = SCST_CMD_STATE_RES_CONT_SAME;
2442         goto out;
2443 }
2444
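     /*
      * Intended to be called by target drivers once the response submitted
      * via xmit_response() has been delivered (as implied by the XMIT_WAIT
      * check below). Moves the cmd to FINISHED and redirects it for final
      * processing.
      */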
2445 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2446 {
2447         TRACE_ENTRY();
2448
2449         sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2450
2451         cmd->state = SCST_CMD_STATE_FINISHED;
2452         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2453
2454         TRACE_EXIT();
2455         return;
2456 }
2457
2458 static int scst_finish_cmd(struct scst_cmd *cmd)
2459 {
2460         int res;
2461
2462         TRACE_ENTRY();
2463
2464         atomic_dec(&cmd->sess->sess_cmd_count);
2465
2466         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2467                 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
2468                         "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
2469                         atomic_read(&scst_cmd_count));
2470         }
2471
2472         scst_cmd_put(cmd);
2473
2474         res = SCST_CMD_STATE_RES_CONT_NEXT;
2475
2476         TRACE_EXIT_HRES(res);
2477         return res;
2478 }
2479
2480 /*
2481  * No locks, but it must be externally serialized (see comment for
2482  * scst_cmd_init_done() in scsi_tgt.h)
2483  */
2484 static void scst_cmd_set_sn(struct scst_cmd *cmd)
2485 {
2486         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2487         unsigned long flags;
2488
2489         if (scst_is_implicit_hq(cmd)) {
2490                 TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "Implicit HQ cmd %p", cmd);
2491                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
2492         }
2493
2494         /* Optimized for lockless fast path */
2495
2496         scst_check_debug_sn(cmd);
2497
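             /*
              * SN assignment scheme implemented below: SIMPLE/UNTAGGED cmds
              * join the current SN and bump the current sn_slots[] counter
              * (a new SN is started when the slot was empty); ORDERED cmds
              * always get a fresh SN and, when a free slot is available,
              * move cur_sn_slot to an empty slot; HQ cmds bypass SN ordering
              * and are only counted in hq_cmd_count.
              */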
2498         switch(cmd->queue_type) {
2499         case SCST_CMD_QUEUE_SIMPLE:
2500         case SCST_CMD_QUEUE_UNTAGGED:
2501                 if (likely(tgt_dev->num_free_sn_slots >= 0)) {
2502                         if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
2503                                 tgt_dev->curr_sn++;
2504                                 TRACE_SN("Incremented curr_sn %ld",
2505                                         tgt_dev->curr_sn);
2506                         }
2507                         cmd->sn_slot = tgt_dev->cur_sn_slot;
2508                         cmd->sn = tgt_dev->curr_sn;
2509                         
2510                         tgt_dev->prev_cmd_ordered = 0;
2511                 } else {
2512                         TRACE(TRACE_MINOR, "%s", "Not enough SN slots");
2513                         goto ordered;
2514                 }
2515                 break;
2516
2517         case SCST_CMD_QUEUE_ORDERED:
2518                 TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "ORDERED cmd %p "
2519                         "(op %x)", cmd, cmd->cdb[0]);
2520 ordered:
2521                 if (!tgt_dev->prev_cmd_ordered) {
2522                         spin_lock_irqsave(&tgt_dev->sn_lock, flags);
2523                         tgt_dev->num_free_sn_slots--;
2524                         smp_mb();
2525                         if ((tgt_dev->num_free_sn_slots >= 0) &&
2526                             (atomic_read(tgt_dev->cur_sn_slot) > 0)) {
2527                                 do {
2528                                         tgt_dev->cur_sn_slot++;
2529                                         if (tgt_dev->cur_sn_slot == 
2530                                                 tgt_dev->sn_slots +
2531                                                 ARRAY_SIZE(tgt_dev->sn_slots))
2532                                             tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
2533                                 } while(atomic_read(tgt_dev->cur_sn_slot) != 0);
2534                                 TRACE_SN("New cur SN slot %zd",
2535                                         tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
2536                         } else
2537                                 tgt_dev->num_free_sn_slots++;
2538                         spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
2539                 }
2540                 tgt_dev->prev_cmd_ordered = 1;
2541                 tgt_dev->curr_sn++;
2542                 cmd->sn = tgt_dev->curr_sn;
2543                 break;
2544
2545         case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
2546                 TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "HQ cmd %p "
2547                         "(op %x)", cmd, cmd->cdb[0]);
2548                 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
2549                 tgt_dev->hq_cmd_count++;
2550                 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
2551                 goto out;
2552
2553         default:
2554                 PRINT_ERROR_PR("Unsupported queue type %d, treating it as "
2555                         "ORDERED", cmd->queue_type);
2556                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2557                 goto ordered;
2558         }
2559
2560         TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
2561                 "num_free_sn_slots %d, prev_cmd_ordered %ld, "
2562                 "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
2563                 atomic_read(tgt_dev->cur_sn_slot), 
2564                 tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
2565                 tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
2566
2567         cmd->sn_set = 1;
2568 out:
2569         return;
2570 }
2571
2572 /*
2573  * Returns 0 on success, > 0 when we need to wait for unblock,
2574  * < 0 if there is no device (lun) or device type handler.
2575  *
2576  * No locks, but might be on IRQ; protection is provided by the
2577  * suspended activity mechanism.
2578  */
2579 static int scst_translate_lun(struct scst_cmd *cmd)
2580 {
2581         struct scst_tgt_dev *tgt_dev = NULL;
2582         int res;
2583
2584         TRACE_ENTRY();
2585
2586         __scst_get(1);
2587
2588         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2589                 struct list_head *sess_tgt_dev_list_head =
2590                         &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
2591                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2592                         (uint64_t)cmd->lun);
2593                 res = -1;
2594                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
2595                                 sess_tgt_dev_list_entry) {
2596                         if (tgt_dev->lun == cmd->lun) {
2597                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2598
2599                                 if (unlikely(tgt_dev->dev->handler == NULL)) {
2600                                         PRINT_INFO_PR("Dev handler for device "
2601                                           "%Ld is NULL, the device will not be "
2602                                           "visible remotely", (uint64_t)cmd->lun);
2603                                         break;
2604                                 }
2605                                 
2606                                 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
2607                                 cmd->tgt_dev = tgt_dev;
2608                                 cmd->dev = tgt_dev->dev;
2609
2610                                 res = 0;
2611                                 break;
2612                         }
2613                 }
2614                 if (res != 0) {
2615                         TRACE(TRACE_MINOR, "tgt_dev for lun %Ld not found, command to "
2616                                 "unexisting LU?", (uint64_t)cmd->lun);
2617                         __scst_put();
2618                 }
2619         } else {
2620                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
2621                 __scst_put();
2622                 res = 1;
2623         }
2624
2625         TRACE_EXIT_RES(res);
2626         return res;
2627 }
2628
2629 /*
2630  * No locks, but might be on IRQ
2631  *
2632  * Returns 0 on success, > 0 when we need to wait for unblock,
2633  * < 0 if there is no device (lun) or device type handler.
2634  */
2635 static int __scst_init_cmd(struct scst_cmd *cmd)
2636 {
2637         int res = 0;
2638
2639         TRACE_ENTRY();
2640
2641         res = scst_translate_lun(cmd);
2642         if (likely(res == 0)) {
2643                 int cnt;
2644                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2645                 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
2646                 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
2647                         TRACE(TRACE_RETRY, "Too many pending commands (%d) in "
2648                                 "session, returning BUSY to initiator \"%s\"",
2649                                 cnt, (cmd->sess->initiator_name[0] == '\0') ?
2650                                   "Anonymous" : cmd->sess->initiator_name);
2651                         goto out_busy;
2652                 }
2653                 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
2654                 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
2655                         TRACE(TRACE_RETRY, "Too many pending device commands "
2656                                 "(%d), returning BUSY to initiator \"%s\"",
2657                                 cnt, (cmd->sess->initiator_name[0] == '\0') ?
2658                                   "Anonymous" : cmd->sess->initiator_name);
2659                         goto out_busy;
2660                 }
2661                 if (!cmd->set_sn_on_restart_cmd)
2662                         scst_cmd_set_sn(cmd);
2663         } else if (res < 0) {
2664                 TRACE_DBG("Finishing cmd %p", cmd);
2665                 scst_set_cmd_error(cmd,
2666                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2667                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2668         } else
2669                 goto out;
2670
2671 out:
2672         TRACE_EXIT_RES(res);
2673         return res;
2674
2675 out_busy:
2676         scst_set_busy(cmd);
2677         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2678         goto out;
2679 }
2680
2681 /* Called under scst_init_lock and IRQs disabled */
2682 static void scst_do_job_init(void)
2683 {
2684         struct scst_cmd *cmd;
2685         int susp;
2686
2687         TRACE_ENTRY();
2688
2689 restart:
2690         susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
2691         if (scst_init_poll_cnt > 0)
2692                 scst_init_poll_cnt--;
2693
2694         list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
2695                 int rc;
2696                 if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
2697                         continue;
2698                 if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2699                         spin_unlock_irq(&scst_init_lock);
2700                         rc = __scst_init_cmd(cmd);
2701                         spin_lock_irq(&scst_init_lock);
2702                         if (rc > 0) {
2703                                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, restarting");
2704                                 goto restart;
2705                         }
2706                 } else {
2707                         TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
2708                                 cmd, cmd->tag);
2709                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2710                 }
2711
2712                 /*
2713                  * Deleting cmd from the init cmd list only after __scst_init_cmd()
2714                  * is necessary to keep the check in scst_init_cmd() correct,
2715                  * i.e. to preserve the order of the commands.
2716                  *
2717                  * We don't care about the race where the init cmd list is
2718                  * empty, but one command still sees it as non-empty and so
2719                  * inserts itself into it, while another command at the same
2720                  * time sees it as empty and goes directly: that could only
2721                  * affect commands from the same initiator to the same tgt_dev,
2722                  * and init_cmd_done() doesn't guarantee the order in case of
2723                  * simultaneous such calls anyway.
2724                  */
2725                 TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
2726                 list_del(&cmd->cmd_list_entry);
2727                 spin_unlock(&scst_init_lock);
2728
2729                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2730                 TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
2731                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2732                         list_add(&cmd->cmd_list_entry,
2733                                 &cmd->cmd_lists->active_cmd_list);
2734                 else
2735                         list_add_tail(&cmd->cmd_list_entry,
2736                                 &cmd->cmd_lists->active_cmd_list);
2737                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2738                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2739
2740                 spin_lock(&scst_init_lock);
2741                 goto restart;
2742         }
2743
2744         if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
2745                 goto restart;
2746
2747         TRACE_EXIT();
2748         return;
2749 }
2750
2751 static inline int test_init_cmd_list(void)
2752 {
2753         int res = (!list_empty(&scst_init_cmd_list) &&
2754                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2755                   unlikely(kthread_should_stop()) ||
2756                   (scst_init_poll_cnt > 0);
2757         return res;
2758 }
2759
2760 int scst_init_cmd_thread(void *arg)
2761 {
2762         TRACE_ENTRY();
2763
2764         current->flags |= PF_NOFREEZE;
2765
2766         spin_lock_irq(&scst_init_lock);
2767         while (!kthread_should_stop()) {
2768                 wait_queue_t wait;
2769                 init_waitqueue_entry(&wait, current);
2770
2771                 if (!test_init_cmd_list()) {
2772                         add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
2773                                                  &wait);
2774                         for (;;) {
2775                                 set_current_state(TASK_INTERRUPTIBLE);
2776                                 if (test_init_cmd_list())
2777                                         break;
2778                                 spin_unlock_irq(&scst_init_lock);
2779                                 schedule();
2780                                 spin_lock_irq(&scst_init_lock);
2781                         }
2782                         set_current_state(TASK_RUNNING);
2783                         remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
2784                 }
2785                 scst_do_job_init();
2786         }
2787         spin_unlock_irq(&scst_init_lock);
2788
2789         /*
2790          * If kthread_should_stop() is true, we are guaranteed to be
2791          * on the module unload, so scst_init_cmd_list must be empty.
2792          */
2793         sBUG_ON(!list_empty(&scst_init_cmd_list));
2794
2795         TRACE_EXIT();
2796         return 0;
2797 }
2798
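/*
 * The function below drives a command through its processing states
 * (DEV_PARSE -> PREPARE_SPACE -> RDY_TO_XFER -> PRE_EXEC -> SEND_TO_MIDLEV ->
 * DEV_DONE -> XMIT_RESP -> FINISHED).  Each state handler returns one of:
 *  - SCST_CMD_STATE_RES_CONT_SAME: keep processing in this invocation,
 *  - SCST_CMD_STATE_RES_CONT_NEXT: nothing more to do in this invocation,
 *  - SCST_CMD_STATE_RES_NEED_THREAD: requeue the command to the head of its
 *    active cmd list so that a thread context picks it up.
 */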
2799 /* Called with no locks held */
2800 void scst_process_active_cmd(struct scst_cmd *cmd, int context)
2801 {
2802         int res;
2803
2804         TRACE_ENTRY();
2805
2806         EXTRACHECKS_BUG_ON(in_irq());
2807
2808         cmd->atomic = (context == SCST_CONTEXT_DIRECT_ATOMIC);
2809
2810         do {
2811                 switch (cmd->state) {
2812                 case SCST_CMD_STATE_DEV_PARSE:
2813                         res = scst_parse_cmd(cmd);
2814                         if ((res != SCST_CMD_STATE_RES_CONT_SAME) ||
2815                             (cmd->state != SCST_CMD_STATE_PREPARE_SPACE))
2816                                 break;
2817                         /* else fall through */
2818
2819                 case SCST_CMD_STATE_PREPARE_SPACE:
2820                         res = scst_prepare_space(cmd);
2821                         break;
2822
2823                 case SCST_CMD_STATE_RDY_TO_XFER:
2824                         res = scst_rdy_to_xfer(cmd);
2825                         break;
2826
2827                 case SCST_CMD_STATE_PRE_EXEC:
2828                         res = scst_tgt_pre_exec(cmd);
2829                         if ((res != SCST_CMD_STATE_RES_CONT_SAME) ||
2830                             (cmd->state != SCST_CMD_STATE_SEND_TO_MIDLEV))
2831                                 break;
2832                         /* else fall through */
2833
2834                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2835                         if (tm_dbg_check_cmd(cmd) != 0) {
2836                                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2837                                 TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
2838                                         "because of TM DBG delay", cmd,
2839                                         cmd->tag);
2840                                 break;
2841                         }
2842                         res = scst_send_to_midlev(cmd);
2843                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2844                         break;
2845
2846                 case SCST_CMD_STATE_DEV_DONE:
2847                         res = scst_dev_done(cmd);
2848                         if ((res != SCST_CMD_STATE_RES_CONT_SAME) ||
2849                             (cmd->state != SCST_CMD_STATE_XMIT_RESP))
2850                                 break;
2851                         /* else fall through */
2852
2853
2854                 case SCST_CMD_STATE_XMIT_RESP:
2855                         res = scst_xmit_response(cmd);
2856                         break;
2857
2858                 case SCST_CMD_STATE_FINISHED:
2859                         res = scst_finish_cmd(cmd);
2860                         break;
2861
2862                 default:
2863                         PRINT_ERROR_PR("cmd (%p) in state %d, but shouldn't be",
2864                                cmd, cmd->state);
2865                         sBUG();
2866                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2867                         break;
2868                 }
2869         } while (res == SCST_CMD_STATE_RES_CONT_SAME);
2870
2871         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2872                 /* None */
2873         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2874                 spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2875                 switch (cmd->state) {
2876                 case SCST_CMD_STATE_DEV_PARSE:
2877                 case SCST_CMD_STATE_PREPARE_SPACE:
2878                 case SCST_CMD_STATE_RDY_TO_XFER:
2879                 case SCST_CMD_STATE_PRE_EXEC:
2880                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2881                 case SCST_CMD_STATE_DEV_DONE:
2882                 case SCST_CMD_STATE_XMIT_RESP:
2883                 case SCST_CMD_STATE_FINISHED:
2884                         TRACE_DBG("Adding cmd %p to head of active cmd list", cmd);
2885                         list_add(&cmd->cmd_list_entry,
2886                                 &cmd->cmd_lists->active_cmd_list);
2887                         break;
2888 #ifdef EXTRACHECKS
2889                 /* states that should never be reached here */
2890                 case SCST_CMD_STATE_DEFAULT:
2891                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2892                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2893                                 "useful list (left on scst cmd list)", cmd, 
2894                                 cmd->state);
2895                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2896                         sBUG();
2897                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2898                         break;
2899 #endif
2900                 default:
2901                         break;
2902                 }
2903                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2904                 spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2905         } else
2906                 sBUG();
2907
2908         TRACE_EXIT();
2909         return;
2910 }
2911
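/*
 * scst_do_job_active() drains the given list one command at a time.  The
 * list lock is dropped around each scst_process_active_cmd() call, since
 * command processing may take cmd list locks itself (e.g. when requeuing
 * on SCST_CMD_STATE_RES_NEED_THREAD).
 */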
2912 /* Called under cmd_list_lock and IRQs disabled */
2913 static void scst_do_job_active(struct list_head *cmd_list,
2914         spinlock_t *cmd_list_lock, int context)
2915 {
2916         TRACE_ENTRY();
2917
2918 #ifdef EXTRACHECKS
2919         WARN_ON((context != SCST_CONTEXT_DIRECT_ATOMIC) && 
2920                 (context != SCST_CONTEXT_DIRECT));
2921 #endif
2922
2923         while (!list_empty(cmd_list)) {
2924                 struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
2925                                         cmd_list_entry);
2926                 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
2927                 list_del(&cmd->cmd_list_entry);
2928                 spin_unlock_irq(cmd_list_lock);
2929                 scst_process_active_cmd(cmd, context);
2930                 spin_lock_irq(cmd_list_lock);
2931         }
2932
2933         TRACE_EXIT();
2934         return;
2935 }
2936
2937 static inline int test_cmd_lists(struct scst_cmd_lists *p_cmd_lists)
2938 {
2939         int res = !list_empty(&p_cmd_lists->active_cmd_list) ||
2940             unlikely(kthread_should_stop()) ||
2941             tm_dbg_is_release();
2942         return res;
2943 }
2944
2945 int scst_cmd_thread(void *arg)
2946 {
2947         struct scst_cmd_lists *p_cmd_lists = (struct scst_cmd_lists*)arg;
2948
2949         TRACE_ENTRY();
2950
2951 #if 0
2952         set_user_nice(current, 10);
2953 #endif
2954         current->flags |= PF_NOFREEZE;
2955
2956         spin_lock_irq(&p_cmd_lists->cmd_list_lock);
2957         while (!kthread_should_stop()) {
2958                 wait_queue_t wait;
2959                 init_waitqueue_entry(&wait, current);
2960
2961                 if (!test_cmd_lists(p_cmd_lists)) {
2962                         add_wait_queue_exclusive(&p_cmd_lists->cmd_list_waitQ,
2963                                 &wait);
2964                         for (;;) {
2965                                 set_current_state(TASK_INTERRUPTIBLE);
2966                                 if (test_cmd_lists(p_cmd_lists))
2967                                         break;
2968                                 spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
2969                                 schedule();
2970                                 spin_lock_irq(&p_cmd_lists->cmd_list_lock);
2971                         }
2972                         set_current_state(TASK_RUNNING);
2973                         remove_wait_queue(&p_cmd_lists->cmd_list_waitQ, &wait);
2974                 }
2975
2976                 if (tm_dbg_is_release()) {
2977                         spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
2978                         tm_dbg_check_released_cmds();
2979                         spin_lock_irq(&p_cmd_lists->cmd_list_lock);
2980                 }
2981
2982                 scst_do_job_active(&p_cmd_lists->active_cmd_list,
2983                         &p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT);
2984         }
2985         spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
2986
2987 #ifdef EXTRACHECKS
2988         /*
2989          * If kthread_should_stop() is true, we are guaranteed to be either
2990          * on module unload, or there is at least one other thread left to
2991          * process the command lists.
2992          */
2993         if (p_cmd_lists == &scst_main_cmd_lists) {
2994                 sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
2995                          !list_empty(&scst_main_cmd_lists.active_cmd_list));
2996         }
2997 #endif
2998
2999         TRACE_EXIT();
3000         return 0;
3001 }
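
/*
 * A minimal sketch (not part of this file) of how such a thread is expected
 * to be started; the thread name format and the "nr" index are only
 * assumptions here:
 *
 *	struct task_struct *thr;
 *
 *	thr = kthread_run(scst_cmd_thread, &scst_main_cmd_lists,
 *			  "scst_cmd_thread%d", nr);
 *	if (IS_ERR(thr))
 *		PRINT_ERROR_PR("kthread_run() failed: %ld", PTR_ERR(thr));
 *
 * Calling kthread_stop() on the returned task makes kthread_should_stop()
 * above return true, which terminates the loop.
 */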
3002
3003 void scst_cmd_tasklet(long p)
3004 {
3005         struct scst_tasklet *t = (struct scst_tasklet*)p;
3006
3007         TRACE_ENTRY();
3008
3009         spin_lock_irq(&t->tasklet_lock);
3010         scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock,
3011                 SCST_CONTEXT_DIRECT_ATOMIC);
3012         spin_unlock_irq(&t->tasklet_lock);
3013
3014         TRACE_EXIT();
3015         return;
3016 }
3017
3018 /*
3019  * Returns 0 on success, < 0 if the corresponding tgt_dev was not found, or
3020  * > 0 if SCST_FLAG_SUSPENDED is set while SCST_FLAG_SUSPENDING is not.
3021  * No locks; protection is provided by the suspended activity.
3022  */
3023 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
3024 {
3025         struct scst_tgt_dev *tgt_dev = NULL;
3026         struct list_head *sess_tgt_dev_list_head;
3027         int res = -1;
3028
3029         TRACE_ENTRY();
3030
3031         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
3032               (uint64_t)mcmd->lun);
3033
3034         __scst_get(1);
3035
3036         if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
3037                      !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
3038                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
3039                 __scst_put();
3040                 res = 1;
3041                 goto out;
3042         }
3043
3044         sess_tgt_dev_list_head =
3045                 &mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
3046         list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3047                         sess_tgt_dev_list_entry) {
3048                 if (tgt_dev->lun == mcmd->lun) {
3049                         TRACE_DBG("tgt_dev %p found", tgt_dev);
3050                         mcmd->mcmd_tgt_dev = tgt_dev;
3051                         res = 0;
3052                         break;
3053                 }
3054         }
3055         if (mcmd->mcmd_tgt_dev == NULL)
3056                 __scst_put();
3057
3058 out:
3059         TRACE_EXIT_HRES(res);
3060         return res;
3061 }
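
/*
 * Note on the reference counting above: __scst_get(1) is taken unconditionally,
 * but __scst_put() is called here only when no matching tgt_dev is found or
 * when activities are suspended.  On success the reference is kept for the
 * mgmt command and is presumably dropped when it finishes (not shown here).
 */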
3062
3063 /* No locks */
3064 void scst_complete_cmd_mgmt(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd)
3065 {
3066         TRACE_ENTRY();
3067
3068         spin_lock_irq(&scst_mcmd_lock);
3069
3070         TRACE_MGMT_DBG("cmd %p completed (tag %llu, mcmd %p, "
3071                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
3072                 mcmd->cmd_wait_count);
3073
3074         cmd->mgmt_cmnd = NULL;
3075
3076         if (cmd->completed)
3077                 mcmd->completed_cmd_count++;
3078
3079         mcmd->cmd_wait_count--;
3080         if (mcmd->cmd_wait_count > 0) {
3081                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
3082                         mcmd->cmd_wait_count);
3083                 goto out_unlock;
3084         }
3085
3086         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3087
3088         if (mcmd->completed) {
3089                 TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list",
3090                         mcmd);
3091                 list_add_tail(&mcmd->mgmt_cmd_list_entry,
3092                         &scst_active_mgmt_cmd_list);
3093         }
3094
3095         spin_unlock_irq(&scst_mcmd_lock);
3096
3097         wake_up(&scst_mgmt_cmd_list_waitQ);
3098
3099 out:
3100         TRACE_EXIT();
3101         return;
3102
3103 out_unlock:
3104         spin_unlock_irq(&scst_mcmd_lock);
3105         goto out;
3106 }
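
/*
 * mcmd->cmd_wait_count is incremented in scst_abort_cmd() below for every
 * command whose abort has to be waited for; scst_complete_cmd_mgmt() above
 * decrements it and, once it reaches zero with mcmd->completed set, requeues
 * the mgmt command onto scst_active_mgmt_cmd_list and wakes up waiters on
 * scst_mgmt_cmd_list_waitQ.
 */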
3107
3108 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
3109         struct scst_tgt_dev *tgt_dev, int set_status)
3110 {
3111         int res = SCST_DEV_TM_NOT_COMPLETED;
3112         struct scst_dev_type *h = tgt_dev->dev->handler;
3113
3114         if (h->task_mgmt_fn) {
3115                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
3116                         h->name, mcmd->fn);
3117                 EXTRACHECKS_BUG_ON(in_irq());
3118                 res = h->task_mgmt_fn(mcmd, tgt_dev);
3119                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
3120                       h->name, res);
3121                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
3122                         mcmd->status = res;
3123         }
3124         return res;
3125 }
3126
3127 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
3128 {
3129         switch (mgmt_fn) {
3130                 case SCST_ABORT_TASK:
3131                 case SCST_ABORT_TASK_SET:
3132                 case SCST_CLEAR_TASK_SET:
3133                         return 1;
3134                 default:
3135                         return 0;
3136         }
3137 }
3138
3139 /* 
3140  * Might be called under sess_list_lock with both IRQs and BHs off.
3141  * If the command is still being executed, the ABORT response is deferred via mcmd.
3142  */
3143 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
3144         int other_ini, int call_dev_task_mgmt_fn)
3145 {
3146         TRACE_ENTRY();
3147
3148         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %llu)", cmd, cmd->tag);
3149
3150         if (other_ini) {
3151                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3152                 smp_mb__after_set_bit();
3153         }
3154         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
3155         smp_mb__after_set_bit();
3156
3157         if (cmd->tgt_dev == NULL) {
3158                 unsigned long flags;
3159                 spin_lock_irqsave(&scst_init_lock, flags);
3160                 scst_init_poll_cnt++;
3161                 spin_unlock_irqrestore(&scst_init_lock, flags);
3162                 wake_up(&scst_init_cmd_list_waitQ);
3163         }
3164
3165         if (call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL)) {
3166                 EXTRACHECKS_BUG_ON(irqs_disabled());
3167                 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
3168         }
3169
3170         if (mcmd) {
3171                 unsigned long flags;
3172                 /*
3173                  * Delay the response until the command's finish in
3174                  * order to guarantee that "no further responses from
3175                  * the task are sent to the SCSI initiator port" after
3176                  * response from the TM function is sent (SAM). Plus,
3177                  * we must wait here to be sure that we won't receive
3178                  * double commands with the same tag.
3179                  */
3180                 TRACE(TRACE_MGMT, "cmd %p (tag %llu) being executed/"
3181                         "xmitted (state %d), deferring ABORT...", cmd,
3182                         cmd->tag, cmd->state);
3183 #ifdef EXTRACHECKS
3184                 if (cmd->mgmt_cmnd) {
3185                         printk(KERN_ALERT "cmd %p (tag %llu, state %d) "
3186                                 "has non-NULL mgmt_cmnd %p!!! Current "
3187                                 "mcmd %p\n", cmd, cmd->tag, cmd->state,
3188                                 cmd->mgmt_cmnd, mcmd);
3189                 }
3190 #endif
3191                 sBUG_ON(cmd->mgmt_cmnd);
3192                 spin_lock_irqsave(&scst_mcmd_lock, flags);
3193                 mcmd->cmd_wait_count++;
3194                 spin_unlock_irqrestore(&scst_mcmd_lock, flags);
3195                 /* cmd can't die here or sess_list_lock already taken */
3196                 cmd->mgmt_cmnd = mcmd;
3197         }
3198
3199         tm_dbg_release_cmd(cmd);
3200
3201         TRACE_EXIT();
3202         return;
3203 }
3204
3205 /* No locks */
3206 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
3207 {
3208         int res;
3209         spin_lock_irq(&scst_mcmd_lock);
3210         if (mcmd->cmd_wait_count != 0) {
3211                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
3212                         "wait", mcmd->cmd_wait_count);
3213                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
3214                 res = -1;
3215         } else {
3216                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3217                 res = 0;
3218         }
3219         mcmd->completed = 1;
3220         spin_unlock_irq(&scst_mcmd_lock);
3221         return res;
3222 }
3223
3224 static int __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd)
3225 {
3226         int res;
3227         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3228                 TRACE_MGMT_DBG("Adding aborted blocked cmd %p to active cmd "
3229                         "list", cmd);
3230                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3231                 list_add_tail(&cmd->cmd_list_entry,
3232                         &cmd->cmd_lists->active_cmd_list);
3233                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3234                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3235                 res = 1;
3236         } else
3237                 res = 0;
3238         return res;
3239 }
3240
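/*
 * scst_unblock_aborted_cmds() walks every device's blocked_cmd_list and
 * every tgt_dev's deferred SN list and, via the helper above, moves any
 * aborted command onto its active cmd list, so it can be completed without
 * waiting for the blocking/deferral condition to clear.
 */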
3241 static void scst_unblock_aborted_cmds(int scst_mutex_held)
3242 {
3243         struct scst_device *dev;
3244
3245         TRACE_ENTRY();
3246
3247         if (!scst_mutex_held)
3248                 mutex_lock(&scst_mutex);
3249
3250         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3251                 struct scst_cmd *cmd, *tcmd;
3252                 struct scst_tgt_dev *tgt_dev;
3253                 spin_lock_bh(&dev->dev_lock);
3254                 local_irq_disable();
3255                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3256                                         blocked_cmd_list_entry) {
3257                         if (__scst_check_unblock_aborted_cmd(cmd))
3258                                 list_del(&cmd->blocked_cmd_list_entry);
3259                 }
3260                 local_irq_enable();
3261                 spin_unlock_bh(&dev->dev_lock);
3262
3263                 local_irq_disable();
3264                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3265                                          dev_tgt_dev_list_entry) {
3266                         spin_lock(&tgt_dev->sn_lock);
3267                         list_for_each_entry_safe(cmd, tcmd,
3268                                         &tgt_dev->deferred_cmd_list,
3269                                         sn_cmd_list_entry) {
3270                                 if (__scst_check_unblock_aborted_cmd(cmd)) {
3271                                         TRACE_MGMT_DBG("Deleting aborted SN "
3272                                                 "cmd %p from SN list", cmd);
3273                                         tgt_dev->def_cmd_count--;
3274                                         list_del(&cmd->sn_cmd_list_entry);
3275                                 }
3276                         }
3277                         spin_unlock(&tgt_dev->sn_lock);
3278                 }
3279                 local_irq_enable();
3280         }
3281
3282         if (!scst_mutex_held)
3283                 mutex_unlock(&scst_mutex);
3284
3285         TRACE_EXIT();
3286         return;
3287 }
3288
3289 /* Aborts all matching commands of the given tgt_dev (no return value) */
3290 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
3291         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
3292 {
3293         struct scst_cmd *cmd;
3294         struct scst_session *sess = tgt_dev->sess;
3295
3296         TRACE_ENTRY();
3297
3298         spin_lock_irq(&sess->sess_list_lock);
3299
3300         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
3301         list_for_each_entry(cmd, &sess->search_cmd_list, 
3302                         search_cmd_list_entry) {
3303                 if ((cmd->tgt_dev == tgt_dev) ||
3304                     ((cmd->tgt_dev == NULL) && 
3305                      (cmd->lun == tgt_dev->lun))) {
3306                         if (mcmd->cmd_sn_set) {
3307                                 sBUG_ON(!cmd->tgt_sn_set);
3308                                 if (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
3309                                     (mcmd->cmd_sn == cmd->tgt_sn))
3310                                         continue;
3311                         }
3312                         scst_abort_cmd(cmd, mcmd, other_ini, 0);
3313                 }
3314         }
3315         spin_unlock_irq(&sess->sess_list_lock);
3316
3317         scst_unblock_aborted_cmds(scst_mutex_held);
3318
3319         TRACE_EXIT();
3320         return;
3321 }
3322
3323 /* Returns 0 if the command processing should be continued, <0 otherwise */
3324 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
3325 {
3326         int res;
3327         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3328         struct scst_device *dev = tgt_dev->dev;
3329
3330         TRACE(TRACE_MGMT, "Aborting task set (lun=%Ld, mcmd=%p)",
3331                 tgt_dev->lun, mcmd);
3332
3333         mcmd->needs_unblocking = 1;
3334
3335         spin_lock_bh(&dev->dev_lock);
3336         __scst_block_dev(dev);
3337         spin_unlock_bh(&dev->dev_lock);
3338
3339         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
3340         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3341
3342         res = scst_set_mcmd_next_state(mcmd);
3343
3344         TRACE_EXIT_RES(res);
3345         return res;
3346 }
3347
3348 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3349 {
3350         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags) && !mcmd->active) {
3351                 TRACE_MGMT_DBG("Adding mgmt cmd %p to delayed mgmt cmd list",
3352                         mcmd);
3353                 spin_lock_irq(&scst_mcmd_lock);
3354                 list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3355                         &scst_delayed_mgmt_cmd_list);
3356                 spin_unlock_irq(&scst_mcmd_lock);
3357                 return -1;
3358         } else {
3359                 mcmd->active = 1;
3360                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3361                 return 0;
3362         }
3363 }
3364
3365 /* Returns 0 if the command processing should be continued,
3366  * >0 if it should be requeued, <0 otherwise */
3367 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3368 {
3369         int res = 0, rc;
3370
3371         TRACE_ENTRY();
3372
3373         res = scst_check_delay_mgmt_cmd(mcmd);
3374         if (res != 0)
3375                 goto out;
3376
3377         mcmd->state = SCST_MGMT_CMD_STATE_READY;
3378
3379         switch (mcmd->fn) {
3380         case SCST_ABORT_TASK:
3381         {
3382                 struct scst_session *sess = mcmd->sess;
3383                 struct scst_cmd *cmd;
3384
3385                 spin_lock_irq(&sess->sess_list_lock);
3386                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3387                 if (cmd == NULL) {
3388                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3389                                 "tag %llu not found", mcmd->tag);
3390                         mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
3391                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3392                         spin_unlock_irq(&sess->sess_list_lock);
3393                         goto out;
3394                 }
3395                 scst_cmd_get(cmd);
3396                 spin_unlock_irq(&sess->sess_list_lock);
3397                 TRACE(TRACE_MGMT, "Cmd %p for tag %llu (sn %ld, set %d, "
3398                         "queue_type %x) found, aborting it", cmd, mcmd->tag,
3399                         cmd->sn, cmd->sn_set, cmd->queue_type);
3400                 mcmd->cmd_to_abort = cmd;
3401                 if (mcmd->lun_set && (mcmd->lun != cmd->lun)) {
3402                         PRINT_ERROR_PR("ABORT TASK: LUN mismatch: mcmd LUN %Lx, "
3403                                 "cmd LUN %Lx, cmd tag %Lu", mcmd->lun, cmd->lun,
3404                                 mcmd->tag);
3405                         mcmd->status = SCST_MGMT_STATUS_REJECTED;
3406                 } else if (mcmd->cmd_sn_set && 
3407                            (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
3408                             (mcmd->cmd_sn == cmd->tgt_sn))) {
3409                         PRINT_ERROR_PR("ABORT TASK: SN mismatch: mcmd SN %x, "
3410                                 "cmd SN %x, cmd tag %Lu", mcmd->cmd_sn,
3411                                 cmd->tgt_sn, mcmd->tag);
3412                         mcmd->status = SCST_MGMT_STATUS_REJECTED;
3413                 } else {
3414                         scst_abort_cmd(cmd, mcmd, 0, 1);
3415                         scst_unblock_aborted_cmds(0);
3416                 }
3417                 res = scst_set_mcmd_next_state(mcmd);
3418                 mcmd->cmd_to_abort = NULL; /* just in case */
3419                 scst_cmd_put(cmd);
3420                 break;
3421         }
3422
3423         case SCST_TARGET_RESET:
3424         case SCST_ABORT_ALL_TASKS:
3425         case SCST_NEXUS_LOSS:
3426                 break;
3427
3428         default:
3429                 rc = scst_mgmt_translate_lun(mcmd);
3430                 if (rc < 0) {
3431                         PRINT_ERROR_PR("Corresponding device for lun %Ld not "
3432                                 "found", (uint64_t)mcmd->lun);
3433                         mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
3434                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3435                 } else if (rc != 0)
3436                         res = rc;
3437                 break;
3438         }
3439
3440 out:
3441         TRACE_EXIT_RES(res);
3442         return res;
3443 }
3444
3445 /* Returns 0 if the command processing should be continued, <0 otherwise */
3446 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3447 {
3448         int res, rc;
3449         struct scst_device *dev, *d;
3450         struct scst_tgt_dev *tgt_dev;
3451         int cont, c;
3452         LIST_HEAD(host_devs);
3453
3454         TRACE_ENTRY();
3455
3456         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3457                 mcmd, atomic_read(&mcmd->sess->sess_cmd_count));
3458
3459         mcmd->needs_unblocking = 1;
3460
3461         mutex_lock(&scst_mutex);
3462
3463         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3464                 int found = 0;
3465
3466                 spin_lock_bh(&dev->dev_lock);
3467                 __scst_block_dev(dev);
3468                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3469                 spin_unlock_bh(&dev->dev_lock);
3470
3471                 cont = 0;
3472                 c = 0;
3473                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3474                         dev_tgt_dev_list_entry) 
3475                 {
3476                         cont = 1;
3477                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3478                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3479                                 c = 1;
3480                         else if ((rc < 0) &&
3481                                  (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
3482                                 mcmd->status = rc;
3483                 }
3484                 if (cont && !c)
3485                         continue;
3486                 
3487                 if (dev->scsi_dev == NULL)
3488                         continue;
3489
3490                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3491                         if (dev->scsi_dev->host->host_no ==
3492                                     d->scsi_dev->host->host_no) 
3493                         {
3494                                 found = 1;
3495                                 break;
3496                         }
3497                 }
3498                 if (!found)
3499                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3500         }
3501
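        /*
         * host_devs now holds at most one device per SCSI host (duplicates by
         * host_no were filtered out above), so each host's bus is reset only
         * once in the loop below.
         */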
3502         /*
3503          * We assume here that completion callbacks will be called for all
3504          * commands already on the devices when/after scsi_reset_provider().
3505          */
3506
3507         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3508                 /* dev->scsi_dev must be non-NULL here */
3509                 TRACE(TRACE_MGMT, "Resetting host %d bus",
3510                       dev->scsi_dev->host->host_no);
3511                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3512                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3513                       dev->scsi_dev->host->host_no,
3514                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3515                 if ((rc != SUCCESS) &&
3516                     (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
3517                         /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3518                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3519                 }
3520         }
3521
3522         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3523                 if (dev->scsi_dev != NULL)
3524                         dev->scsi_dev->was_reset = 0;
3525         }
3526
3527         mutex_unlock(&scst_mutex);
3528
3529         tm_dbg_task_mgmt("TARGET RESET", 0);
3530         res = scst_set_mcmd_next_state(mcmd);
3531
3532         TRACE_EXIT_RES(res);
3533         return res;
3534 }
3535
3536 /* Returns 0 if the command processing should be continued, <0 otherwise */
3537 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3538 {
3539         int res, rc;
3540         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3541         struct scst_device *dev = tgt_dev->dev;
3542
3543         TRACE_ENTRY();
3544
3545         TRACE(TRACE_MGMT, "Resetting lun %Ld (mcmd %p)", tgt_dev->lun, mcmd);
3546
3547         mcmd->needs_unblocking = 1;
3548