1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28 #include <linux/kthread.h>
29
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static void scst_cmd_set_sn(struct scst_cmd *cmd);
34 static int __scst_init_cmd(struct scst_cmd *cmd);
35
36 static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
37 {
38         struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
39         unsigned long flags;
40
41         spin_lock_irqsave(&t->tasklet_lock, flags);
42         TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
43                 smp_processor_id());
44         list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
45         spin_unlock_irqrestore(&t->tasklet_lock, flags);
46
47         tasklet_schedule(&t->tasklet);
48 }
49
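/*
 * Rough usage sketch for a target driver delivering a new command to SCST
 * (illustrative only; sess, lun_buf, cdb_buf, tag etc. are whatever the
 * transport provides, and error handling is omitted):
 *
 *      cmd = scst_rx_cmd(sess, lun_buf, lun_len, cdb_buf, cdb_len, atomic);
 *      if (cmd == NULL)
 *              return -ENOMEM;
 *      cmd->tag = tag;
 *      scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
 */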
50 /* 
51  * Must not be called in parallel with scst_unregister_session() for the
52  * same sess
53  */
54 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
55                              const uint8_t *lun, int lun_len,
56                              const uint8_t *cdb, int cdb_len, int atomic)
57 {
58         struct scst_cmd *cmd;
59
60         TRACE_ENTRY();
61
62 #ifdef EXTRACHECKS
63         if (unlikely(sess->shutting_down)) {
64                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
65                 sBUG();
66         }
67 #endif
68
69         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
70         if (cmd == NULL)
71                 goto out;
72
73         cmd->sess = sess;
74         cmd->tgt = sess->tgt;
75         cmd->tgtt = sess->tgt->tgtt;
76         cmd->state = SCST_CMD_STATE_INIT_WAIT;
77
78         /* 
79          * For both a wrong LUN and a wrong CDB, defer the error reporting
80          * to scst_cmd_init_done()
81          */
82
83         cmd->lun = scst_unpack_lun(lun, lun_len);
84
85         if (cdb_len <= SCST_MAX_CDB_SIZE) {
86                 memcpy(cmd->cdb, cdb, cdb_len);
87                 cmd->cdb_len = cdb_len;
88         }
89
90         TRACE_DBG("cmd %p, sess %p", cmd, sess);
91         scst_sess_get(sess);
92
93 out:
94         TRACE_EXIT();
95         return cmd;
96 }
97
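/*
 * Completes cmd initialization and returns the context in which processing
 * should continue (possibly downgraded to SCST_CONTEXT_THREAD), or -1 if the
 * cmd had to be deferred to the init thread via scst_init_cmd_list and must
 * not be touched by the caller anymore.
 */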
98 static int scst_init_cmd(struct scst_cmd *cmd, int context)
99 {
100         int rc;
101
102         TRACE_ENTRY();
103
104         /* See the comment in scst_do_job_init() */
105         if (unlikely(!list_empty(&scst_init_cmd_list))) {
106                 TRACE_MGMT_DBG("%s", "init cmd list busy");
107                 goto out_redirect;
108         }
109         smp_rmb();
110
111         rc = __scst_init_cmd(cmd);
112         if (unlikely(rc > 0))
113                 goto out_redirect;
114         else if (unlikely(rc != 0))
115                 goto out;
116
117         /* Small context optimization */
118         if (((context == SCST_CONTEXT_TASKLET) ||
119              (context == SCST_CONTEXT_DIRECT_ATOMIC)) && 
120             scst_cmd_is_expected_set(cmd)) {
121                 if (cmd->expected_data_direction == SCST_DATA_WRITE) {
122                         if ( !test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
123                                         &cmd->tgt_dev->tgt_dev_flags))
124                                 context = SCST_CONTEXT_THREAD;
125                 } else {
126                         if ( !test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
127                                         &cmd->tgt_dev->tgt_dev_flags))
128                                 context = SCST_CONTEXT_THREAD;
129                 }
130         }
131
132 out:
133         TRACE_EXIT_RES(context);
134         return context;
135
136 out_redirect:
137         if (cmd->preprocessing_only) {
138                 /*
139                  * Poor man's solution for single-threaded targets, where
140                  * blocking the receiver at least sometimes means blocking everything.
141                  */
142                 sBUG_ON(context != SCST_CONTEXT_DIRECT);
143                 scst_set_busy(cmd);
144                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
145         } else {
146                 unsigned long flags;
147                 spin_lock_irqsave(&scst_init_lock, flags);
148                 TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
149                         "%d)", cmd, atomic_read(&scst_cmd_count));
150                 list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
151                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
152                         scst_init_poll_cnt++;
153                 spin_unlock_irqrestore(&scst_init_lock, flags);
154                 wake_up(&scst_init_cmd_list_waitQ);
155                 context = -1;
156         }
157         goto out;
158 }
159
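/*
 * Notification from the target driver that cmd initialization is finished.
 * pref_context is only a hint: SCST_CONTEXT_DIRECT[_ATOMIC] continue on the
 * caller's stack, SCST_CONTEXT_TASKLET defers to a per-CPU tasklet and
 * SCST_CONTEXT_THREAD queues the cmd for an SCST processing thread; the hint
 * may be downgraded below if the tgt_dev doesn't allow atomic processing.
 */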
160 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
161 {
162         unsigned long flags;
163         struct scst_session *sess = cmd->sess;
164
165         TRACE_ENTRY();
166
167         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
168         TRACE(TRACE_SCSI, "tag=%lld, lun=%Ld, CDB len=%d", cmd->tag, 
169                 (uint64_t)cmd->lun, cmd->cdb_len);
170         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
171                 cmd->cdb, cmd->cdb_len);
172
173 #ifdef EXTRACHECKS
174         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
175                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
176         {
177                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
178                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
179                         cmd->tgtt->name);
180                 pref_context = SCST_CONTEXT_TASKLET;
181         }
182 #endif
183
184         atomic_inc(&sess->sess_cmd_count);
185
186         spin_lock_irqsave(&sess->sess_list_lock, flags);
187
188         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
189
190         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
191                 switch(sess->init_phase) {
192                 case SCST_SESS_IPH_SUCCESS:
193                         break;
194                 case SCST_SESS_IPH_INITING:
195                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
196                         list_add_tail(&cmd->cmd_list_entry, 
197                                 &sess->init_deferred_cmd_list);
198                         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
199                         goto out;
200                 case SCST_SESS_IPH_FAILED:
201                         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
202                         scst_set_busy(cmd);
203                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
204                         goto active;
205                 default:
206                         sBUG();
207                 }
208         }
209
210         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
211
212         if (unlikely(cmd->lun == (lun_t)-1)) {
213                 PRINT_ERROR_PR("Wrong LUN %d, finishing cmd", -1);
214                 scst_set_cmd_error(cmd,
215                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
216                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
217                 goto active;
218         }
219
220         if (unlikely(cmd->cdb_len == 0)) {
221                 PRINT_ERROR_PR("Wrong CDB len %d, finishing cmd", 0);
222                 scst_set_cmd_error(cmd,
223                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
224                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
225                 goto active;
226         }
227
228         cmd->state = SCST_CMD_STATE_INIT;
229         /* cmd must be initialized here to preserve the ordering */
230         pref_context = scst_init_cmd(cmd, pref_context);
231         if (unlikely(pref_context < 0))
232                 goto out;
233
234 active:
235         /* At this point the cmd must not be on any cmd list; no locks held */
236         switch (pref_context) {
237         case SCST_CONTEXT_TASKLET:
238                 scst_schedule_tasklet(cmd);
239                 break;
240
241         case SCST_CONTEXT_DIRECT:
242         case SCST_CONTEXT_DIRECT_ATOMIC:
243                 scst_process_active_cmd(cmd, pref_context);
244                 /* For *NEED_THREAD wake_up() is already done */
245                 break;
246
247         default:
248                 PRINT_ERROR_PR("Context %x is undefined, using the thread one",
249                         pref_context);
250                 /* fall through */
251         case SCST_CONTEXT_THREAD:
252                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
253                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
254                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
255                         list_add(&cmd->cmd_list_entry,
256                                 &cmd->cmd_lists->active_cmd_list);
257                 else
258                         list_add_tail(&cmd->cmd_list_entry,
259                                 &cmd->cmd_lists->active_cmd_list);
260                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
261                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
262                 break;
263         }
264
265 out:
266         TRACE_EXIT();
267         return;
268 }
269
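/*
 * Translates the CDB into data direction, transfer length and the next cmd
 * state: first via scst_get_cdb_info() (backed by scst_scsi_op_table), then
 * via the dev handler's parse() callback, which may override the defaults.
 */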
270 static int scst_parse_cmd(struct scst_cmd *cmd)
271 {
272         int res = SCST_CMD_STATE_RES_CONT_SAME;
273         int state;
274         struct scst_device *dev = cmd->dev;
275         struct scst_info_cdb cdb_info;
276         int atomic = scst_cmd_atomic(cmd);
277         int orig_bufflen;
278
279         TRACE_ENTRY();
280
281         if (atomic && !dev->handler->parse_atomic) {
282                 TRACE_DBG("Dev handler %s parse() can not be "
283                       "called in atomic context, rescheduling to the thread",
284                       dev->handler->name);
285                 res = SCST_CMD_STATE_RES_NEED_THREAD;
286                 goto out;
287         }
288
289         cmd->inc_expected_sn_on_done = dev->handler->inc_expected_sn_on_done;
290
291         if (cmd->skip_parse || cmd->internal)
292                 goto call_parse;
293
294         /*
295          * The expected transfer data supplied by the SCSI transport via the
296          * target driver are untrusted, so we prefer to fetch them from the CDB.
297          * Additionally, not all transports support supplying the expected
298          * transfer data.
299          */
300
301         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
302                         &cdb_info) != 0)) 
303         {
304                 static int t;
305                 if (t < 10) {
306                         t++;
307                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
308                                 "Should you update scst_scsi_op_table?",
309                                 cmd->cdb[0], dev->handler->name);
310                 }
311                 if (scst_cmd_is_expected_set(cmd)) {
312                         TRACE(TRACE_SCSI, "Using initiator supplied values: "
313                                 "direction %d, transfer_len %d",
314                                 cmd->expected_data_direction,
315                                 cmd->expected_transfer_len);
316                         cmd->data_direction = cmd->expected_data_direction;
317                         cmd->bufflen = cmd->expected_transfer_len;
318                         /* Restore the CDB length, which was most probably lost */
319                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
320                         if (cmd->cdb_len == -1) {
321                                 PRINT_ERROR_PR("Unable to get CDB length for "
322                                         "opcode 0x%02x. Returning INVALID "
323                                         "OPCODE", cmd->cdb[0]);
324                                 scst_set_cmd_error(cmd,
325                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
326                                 goto out_xmit;
327                         }
328                 } else {
329                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
330                              "target %s did not supply expected values. "
331                              "Returning INVALID OPCODE.", cmd->cdb[0], 
332                              dev->handler->name, cmd->tgtt->name);
333                         scst_set_cmd_error(cmd,
334                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
335                         goto out_xmit;
336                 }
337         } else {
338                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
339                         "set %s), transfer_len=%d (expected len %d), flags=%d",
340                         cdb_info.op_name, cdb_info.direction,
341                         cmd->expected_data_direction,
342                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
343                         cdb_info.transfer_len, cmd->expected_transfer_len,
344                         cdb_info.flags);
345
346                 /* Restore the CDB length, which was most probably lost */
347                 cmd->cdb_len = cdb_info.cdb_len;
348
349                 cmd->data_direction = cdb_info.direction;
350                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
351                         cmd->bufflen = cdb_info.transfer_len;
352                 /* else cmd->bufflen remains 0, as it was initialized */
353         }
354
355         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
356                 PRINT_ERROR_PR("NACA bit in the CDB control byte is not supported "
357                             "(opcode 0x%02x)", cmd->cdb[0]);
358                 scst_set_cmd_error(cmd,
359                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
360                 goto out_xmit;
361         }
362
363         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
364                 PRINT_ERROR_PR("Linked commands are not supported "
365                             "(opcode 0x%02x)", cmd->cdb[0]);
366                 scst_set_cmd_error(cmd,
367                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
368                 goto out_xmit;
369         }
370
371 call_parse:
372         orig_bufflen = cmd->bufflen;
373
374         if (likely(!scst_is_cmd_local(cmd))) {
375                 TRACE_DBG("Calling dev handler %s parse(%p)",
376                       dev->handler->name, cmd);
377                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
378                 state = dev->handler->parse(cmd, &cdb_info);
379                 /* Caution: cmd may already be dead here */
380                 TRACE_DBG("Dev handler %s parse() returned %d",
381                         dev->handler->name, state);
382
383                 switch (state) {
384                 case SCST_CMD_STATE_NEED_THREAD_CTX:
385                         TRACE_DBG("Dev handler %s parse() requested thread "
386                               "context, rescheduling", dev->handler->name);
387                         res = SCST_CMD_STATE_RES_NEED_THREAD;
388                         goto out;
389
390                 case SCST_CMD_STATE_STOP:
391                         TRACE_DBG("Dev handler %s parse() requested stop "
392                                 "processing", dev->handler->name);
393                         res = SCST_CMD_STATE_RES_CONT_NEXT;
394                         goto out;
395                 }
396
397                 if (state == SCST_CMD_STATE_DEFAULT)
398                         state = SCST_CMD_STATE_PREPARE_SPACE;
399         }
400         else
401                 state = SCST_CMD_STATE_PREPARE_SPACE;
402
403         if (scst_cmd_is_expected_set(cmd)) {
404                 if (cmd->expected_transfer_len < cmd->bufflen) {
405                         TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
406                                 "cmd->bufflen(%d), using expected_transfer_len "
407                                 "instead", cmd->expected_transfer_len,
408                                 cmd->bufflen);
409                         cmd->bufflen = cmd->expected_transfer_len;
410                 }
411         }
412
413         if (cmd->data_len == -1)
414                 cmd->data_len = cmd->bufflen;
415
416         if (cmd->data_buf_alloced && (orig_bufflen > cmd->bufflen)) {
417                 PRINT_ERROR_PR("Target driver supplied a data buffer (size %d) "
418                         "smaller than required (size %d)", cmd->bufflen,
419                         orig_bufflen);
420                 goto out_error;
421         }
422
423 #ifdef EXTRACHECKS
424         if ((state != SCST_CMD_STATE_XMIT_RESP) &&
425             (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
426                 (state != SCST_CMD_STATE_DEV_PARSE)) ||
427             ((cmd->bufflen != 0) && 
428                 (cmd->data_direction == SCST_DATA_NONE) &&
429                 (cmd->status == 0)) ||
430             ((cmd->bufflen == 0) && 
431                 (cmd->data_direction != SCST_DATA_NONE)) ||
432             ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
433                 (state > SCST_CMD_STATE_PREPARE_SPACE))))
434         {
435                 PRINT_ERROR_PR("Dev handler %s parse() returned "
436                                "invalid cmd data_direction %d, "
437                                "bufflen %d or state %d (opcode 0x%x)",
438                                dev->handler->name, 
439                                cmd->data_direction, cmd->bufflen,
440                                state, cmd->cdb[0]);
441                 goto out_error;
442         }
443 #endif
444
445         switch (state) {
446         case SCST_CMD_STATE_PREPARE_SPACE:
447         case SCST_CMD_STATE_DEV_PARSE:
448         case SCST_CMD_STATE_RDY_TO_XFER:
449         case SCST_CMD_STATE_SEND_TO_MIDLEV:
450         case SCST_CMD_STATE_DEV_DONE:
451         case SCST_CMD_STATE_XMIT_RESP:
452         case SCST_CMD_STATE_FINISHED:
453                 cmd->state = state;
454                 res = SCST_CMD_STATE_RES_CONT_SAME;
455                 break;
456
457         default:
458                 if (state >= 0) {
459                         PRINT_ERROR_PR("Dev handler %s parse() returned "
460                              "invalid cmd state %d (opcode %d)", 
461                              dev->handler->name, state, cmd->cdb[0]);
462                 } else {
463                         PRINT_ERROR_PR("Dev handler %s parse() returned "
464                                 "error %d (opcode %d)", dev->handler->name, 
465                                 state, cmd->cdb[0]);
466                 }
467                 goto out_error;
468         }
469
470         if (cmd->resp_data_len == -1) {
471                 if (cmd->data_direction == SCST_DATA_READ)
472                         cmd->resp_data_len = cmd->bufflen;
473                 else
474                          cmd->resp_data_len = 0;
475         }
476         
477 out:
478         TRACE_EXIT_HRES(res);
479         return res;
480
481 out_error:
482         /* dev_done() will be called as part of the regular cmd's finish */
483         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
484         cmd->state = SCST_CMD_STATE_DEV_DONE;
485         res = SCST_CMD_STATE_RES_CONT_SAME;
486         goto out;
487
488 out_xmit:
489         cmd->state = SCST_CMD_STATE_XMIT_RESP;
490         res = SCST_CMD_STATE_RES_CONT_SAME;
491         goto out;
492 }
493
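/*
 * Command memory throttling (a rough summary): scst_check_mem() accounts
 * each cmd's buffer against scst_cur_max_cmd_mem and answers BUSY/QUEUE FULL
 * when the limit is exceeded, scst_low_cur_max_cmd_mem() lowers the limit
 * after an allocation failure, and this delayed work raises it back towards
 * scst_max_cmd_mem in 1/8 steps.
 */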
494 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
495 void scst_cmd_mem_work_fn(void *p)
496 #else
497 void scst_cmd_mem_work_fn(struct work_struct *work)
498 #endif
499 {
500         TRACE_ENTRY();
501
502         spin_lock_bh(&scst_cmd_mem_lock);
503
504         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
505         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
506                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
507                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
508         } else {
509                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
510                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
511         }
512         TRACE_MGMT_DBG("New max cmd mem %ld MB", scst_cur_max_cmd_mem >> 20);
513
514         spin_unlock_bh(&scst_cmd_mem_lock);
515
516         TRACE_EXIT();
517         return;
518 }
519
520 int scst_check_mem(struct scst_cmd *cmd)
521 {
522         int res = 0;
523
524         TRACE_ENTRY();
525
526         if (cmd->mem_checked)
527                 goto out;
528
529         spin_lock_bh(&scst_cmd_mem_lock);
530
531         scst_cur_cmd_mem += cmd->bufflen;
532         cmd->mem_checked = 1;
533         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
534                 goto out_unlock;
535
536         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld KB) "
537                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
538                 "allowed %ld KB)", scst_cur_cmd_mem >> 10,
539                 (cmd->sess->initiator_name[0] == '\0') ?
540                   "Anonymous" : cmd->sess->initiator_name,
541                 scst_cur_max_cmd_mem >> 10);
542
543         scst_cur_cmd_mem -= cmd->bufflen;
544         cmd->mem_checked = 0;
545         scst_set_busy(cmd);
546         cmd->state = SCST_CMD_STATE_XMIT_RESP;
547         res = 1;
548
549 out_unlock:
550         spin_unlock_bh(&scst_cmd_mem_lock);
551
552 out:
553         TRACE_EXIT_RES(res);
554         return res;
555 }
556
557 static void scst_low_cur_max_cmd_mem(void)
558 {
559         TRACE_ENTRY();
560
561         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
562                 cancel_delayed_work(&scst_cmd_mem_work);
563                 flush_scheduled_work();
564                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
565         }
566
567         spin_lock_bh(&scst_cmd_mem_lock);
568
569         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
570                                 (scst_cur_cmd_mem >> 2);
571         if (scst_cur_max_cmd_mem < 16*1024*1024)
572                 scst_cur_max_cmd_mem = 16*1024*1024;
573
574         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
575                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
576                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
577                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
578         }
579
580         spin_unlock_bh(&scst_cmd_mem_lock);
581
582         TRACE_MGMT_DBG("New max cmd mem %ld MB", scst_cur_max_cmd_mem >> 20);
583
584         TRACE_EXIT();
585         return;
586 }
587
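/*
 * Allocates the cmd's data buffer: either via the target driver's own
 * alloc_data_buf() (when data_buf_tgt_alloc is set) or via scst_alloc_space(),
 * both subject to the scst_check_mem() limit.  WRITE cmds then go to
 * RDY_TO_XFER, everything else to SEND_TO_MIDLEV.
 */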
588 static int scst_prepare_space(struct scst_cmd *cmd)
589 {
590         int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
591
592         TRACE_ENTRY();
593
594         if (cmd->data_direction == SCST_DATA_NONE)
595                 goto prep_done;
596
597         if (cmd->data_buf_tgt_alloc) {
598                 int orig_bufflen = cmd->bufflen;
599
600                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
601
602                 r = cmd->tgtt->alloc_data_buf(cmd);
603                 if (r > 0)
604                         goto alloc;
605                 else if (r == 0) {
606                         cmd->data_buf_alloced = 1;
607                         if (unlikely(orig_bufflen < cmd->bufflen)) {
608                                 PRINT_ERROR_PR("Target driver allocated data "
609                                         "buffer (size %d) smaller than "
610                                         "required (size %d)", orig_bufflen,
611                                         cmd->bufflen);
612                                 goto out_error;
613                         }
614                 } else
615                         goto check;
616         }
617
618 alloc:
619         r = scst_check_mem(cmd);
620         if (unlikely(r != 0))
621                 goto out;
622         else if (!cmd->data_buf_alloced) {
623                 r = scst_alloc_space(cmd);
624         } else {
625                 TRACE_MEM("%s", "data_buf_alloced set, returning");
626         }
627
628 check:
629         if (r != 0) {
630                 if (scst_cmd_atomic(cmd)) {
631                         TRACE_MEM("%s", "Atomic memory allocation failed, "
632                               "rescheduling to the thread");
633                         res = SCST_CMD_STATE_RES_NEED_THREAD;
634                         goto out;
635                 } else
636                         goto out_no_space;
637         }
638
639 prep_done:
640         if (cmd->preprocessing_only) {
641                 if (scst_cmd_atomic(cmd) && 
642                     !cmd->tgtt->preprocessing_done_atomic) {
643                         TRACE_DBG("%s", "preprocessing_done() can not be "
644                               "called in atomic context, rescheduling to "
645                               "the thread");
646                         res = SCST_CMD_STATE_RES_NEED_THREAD;
647                         goto out;
648                 }
649
650                 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
651                         TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
652                                 "cmd %p", cmd);
653                         cmd->state = SCST_CMD_STATE_DEV_DONE;
654                         res = SCST_CMD_STATE_RES_CONT_SAME;
655                         goto out;
656                 }
657
658                 res = SCST_CMD_STATE_RES_CONT_NEXT;
659                 cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;
660
661                 TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
662                 cmd->tgtt->preprocessing_done(cmd);
663                 TRACE_DBG("%s", "preprocessing_done() returned");
664                 goto out;
665
666         }
667
668         switch (cmd->data_direction) {
669         case SCST_DATA_WRITE:
670                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
671                 break;
672
673         default:
674                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
675                 break;
676         }
677
678 out:
679         TRACE_EXIT_HRES(res);
680         return res;
681
682 out_no_space:
683         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
684                 "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
685         scst_low_cur_max_cmd_mem();
686         scst_set_busy(cmd);
687         cmd->state = SCST_CMD_STATE_DEV_DONE;
688         res = SCST_CMD_STATE_RES_CONT_SAME;
689         goto out;
690
691 out_error:
692         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
693         cmd->state = SCST_CMD_STATE_DEV_DONE;
694         res = SCST_CMD_STATE_RES_CONT_SAME;
695         goto out;
696 }
697
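/*
 * Called by the target driver after its preprocessing_done() callback to
 * resume the cmd with the given SCST_PREPROCESS_STATUS_* result: on success
 * the cmd proceeds to data transfer (WRITE) or execution, on error straight
 * to the dev_done stage.
 */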
698 void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
699 {
700         TRACE_ENTRY();
701
702         TRACE_DBG("Preferred context: %d", pref_context);
703         TRACE_DBG("tag=%lld, status=%#x", scst_cmd_get_tag(cmd), status);
704
705 #ifdef EXTRACHECKS
706         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
707                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
708         {
709                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
710                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
711                         cmd->tgtt->name);
712                 pref_context = SCST_CONTEXT_TASKLET;
713         }
714 #endif
715
716         switch (status) {
717         case SCST_PREPROCESS_STATUS_SUCCESS:
718                 switch (cmd->data_direction) {
719                 case SCST_DATA_WRITE:
720                         cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
721                         break;
722                 default:
723                         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
724                         break;
725                 }
726                 if (cmd->no_sn)
727                         scst_cmd_set_sn(cmd);
728                 /* Small context optimization */
729                 if ((pref_context == SCST_CONTEXT_TASKLET) || 
730                     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
731                         if (cmd->data_direction == SCST_DATA_WRITE) {
732                                 if ( !test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
733                                                 &cmd->tgt_dev->tgt_dev_flags))
734                                         pref_context = SCST_CONTEXT_THREAD;
735                         } else {
736                                 if ( !test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
737                                                 &cmd->tgt_dev->tgt_dev_flags))
738                                         pref_context = SCST_CONTEXT_THREAD;
739                         }
740                 }
741                 break;
742
743         case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
744                 cmd->state = SCST_CMD_STATE_DEV_DONE;
745                 break;
746
747         case SCST_PREPROCESS_STATUS_ERROR_FATAL:
748                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
749                 /* fall through */
750         case SCST_PREPROCESS_STATUS_ERROR:
751                 scst_set_cmd_error(cmd,
752                            SCST_LOAD_SENSE(scst_sense_hardw_error));
753                 cmd->state = SCST_CMD_STATE_DEV_DONE;
754                 break;
755
756         default:
757                 PRINT_ERROR_PR("%s() received unknown status %x", __func__,
758                         status);
759                 cmd->state = SCST_CMD_STATE_DEV_DONE;
760                 break;
761         }
762
763         scst_proccess_redirect_cmd(cmd, pref_context, 1);
764
765         TRACE_EXIT();
766         return;
767 }
768
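/*
 * Target QUEUE FULL handling: the cmd is parked on tgt->retry_cmd_list and
 * retried later (the retry timer / finished cmds trigger the retry); a return
 * of -1 means at least one cmd finished meanwhile, so the caller should just
 * retry immediately.
 */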
769 /* No locks */
770 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
771 {
772         struct scst_tgt *tgt = cmd->sess->tgt;
773         int res = 0;
774         unsigned long flags;
775
776         TRACE_ENTRY();
777
778         spin_lock_irqsave(&tgt->tgt_lock, flags);
779         tgt->retry_cmds++;
780         smp_mb();
781         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
782               tgt->retry_cmds);
783         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
784                 /* At least one cmd finished, so try again */
785                 tgt->retry_cmds--;
786                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
787                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
788                       "retry_cmds=%d)", finished_cmds,
789                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
790                 res = -1;
791                 goto out_unlock_tgt;
792         }
793
794         TRACE(TRACE_RETRY, "Adding cmd %p to retry cmd list", cmd);
795         list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
796
797         if (!tgt->retry_timer_active) {
798                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
799                 add_timer(&tgt->retry_timer);
800                 tgt->retry_timer_active = 1;
801         }
802
803 out_unlock_tgt:
804         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
805
806         TRACE_EXIT_RES(res);
807         return res;
808 }
809
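/*
 * WRITE data phase: asks the target driver to start receiving data via its
 * rdy_to_xfer() callback; SCST_TGT_RES_QUEUE_FULL answers are handled by the
 * retry machinery above, SCST_TGT_RES_NEED_THREAD_CTX reschedules to a thread.
 */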
810 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
811 {
812         int res, rc;
813         int atomic = scst_cmd_atomic(cmd);
814
815         TRACE_ENTRY();
816
817         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
818                 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
819                 goto out_dev_done;
820         }
821
822         if (cmd->tgtt->rdy_to_xfer == NULL) {
823                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
824                 res = SCST_CMD_STATE_RES_CONT_SAME;
825                 goto out;
826         }
827
828         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
829                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
830                       "called in atomic context, rescheduling to the thread");
831                 res = SCST_CMD_STATE_RES_NEED_THREAD;
832                 goto out;
833         }
834
835         while (1) {
836                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
837
838                 res = SCST_CMD_STATE_RES_CONT_NEXT;
839                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
840
841                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
842 #ifdef DEBUG_RETRY
843                 if (((scst_random() % 100) == 75))
844                         rc = SCST_TGT_RES_QUEUE_FULL;
845                 else
846 #endif
847                         rc = cmd->tgtt->rdy_to_xfer(cmd);
848                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
849
850                 if (likely(rc == SCST_TGT_RES_SUCCESS))
851                         goto out;
852
853                 /* Restore the previous state */
854                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
855
856                 switch (rc) {
857                 case SCST_TGT_RES_QUEUE_FULL:
858                 {
859                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
860                                 break;
861                         else
862                                 continue;
863                 }
864
865                 case SCST_TGT_RES_NEED_THREAD_CTX:
866                 {
867                         TRACE_DBG("Target driver %s "
868                               "rdy_to_xfer() requested thread "
869                               "context, rescheduling", cmd->tgtt->name);
870                         res = SCST_CMD_STATE_RES_NEED_THREAD;
871                         break;
872                 }
873
874                 default:
875                         goto out_error_rc;
876                 }
877                 break;
878         }
879
880 out:
881         TRACE_EXIT_HRES(res);
882         return res;
883
884 out_error_rc:
885         if (rc == SCST_TGT_RES_FATAL_ERROR) {
886                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
887                      "fatal error", cmd->tgtt->name);
888         } else {
889                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
890                             "value %d", cmd->tgtt->name, rc);
891         }
892         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
893
894 out_dev_done:
895         cmd->state = SCST_CMD_STATE_DEV_DONE;
896         res = SCST_CMD_STATE_RES_CONT_SAME;
897         goto out;
898 }
899
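/*
 * Re-dispatches a cmd after an external event (target driver callback or
 * SCSI completion): depending on the requested context it is processed
 * inline, via the per-CPU tasklet, or queued to an SCST processing thread.
 */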
900 /* No locks, but might be in IRQ */
901 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
902         int check_retries)
903 {
904         unsigned long flags;
905
906         TRACE_ENTRY();
907
908         TRACE_DBG("Context: %d", context);
909
910         switch(context) {
911         case SCST_CONTEXT_DIRECT:
912         case SCST_CONTEXT_DIRECT_ATOMIC:
913                 if (check_retries)
914                         scst_check_retries(cmd->tgt);
915                 scst_process_active_cmd(cmd, context);
916                 break;
917
918         default:
919                 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
920                             context);
921                 /* fall through */
922         case SCST_CONTEXT_THREAD:
923                 if (check_retries)
924                         scst_check_retries(cmd->tgt);
925                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
926                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
927                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
928                         list_add(&cmd->cmd_list_entry,
929                                 &cmd->cmd_lists->active_cmd_list);
930                 else
931                         list_add_tail(&cmd->cmd_list_entry,
932                                 &cmd->cmd_lists->active_cmd_list);
933                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
934                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
935                 break;
936
937         case SCST_CONTEXT_TASKLET:
938                 if (check_retries)
939                         scst_check_retries(cmd->tgt);
940                 scst_schedule_tasklet(cmd);
941                 break;
942         }
943
944         TRACE_EXIT();
945         return;
946 }
947
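/*
 * Notification from the target driver that the WRITE data requested by
 * rdy_to_xfer() has arrived (or failed); on success the cmd moves on to
 * SEND_TO_MIDLEV for execution.
 */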
948 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
949 {
950         TRACE_ENTRY();
951
952         TRACE_DBG("Preferred context: %d", pref_context);
953         TRACE(TRACE_SCSI, "tag=%lld status=%#x", scst_cmd_get_tag(cmd), status);
954
955 #ifdef EXTRACHECKS
956         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
957                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
958         {
959                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
960                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
961                         cmd->tgtt->name);
962                 pref_context = SCST_CONTEXT_TASKLET;
963         }
964 #endif
965
966         switch (status) {
967         case SCST_RX_STATUS_SUCCESS:
968                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
969                 /* Small context optimization */
970                 if ((pref_context == SCST_CONTEXT_TASKLET) || 
971                     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
972                         if ( !test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC, 
973                                         &cmd->tgt_dev->tgt_dev_flags))
974                                 pref_context = SCST_CONTEXT_THREAD;
975                 }
976                 break;
977
978         case SCST_RX_STATUS_ERROR_SENSE_SET:
979                 cmd->state = SCST_CMD_STATE_DEV_DONE;
980                 break;
981
982         case SCST_RX_STATUS_ERROR_FATAL:
983                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
984                 /* fall through */
985         case SCST_RX_STATUS_ERROR:
986                 scst_set_cmd_error(cmd,
987                            SCST_LOAD_SENSE(scst_sense_hardw_error));
988                 cmd->state = SCST_CMD_STATE_DEV_DONE;
989                 break;
990
991         default:
992                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
993                         status);
994                 cmd->state = SCST_CMD_STATE_DEV_DONE;
995                 break;
996         }
997
998         scst_proccess_redirect_cmd(cmd, pref_context, 1);
999
1000         TRACE_EXIT();
1001         return;
1002 }
1003
1004 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
1005 {
1006         struct scst_cmd *c;
1007
1008         if (likely(cmd->queue_type != SCST_CMD_QUEUE_HEAD_OF_QUEUE))
1009                 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
1010         c = scst_check_deferred_commands(cmd->tgt_dev);
1011         if (c != NULL) {
1012                 unsigned long flags;
1013                 spin_lock_irqsave(&c->cmd_lists->cmd_list_lock, flags);
1014                 TRACE_SN("Adding cmd %p to active cmd list", c);
1015                 list_add_tail(&c->cmd_list_entry,
1016                         &c->cmd_lists->active_cmd_list);
1017                 wake_up(&c->cmd_lists->cmd_list_waitQ);
1018                 spin_unlock_irqrestore(&c->cmd_lists->cmd_list_lock, flags);
1019         }
1020 }
1021
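/*
 * Common completion path for cmds executed by the SCSI mid-layer: stores
 * status, sense and residual count, and sets the Write Protect bit in
 * MODE SENSE/MODE SENSE(10) responses for read-only acg devices.
 */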
1022 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1023         const uint8_t *rq_sense, int rq_sense_len, int resid)
1024 {
1025         unsigned char type;
1026
1027         TRACE_ENTRY();
1028
1029         if (cmd->inc_expected_sn_on_done)
1030                 scst_inc_check_expected_sn(cmd);
1031
1032         cmd->status = result & 0xff;
1033         cmd->msg_status = msg_byte(result);
1034         cmd->host_status = host_byte(result);
1035         cmd->driver_status = driver_byte(result);
1036         if (unlikely(resid != 0)) {
1037 #ifdef EXTRACHECKS
1038                 if ((resid < 0) || (resid > cmd->resp_data_len)) {
1039                         PRINT_ERROR_PR("Wrong resid %d (cmd->resp_data_len=%d)",
1040                                 resid, cmd->resp_data_len);
1041                 } else
1042 #endif
1043                         scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
1044         }
1045
1046         /* 
1047          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
1048          * in init_scst()
1049          */
1050         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
1051         memset(&cmd->sense_buffer[rq_sense_len], 0,
1052                 sizeof(cmd->sense_buffer) - rq_sense_len);
1053
1054         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
1055               "cmd->msg_status=%x, cmd->host_status=%x, "
1056               "cmd->driver_status=%x", result, cmd->status, resid,
1057               cmd->msg_status, cmd->host_status, cmd->driver_status);
1058
1059         cmd->completed = 1;
1060
1061         scst_dec_on_dev_cmd(cmd, 0);
1062
1063         type = cmd->dev->handler->type;
1064         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1065             cmd->tgt_dev->acg_dev->rd_only_flag &&
1066             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1067              type == TYPE_TAPE)) {
1068                 int32_t length;
1069                 uint8_t *address;
1070
1071                 length = scst_get_buf_first(cmd, &address);
1072                 TRACE_DBG("length %d", length);
1073                 if (unlikely(length <= 0)) {
1074                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1075                                 __func__);
1076                         goto out;
1077                 }
1078                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1079                         address[2] |= 0x80;   /* Write Protect */
1080                 }
1081                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1082                         address[3] |= 0x80;   /* Write Protect */
1083                 }
1084                 scst_put_buf(cmd, address);
1085         }
1086
1087 out:
1088         TRACE_EXIT();
1089         return;
1090 }
1091
1092 /* For small context optimization */
1093 static inline int scst_optimize_post_exec_context(struct scst_cmd *cmd,
1094         int context)
1095 {
1096         if ((context == SCST_CONTEXT_TASKLET) || 
1097             (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
1098                 if ( !test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC, 
1099                                 &cmd->tgt_dev->tgt_dev_flags))
1100                         context = SCST_CONTEXT_THREAD;
1101         }
1102         return context;
1103 }
1104
1105 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1106 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1107                                             struct scsi_request **req)
1108 {
1109         struct scst_cmd *cmd = NULL;
1110
1111         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1112                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1113
1114         if (cmd == NULL) {
1115                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1116                 if (*req)
1117                         scsi_release_request(*req);
1118         }
1119
1120         return cmd;
1121 }
1122
1123 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1124 {
1125         struct scsi_request *req = NULL;
1126         struct scst_cmd *cmd;
1127
1128         TRACE_ENTRY();
1129
1130         cmd = scst_get_cmd(scsi_cmd, &req);
1131         if (cmd == NULL)
1132                 goto out;
1133
1134         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1135                 sizeof(req->sr_sense_buffer), scsi_cmd->resid);
1136
1137         /* Clear out request structure */
1138         req->sr_use_sg = 0;
1139         req->sr_sglist_len = 0;
1140         req->sr_bufflen = 0;
1141         req->sr_buffer = NULL;
1142         req->sr_underflow = 0;
1143         req->sr_request->rq_disk = NULL; /* disown request blk */
1144
1145         scst_release_request(cmd);
1146
1147         cmd->state = SCST_CMD_STATE_DEV_DONE;
1148
1149         scst_proccess_redirect_cmd(cmd,
1150                 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1151
1152 out:
1153         TRACE_EXIT();
1154         return;
1155 }
1156 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1157 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1158 {
1159         struct scst_cmd *cmd;
1160
1161         TRACE_ENTRY();
1162
1163         cmd = (struct scst_cmd *)data;
1164         if (cmd == NULL)
1165                 goto out;
1166
1167         scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);
1168
1169         cmd->state = SCST_CMD_STATE_DEV_DONE;
1170
1171         scst_proccess_redirect_cmd(cmd,
1172                 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1173
1174 out:
1175         TRACE_EXIT();
1176         return;
1177 }
1178 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1179
1180 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1181 {
1182         TRACE_ENTRY();
1183
1184         scst_dec_on_dev_cmd(cmd, 0);
1185
1186         if (cmd->inc_expected_sn_on_done)
1187                 scst_inc_check_expected_sn(cmd);
1188
1189         if (next_state == SCST_CMD_STATE_DEFAULT)
1190                 next_state = SCST_CMD_STATE_DEV_DONE;
1191
1192 #if defined(DEBUG) || defined(TRACING)
1193         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1194                 if (cmd->sg) {
1195                         int i;
1196                         struct scatterlist *sg = cmd->sg;
1197                         TRACE(TRACE_RECV_TOP, 
1198                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1199                               cmd->sg_cnt, sg, (void*)sg[0].page);
1200                         for(i = 0; i < cmd->sg_cnt; ++i) {
1201                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1202                                         "Exec'd sg", page_address(sg[i].page),
1203                                         sg[i].length);
1204                         }
1205                 }
1206         }
1207 #endif
1208
1209
1210 #ifdef EXTRACHECKS
1211         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1212             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1213             (next_state != SCST_CMD_STATE_FINISHED)) 
1214         {
1215                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1216                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1217                 scst_set_cmd_error(cmd,
1218                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1219                 next_state = SCST_CMD_STATE_DEV_DONE;
1220         }
1221 #endif
1222         cmd->state = next_state;
1223
1224         scst_proccess_redirect_cmd(cmd,
1225                 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1226
1227         TRACE_EXIT();
1228         return;
1229 }
1230
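/*
 * Internal REPORT LUNS implementation.  The response is an 8-byte header
 * (LUN list length in bytes 0-3) followed by one 8-byte entry per LUN;
 * only the two LUN bytes of each entry are filled in here.
 */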
1231 static int scst_report_luns_local(struct scst_cmd *cmd)
1232 {
1233         int res = SCST_EXEC_COMPLETED;
1234         int dev_cnt = 0;
1235         int buffer_size;
1236         int i;
1237         struct scst_tgt_dev *tgt_dev = NULL;
1238         uint8_t *buffer;
1239         int offs, overflow = 0;
1240
1241         TRACE_ENTRY();
1242
1243         cmd->status = 0;
1244         cmd->msg_status = 0;
1245         cmd->host_status = DID_OK;
1246         cmd->driver_status = 0;
1247
1248         if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
1249                 PRINT_ERROR_PR("Unsupported SELECT REPORT value %x in REPORT "
1250                         "LUNS command", cmd->cdb[2]);
1251                 goto out_err;
1252         }
1253
1254         buffer_size = scst_get_buf_first(cmd, &buffer);
1255         if (unlikely(buffer_size <= 0))
1256                 goto out_err;
1257
1258         if (buffer_size < 16)
1259                 goto out_put_err;
1260
1261         memset(buffer, 0, buffer_size);
1262         offs = 8;
1263
1264         /* sess->sess_tgt_dev_list_hash is protected by suspended activity */
1265         for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1266                 struct list_head *sess_tgt_dev_list_head =
1267                         &cmd->sess->sess_tgt_dev_list_hash[i];
1268                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
1269                                 sess_tgt_dev_list_entry) {
1270                         if (!overflow) {
1271                                 if (offs >= buffer_size) {
1272                                         scst_put_buf(cmd, buffer);
1273                                         buffer_size = scst_get_buf_next(cmd, &buffer);
1274                                         if (buffer_size > 0) {
1275                                                 memset(buffer, 0, buffer_size);
1276                                                 offs = 0;
1277                                         } else {
1278                                                 overflow = 1;
1279                                                 goto inc_dev_cnt;
1280                                         }
1281                                 }
1282                                 if ((buffer_size - offs) < 8) {
1283                                         PRINT_ERROR_PR("Buffer allocated for REPORT "
1284                                                 "LUNS command is too small to fit an "
1285                                                 "8-byte entry (buffer_size=%d)",
1286                                                 buffer_size);
1287                                         goto out_put_hw_err;
1288                                 }
1289                                 buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
1290                                 buffer[offs+1] = tgt_dev->lun & 0xff;
1291                                 offs += 8;
1292                         }
1293 inc_dev_cnt:
1294                         dev_cnt++;
1295                 }
1296         }
1297         if (!overflow)
1298                 scst_put_buf(cmd, buffer);
1299
1300         /* Set the response header */
1301         buffer_size = scst_get_buf_first(cmd, &buffer);
1302         if (unlikely(buffer_size <= 0))
1303                 goto out_err;
1304         dev_cnt *= 8;
1305         buffer[0] = (dev_cnt >> 24) & 0xff;
1306         buffer[1] = (dev_cnt >> 16) & 0xff;
1307         buffer[2] = (dev_cnt >> 8) & 0xff;
1308         buffer[3] = dev_cnt & 0xff;
1309         scst_put_buf(cmd, buffer);
1310
1311         dev_cnt += 8;
1312         if (dev_cnt < cmd->resp_data_len)
1313                 scst_set_resp_data_len(cmd, dev_cnt);
1314
1315 out_done:
1316         cmd->completed = 1;
1317
1318         /* Report the result */
1319         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1320
1321         TRACE_EXIT_RES(res);
1322         return res;
1323         
1324 out_put_err:
1325         scst_put_buf(cmd, buffer);
1326
1327 out_err:
1328         scst_set_cmd_error(cmd,
1329                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1330         goto out_done;
1331
1332 out_put_hw_err:
1333         scst_put_buf(cmd, buffer);
1334         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1335         goto out_done;
1336 }
1337
1338 static int scst_pre_select(struct scst_cmd *cmd)
1339 {
1340         int res = SCST_EXEC_NOT_COMPLETED;
1341
1342         TRACE_ENTRY();
1343
1344         if (scst_cmd_atomic(cmd)) {
1345                 res = SCST_EXEC_NEED_THREAD;
1346                 goto out;
1347         }
1348
1349         scst_block_dev(cmd->dev, 1);
1350         /* Device will be unblocked in scst_done_cmd_check() */
1351
1352         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1353                 int rc = scst_set_pending_UA(cmd);
1354                 if (rc == 0) {
1355                         res = SCST_EXEC_COMPLETED;
1356                         cmd->completed = 1;
1357                         /* Report the result */
1358                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1359                         goto out;
1360                 }
1361         }
1362
1363 out:
1364         TRACE_EXIT_RES(res);
1365         return res;
1366 }
1367
1368 static inline void scst_report_reserved(struct scst_cmd *cmd)
1369 {
1370         TRACE_ENTRY();
1371
1372         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1373         cmd->completed = 1;
1374         /* Report the result */
1375         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1376
1377         TRACE_EXIT();
1378         return;
1379 }
1380
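/*
 * Local handling of RESERVE/RESERVE(10): marks the tgt_devs of all other
 * sessions for this device as SCST_TGT_DEV_RESERVED, so their subsequent
 * cmds get RESERVATION CONFLICT (see scst_pre_exec()/scst_report_reserved()).
 */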
1381 static int scst_reserve_local(struct scst_cmd *cmd)
1382 {
1383         int res = SCST_EXEC_NOT_COMPLETED;
1384         struct scst_device *dev;
1385         struct scst_tgt_dev *tgt_dev_tmp;
1386
1387         TRACE_ENTRY();
1388
1389         if (scst_cmd_atomic(cmd)) {
1390                 res = SCST_EXEC_NEED_THREAD;
1391                 goto out;
1392         }
1393
1394         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1395                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1396                      "(lun=%Ld)", (uint64_t)cmd->lun);
1397                 scst_set_cmd_error(cmd,
1398                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1399                 cmd->completed = 1;
1400                 res = SCST_EXEC_COMPLETED;
1401                 goto out;
1402         }
1403
1404         dev = cmd->dev;
1405         scst_block_dev(dev, 1);
1406         /* Device will be unblocked in scst_done_cmd_check() */
1407
1408         spin_lock_bh(&dev->dev_lock);
1409
1410         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1411                 scst_report_reserved(cmd);
1412                 /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
1413                 res = SCST_EXEC_COMPLETED;
1414                 goto out_unlock;
1415         }
1416
1417         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1418                             dev_tgt_dev_list_entry) 
1419         {
1420                 if (cmd->tgt_dev != tgt_dev_tmp)
1421                         set_bit(SCST_TGT_DEV_RESERVED, 
1422                                 &tgt_dev_tmp->tgt_dev_flags);
1423         }
1424         dev->dev_reserved = 1;
1425
1426 out_unlock:
1427         spin_unlock_bh(&dev->dev_lock);
1428         
1429 out:
1430         TRACE_EXIT_RES(res);
1431         return res;
1432 }
1433
1434 static int scst_release_local(struct scst_cmd *cmd)
1435 {
1436         int res = SCST_EXEC_NOT_COMPLETED;
1437         struct scst_tgt_dev *tgt_dev_tmp;
1438         struct scst_device *dev;
1439
1440         TRACE_ENTRY();
1441
1442         dev = cmd->dev;
1443
1444         scst_block_dev(dev, 1);
1445         cmd->blocking = 1;
1446         TRACE_MGMT_DBG("Blocking cmd %p (tag %lld)", cmd, cmd->tag);
1447
1448         spin_lock_bh(&dev->dev_lock);
1449
1450         /*
1451          * The device could be RELEASED behind us, if the RESERVING session
1452          * is closed (see scst_free_tgt_dev()), but this actually doesn't
1453          * matter, so take the lock and don't retest the DEV_RESERVED bits.
1454          */
1455         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1456                 res = SCST_EXEC_COMPLETED;
1457                 cmd->status = 0;
1458                 cmd->msg_status = 0;
1459                 cmd->host_status = DID_OK;
1460                 cmd->driver_status = 0;
1461         } else {
1462                 list_for_each_entry(tgt_dev_tmp,
1463                                     &dev->dev_tgt_dev_list,
1464                                     dev_tgt_dev_list_entry) 
1465                 {
1466                         clear_bit(SCST_TGT_DEV_RESERVED, 
1467                                 &tgt_dev_tmp->tgt_dev_flags);
1468                 }
1469                 dev->dev_reserved = 0;
1470         }
1471
1472         spin_unlock_bh(&dev->dev_lock);
1473
1474         if (res == SCST_EXEC_COMPLETED) {
1475                 cmd->completed = 1;
1476                 /* Report the result */
1477                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1478         }
1479
1480         TRACE_EXIT_RES(res);
1481         return res;
1482 }
1483
1484 /* 
1485  * The result of cmd execution, if any, should be reported 
1486  * via scst_cmd_done_local() 
1487  */
1488 static int scst_pre_exec(struct scst_cmd *cmd)
1489 {
1490         int res = SCST_EXEC_NOT_COMPLETED, rc;
1491         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1492
1493         TRACE_ENTRY();
1494
1495         /* Reserve check before Unit Attention */
1496         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))) {
1497                 if ((cmd->cdb[0] != INQUIRY) && (cmd->cdb[0] != REPORT_LUNS) &&
1498                     (cmd->cdb[0] != RELEASE) && (cmd->cdb[0] != RELEASE_10) &&
1499                     (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1500                     (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1501                     (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
1502                 {
1503                         scst_report_reserved(cmd);
1504                         res = SCST_EXEC_COMPLETED;
1505                         goto out;
1506                 }
1507         }
1508
1509         /* If we had an internal bus reset, set the command error unit attention */
1510         if ((cmd->dev->scsi_dev != NULL) &&
1511             unlikely(cmd->dev->scsi_dev->was_reset)) {
1512                 if (scst_is_ua_command(cmd)) 
1513                 {
1514                         struct scst_device *dev = cmd->dev;
1515                         int done = 0;
1516                         /* Prevent more than one cmd from being triggered by was_reset */
1517                         spin_lock_bh(&dev->dev_lock);
1518                         barrier(); /* to reread was_reset */
1519                         if (dev->scsi_dev->was_reset) {
1520                                 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1521                                 scst_set_cmd_error(cmd,
1522                                            SCST_LOAD_SENSE(scst_sense_reset_UA));
1523                                 /* It looks like it is safe to clear was_reset here */
1524                                 dev->scsi_dev->was_reset = 0;
1525                                 smp_mb();
1526                                 done = 1;
1527                         }
1528                         spin_unlock_bh(&dev->dev_lock);
1529
1530                         if (done)
1531                                 goto out_done;
1532                 }
1533         }
1534
1535         if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING, 
1536                         &cmd->tgt_dev->tgt_dev_flags))) {
1537                 if (scst_is_ua_command(cmd)) 
1538                 {
1539                         rc = scst_set_pending_UA(cmd);
1540                         if (rc == 0)
1541                                 goto out_done;
1542                 }
1543         }
1544
1545         /* Check READ_ONLY device status */
1546         if (tgt_dev->acg_dev->rd_only_flag &&
1547             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1548              cmd->cdb[0] == WRITE_10 ||
1549              cmd->cdb[0] == WRITE_12 ||
1550              cmd->cdb[0] == WRITE_16 ||
1551              cmd->cdb[0] == WRITE_VERIFY ||
1552              cmd->cdb[0] == WRITE_VERIFY_12 ||
1553              cmd->cdb[0] == WRITE_VERIFY_16 ||
1554              (cmd->dev->handler->type == TYPE_TAPE &&
1555               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1556         {
1557                 scst_set_cmd_error(cmd,
1558                            SCST_LOAD_SENSE(scst_sense_data_protect));
1559                 goto out_done;
1560         }
1561 out:
1562         TRACE_EXIT_RES(res);
1563         return res;
1564
1565 out_done:
1566         res = SCST_EXEC_COMPLETED;
1567         cmd->completed = 1;
1568         /* Report the result */
1569         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1570         goto out;
1571 }
1572
1573 /* 
1574  * The result of cmd execution, if any, should be reported 
1575  * via scst_cmd_done_local() 
1576  */
1577 static inline int scst_local_exec(struct scst_cmd *cmd)
1578 {
1579         int res = SCST_EXEC_NOT_COMPLETED;
1580
1581         TRACE_ENTRY();
1582
1583         /*
1584          * When adding new commands here, don't forget to update
1585          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1586          */
1587
1588         switch (cmd->cdb[0]) {
1589         case MODE_SELECT:
1590         case MODE_SELECT_10:
1591         case LOG_SELECT:
1592                 res = scst_pre_select(cmd);
1593                 break;
1594         case RESERVE:
1595         case RESERVE_10:
1596                 res = scst_reserve_local(cmd);
1597                 break;
1598         case RELEASE:
1599         case RELEASE_10:
1600                 res = scst_release_local(cmd);
1601                 break;
1602         case REPORT_LUNS:
1603                 res = scst_report_luns_local(cmd);
1604                 break;
1605         }
1606
1607         TRACE_EXIT_RES(res);
1608         return res;
1609 }
1610
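/*
 * Executes the cmd: runs the pre-execution checks (scst_pre_exec()), the
 * locally handled commands (scst_local_exec()), then the dev handler's
 * exec(), if any, and finally passes the cmd to the SCSI mid-level.
 * Returns SCST_EXEC_COMPLETED or SCST_EXEC_NEED_THREAD.
 */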
1611 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1612 {
1613         int rc = SCST_EXEC_NOT_COMPLETED;
1614
1615         TRACE_ENTRY();
1616
1617         /* Check here to let an out of SN cmd be queued w/o context switch */
1618         if (scst_cmd_atomic(cmd) && !cmd->dev->handler->exec_atomic) {
1619                 TRACE_DBG("Dev handler %s exec() can not be "
1620                       "called in atomic context, rescheduling to the thread",
1621                       cmd->dev->handler->name);
1622                 rc = SCST_EXEC_NEED_THREAD;
1623                 goto out;
1624         }
1625
1626         cmd->sent_to_midlev = 1;
1627         cmd->state = SCST_CMD_STATE_EXECUTING;
1628         cmd->scst_cmd_done = scst_cmd_done_local;
1629
1630         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1631         smp_mb__after_set_bit();
1632
1633         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1634                 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
1635                 goto out_aborted;
1636         }
1637
1638         rc = scst_pre_exec(cmd);
1639         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1640         if (rc != SCST_EXEC_NOT_COMPLETED) {
1641                 if (rc == SCST_EXEC_COMPLETED)
1642                         goto out;
1643                 else if (rc == SCST_EXEC_NEED_THREAD)
1644                         goto out_clear;
1645                 else
1646                         goto out_rc_error;
1647         }
1648
1649         rc = scst_local_exec(cmd);
1650         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1651         if (rc != SCST_EXEC_NOT_COMPLETED) {
1652                 if (rc == SCST_EXEC_COMPLETED)
1653                         goto out;
1654                 else if (rc == SCST_EXEC_NEED_THREAD)
1655                         goto out_clear;
1656                 else
1657                         goto out_rc_error;
1658         }
1659
1660         if (cmd->dev->handler->exec) {
1661                 struct scst_device *dev = cmd->dev;
1662                 TRACE_DBG("Calling dev handler %s exec(%p)",
1663                       dev->handler->name, cmd);
1664                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1665                 cmd->scst_cmd_done = scst_cmd_done_local;
1666                 rc = dev->handler->exec(cmd);
1667                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1668                 TRACE_DBG("Dev handler %s exec() returned %d",
1669                       dev->handler->name, rc);
1670                 if (rc == SCST_EXEC_COMPLETED)
1671                         goto out;
1672                 else if (rc == SCST_EXEC_NEED_THREAD)
1673                         goto out_clear;
1674                 else if (rc != SCST_EXEC_NOT_COMPLETED)
1675                         goto out_rc_error;
1676         }
1677
1678         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1679         
1680         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1681                 PRINT_ERROR_PR("Command for virtual device must be "
1682                         "processed by device handler (lun %Ld)!",
1683                         (uint64_t)cmd->lun);
1684                 goto out_error;
1685         }
1686
1687 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1688         if (unlikely(scst_alloc_request(cmd) != 0)) {
1689                 if (scst_cmd_atomic(cmd)) {
1690                         rc = SCST_EXEC_NEED_THREAD;
1691                         goto out_clear;
1692                 } else {
1693                         PRINT_INFO_PR("%s", "Unable to allocate request, "
1694                                 "sending BUSY status");
1695                         goto out_busy;
1696                 }
1697         }
1698         
1699         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1700                     (void *)cmd->scsi_req->sr_buffer,
1701                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1702                     cmd->retries);
1703 #else
1704         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1705                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1706                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1707                         scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
1708         if (unlikely(rc != 0)) {
1709                 if (scst_cmd_atomic(cmd)) {
1710                         rc = SCST_EXEC_NEED_THREAD;
1711                         goto out_clear;
1712                 } else {
1713                         PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1714                         goto out_error;
1715                 }
1716         }
1717 #endif
1718
1719         rc = SCST_EXEC_COMPLETED;
1720
1721 out:
1722         TRACE_EXIT();
1723         return rc;
1724
1725 out_clear:
1726         /* Restore the state */
1727         cmd->sent_to_midlev = 0;
1728         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1729         goto out;
1730
1731 out_rc_error:
1732         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1733                     "invalid code %d", cmd->dev->handler->name, rc);
1734         /* go through */
1735
1736 out_error:
1737         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1738         cmd->completed = 1;
1739         cmd->state = SCST_CMD_STATE_DEV_DONE;
1740         rc = SCST_EXEC_COMPLETED;
1741         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1742         goto out;
1743
1744 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1745 out_busy:
1746         scst_set_busy(cmd);
1747         cmd->completed = 1;
1748         cmd->state = SCST_CMD_STATE_DEV_DONE;
1749         rc = SCST_EXEC_COMPLETED;
1750         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1751         goto out;
1752 #endif
1753
1754 out_aborted:
1755         rc = SCST_EXEC_COMPLETED;
1756         /* Report the result. The cmd is not completed */
1757         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1758         goto out;
1759 }
1760
1761 /* No locks */
1762 void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
1763 {
1764         if (slot == NULL)
1765                 goto inc;
1766
1767         /* Optimized for lockless fast path */
1768
1769         TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
1770                 atomic_read(slot));
1771
1772         if (!atomic_dec_and_test(slot))
1773                 goto out;
1774
1775         TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
1776                 tgt_dev->num_free_sn_slots);
1777         if (tgt_dev->num_free_sn_slots != ARRAY_SIZE(tgt_dev->sn_slots)) {
1778                 spin_lock_irq(&tgt_dev->sn_lock);
1779                 if (tgt_dev->num_free_sn_slots != ARRAY_SIZE(tgt_dev->sn_slots)) {
1780                         tgt_dev->num_free_sn_slots++;
1781                         TRACE_SN("Incremented num_free_sn_slots (%d)",
1782                                 tgt_dev->num_free_sn_slots);
1783                         if (tgt_dev->num_free_sn_slots == 0)
1784                                 tgt_dev->cur_sn_slot = slot;
1785                 }
1786                 spin_unlock_irq(&tgt_dev->sn_lock);
1787         }
1788
1789 inc:
1790         /*
1791          * No locks are needed, because only one thread at a time can
1792          * be here (serialized by sn). It is also assumed that the
1793          * increment cannot be observed half-done.
1794          */
1795         tgt_dev->expected_sn++;
1796         smp_mb(); /* write must be before def_cmd_count read */
1797         TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);
1798
1799 out:
1800         return;
1801 }
1802
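/*
 * Top-level execution step: accounts the cmd on the device, handles
 * HEAD OF QUEUE cmds, enforces the expected SN order (out of order cmds
 * are put on the deferred list), then executes the cmd and any deferred
 * cmds that became runnable via scst_do_send_to_midlev().
 * Returns an SCST_CMD_STATE_RES_* code.
 */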
1803 static int scst_send_to_midlev(struct scst_cmd *cmd)
1804 {
1805         int res, rc;
1806         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1807         struct scst_device *dev = cmd->dev;
1808         typeof(tgt_dev->expected_sn) expected_sn;
1809         int count;
1810
1811         TRACE_ENTRY();
1812
1813         res = SCST_CMD_STATE_RES_CONT_NEXT;
1814
1815         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1816                 goto out;
1817
1818         __scst_get(0); /* protect dev & tgt_dev */
1819
1820         if (unlikely(cmd->internal || cmd->retry)) {
1821                 rc = scst_do_send_to_midlev(cmd);
1822                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1823                 if (rc == SCST_EXEC_NEED_THREAD) {
1824                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1825                               "thread context, rescheduling");
1826                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1827                         scst_dec_on_dev_cmd(cmd, 0);
1828                         goto out_dec_cmd_count;
1829                 } else {
1830                         sBUG_ON(rc != SCST_EXEC_COMPLETED);
1831                         goto out_unplug;
1832                 }
1833         }
1834
1835         EXTRACHECKS_BUG_ON(cmd->no_sn);
1836
1837         if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)) {
1838                 /*
1839                  * Without get() there would be a race, if cmd is executed and
1840                  * destroyed before "goto out_unplug"
1841                  */
1842                 scst_cmd_get(cmd);
1843                 if (scst_check_hq_cmd(cmd)) {
1844                         scst_cmd_put(cmd);
1845                         goto exec;
1846                 } else {
1847                         scst_dec_on_dev_cmd(cmd, 0);
1848                         scst_cmd_put(cmd);
1849                         goto out_unplug;
1850                 }
1851         }
1852
1853         expected_sn = tgt_dev->expected_sn;
1854         /* Optimized for lockless fast path */
1855         if ((cmd->sn != expected_sn) || unlikely(test_bit(SCST_TGT_DEV_HQ_ACTIVE,
1856                                                 &tgt_dev->tgt_dev_flags))) {
1857                 spin_lock_irq(&tgt_dev->sn_lock);
1858                 tgt_dev->def_cmd_count++;
1859                 smp_mb();
1860                 barrier(); /* to reread expected_sn & hq_cmd_active */
1861                 expected_sn = tgt_dev->expected_sn;
1862                 if ((cmd->sn != expected_sn) || test_bit(SCST_TGT_DEV_HQ_ACTIVE,
1863                                                 &tgt_dev->tgt_dev_flags)) {
1864                         /* We are under an IRQ lock, but dev->dev_lock is a BH one */
1865                         int cmd_blocking = scst_dec_on_dev_cmd(cmd, 1);
1866                         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1867                                 /* Necessary to allow aborting out of sn cmds */
1868                                 TRACE_MGMT_DBG("Aborting out of sn cmd %p (tag %lld)",
1869                                         cmd, cmd->tag);
1870                                 tgt_dev->def_cmd_count--;
1871                                 cmd->state = SCST_CMD_STATE_DEV_DONE;
1872                                 res = SCST_CMD_STATE_RES_CONT_SAME;
1873                         } else {
1874                                 TRACE_SN("Deferring cmd %p (sn=%ld, "
1875                                         "expected_sn=%ld, hq_cmd_active=%d)", cmd,
1876                                         cmd->sn, expected_sn, 
1877                                         test_bit(SCST_TGT_DEV_HQ_ACTIVE,
1878                                                 &tgt_dev->tgt_dev_flags));
1879                                 list_add_tail(&cmd->sn_cmd_list_entry,
1880                                               &tgt_dev->deferred_cmd_list);
1881                         }
1882                         spin_unlock_irq(&tgt_dev->sn_lock);
1883                         /* !! At this point cmd can be already freed !! */
1884                         __scst_dec_on_dev_cmd(dev, cmd_blocking);
1885                         goto out_dec_cmd_count;
1886                 } else {
1887                         TRACE_SN("Somebody incremented expected_sn %ld, "
1888                                 "continuing", expected_sn);
1889                         tgt_dev->def_cmd_count--;
1890                         spin_unlock_irq(&tgt_dev->sn_lock);
1891                 }
1892         }
1893
1894 exec:
1895         count = 0;
1896         while(1) {
1897                 atomic_t *slot = cmd->sn_slot;
1898                 int hq = (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE);
1899                 int inc_expected_sn_on_done = cmd->inc_expected_sn_on_done;
1900                 rc = scst_do_send_to_midlev(cmd);
1901                 if (rc == SCST_EXEC_NEED_THREAD) {
1902                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1903                               "thread context, rescheduling");
1904                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1905                         if (unlikely(hq)) {
1906                                 TRACE_SN("Rescheduling HQ cmd %p", cmd);
1907                                 spin_lock_irq(&tgt_dev->sn_lock);
1908                                 clear_bit(SCST_TGT_DEV_HQ_ACTIVE,
1909                                         &tgt_dev->tgt_dev_flags);
1910                                 list_add(&cmd->sn_cmd_list_entry,
1911                                         &tgt_dev->hq_cmd_list);
1912                                 spin_unlock_irq(&tgt_dev->sn_lock);
1913                         }
1914                         scst_dec_on_dev_cmd(cmd, 0);
1915                         if (count != 0)
1916                                 goto out_unplug;
1917                         else
1918                                 goto out_dec_cmd_count;
1919                 }
1920                 sBUG_ON(rc != SCST_EXEC_COMPLETED);
1921                 /* !! At this point cmd can be already freed !! */
1922                 count++;
1923                 if ( !inc_expected_sn_on_done && likely(!hq))
1924                         scst_inc_expected_sn(tgt_dev, slot);
1925                 cmd = scst_check_deferred_commands(tgt_dev);
1926                 if (cmd == NULL)
1927                         break;
1928                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1929                         break;
1930         }
1931
1932 out_unplug:
1933         if (dev->scsi_dev != NULL)
1934                 generic_unplug_device(dev->scsi_dev->request_queue);
1935
1936 out_dec_cmd_count:
1937         __scst_put();
1938         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1939
1940 out:
1941         TRACE_EXIT_HRES(res);
1942         return res;
1943 }
1944
1945 /* No locks supposed to be held */
1946 static int scst_check_sense(struct scst_cmd *cmd)
1947 {
1948         int res = 0;
1949         int sense_valid;
1950         struct scst_device *dev = cmd->dev;
1951         int dbl_ua_possible, ua_sent = 0;
1952
1953         TRACE_ENTRY();
1954
1955         /* If we had an internal bus reset behind us, set the command error UA */
1956         if ((dev->scsi_dev != NULL) &&
1957             unlikely(cmd->host_status == DID_RESET) &&
1958             scst_is_ua_command(cmd))
1959         {
1960                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
1961                       dev->scsi_dev->was_reset, cmd->host_status);
1962                 scst_set_cmd_error(cmd,
1963                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1964                 /* just in case */
1965                 cmd->ua_ignore = 0;
1966                 /* It looks like it is safe to clear was_reset here */
1967                 dev->scsi_dev->was_reset = 0;
1968                 smp_mb();
1969         }
1970
1971         sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
1972
1973         dbl_ua_possible = dev->dev_double_ua_possible;
1974         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
1975         if (unlikely(dbl_ua_possible)) {
1976                 spin_lock_bh(&dev->dev_lock);
1977                 barrier(); /* to reread dev_double_ua_possible */
1978                 dbl_ua_possible = dev->dev_double_ua_possible;
1979                 if (dbl_ua_possible)
1980                         ua_sent = dev->dev_reset_ua_sent;
1981                 else
1982                         spin_unlock_bh(&dev->dev_lock);
1983         }
1984
1985         if (sense_valid) {
1986                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1987                              sizeof(cmd->sense_buffer));
1988                 /* Check Unit Attention Sense Key */
1989                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
1990                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
1991                                 if (dbl_ua_possible) 
1992                                 {
1993                                         if (ua_sent) {
1994                                                 TRACE(TRACE_MGMT, "%s", 
1995                                                         "Double UA detected");
1996                                                 /* Do retry */
1997                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
1998                                                         "(tag %lld)", cmd, cmd->tag);
1999                                                 cmd->status = 0;
2000                                                 cmd->msg_status = 0;
2001                                                 cmd->host_status = DID_OK;
2002                                                 cmd->driver_status = 0;
2003                                                 memset(cmd->sense_buffer, 0,
2004                                                         sizeof(cmd->sense_buffer));
2005                                                 cmd->retry = 1;
2006                                                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
2007                                                 res = 1;
2008                                                 /* 
2009                                                  * Dev is still blocked by this cmd, so
2010                                                  * it's OK to clear SCST_DEV_SERIALIZED
2011                                                  * here.
2012                                                  */
2013                                                 dev->dev_double_ua_possible = 0;
2014                                                 dev->dev_serialized = 0;
2015                                                 dev->dev_reset_ua_sent = 0;
2016                                                 goto out_unlock;
2017                                         } else
2018                                                 dev->dev_reset_ua_sent = 1;
2019                                 }
2020                         }
2021                         if (cmd->ua_ignore == 0) {
2022                                 if (unlikely(dbl_ua_possible)) {
2023                                         __scst_process_UA(dev, cmd,
2024                                                 cmd->sense_buffer,
2025                                                 sizeof(cmd->sense_buffer), 0);
2026                                 } else {
2027                                         scst_process_UA(dev, cmd,
2028                                                 cmd->sense_buffer,
2029                                                 sizeof(cmd->sense_buffer), 0);
2030                                 }
2031                         }
2032                 }
2033         }
2034
2035         if (unlikely(dbl_ua_possible)) {
2036                 if (ua_sent && scst_is_ua_command(cmd)) {
2037                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
2038                         dev->dev_double_ua_possible = 0;
2039                         dev->dev_serialized = 0;
2040                         dev->dev_reset_ua_sent = 0;
2041                 }
2042                 spin_unlock_bh(&dev->dev_lock);
2043         }
2044
2045 out:
2046         TRACE_EXIT_RES(res);
2047         return res;
2048
2049 out_unlock:
2050         spin_unlock_bh(&dev->dev_lock);
2051         goto out;
2052 }
2053
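/*
 * Returns 1, if the cmd finished with CHECK CONDITION, but without valid
 * sense data, so the caller should issue REQUEST SENSE. Also converts
 * retryable host statuses (DID_REQUEUE, etc.) to BUSY and other non-zero
 * host statuses to a HARDWARE ERROR check condition.
 */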
2054 static int scst_check_auto_sense(struct scst_cmd *cmd)
2055 {
2056         int res = 0;
2057
2058         TRACE_ENTRY();
2059
2060         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2061             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
2062              SCST_NO_SENSE(cmd->sense_buffer)))
2063         {
2064                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2065                       "cmd->status=%x, cmd->msg_status=%x, "
2066                       "cmd->host_status=%x, cmd->driver_status=%x", cmd->status,
2067                       cmd->msg_status, cmd->host_status, cmd->driver_status);
2068                 res = 1;
2069         } else if (unlikely(cmd->host_status)) {
2070                 if ((cmd->host_status == DID_REQUEUE) ||
2071                     (cmd->host_status == DID_IMM_RETRY) ||
2072                     (cmd->host_status == DID_SOFT_ERROR)) {
2073                         scst_set_busy(cmd);
2074                 } else {
2075                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2076                                 "received, returning HARDWARE ERROR instead",
2077                                 cmd->host_status);
2078                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2079                 }
2080         }
2081
2082         TRACE_EXIT_RES(res);
2083         return res;
2084 }
2085
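/*
 * Post-execution checks and fixups: retrieving missing sense via REQUEST
 * SENSE, sense/UA processing, setting the Write Protect bit in MODE SENSE
 * data for read-only LUNs, clearing the NormACA bit in standard INQUIRY
 * data, cleaning up after a failed RESERVE and generating UAs after
 * MODE/LOG SELECT. Returns 1 (with *pres set), if the caller should
 * return *pres instead of continuing.
 */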
2086 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
2087 {
2088         int res = 0, rc;
2089         unsigned char type;
2090
2091         TRACE_ENTRY();
2092
2093         if (unlikely(cmd->cdb[0] == REQUEST_SENSE)) {
2094                 if (cmd->internal)
2095                         cmd = scst_complete_request_sense(cmd);
2096         } else if (unlikely(scst_check_auto_sense(cmd))) {
2097                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
2098                             "without sense data (opcode 0x%x), issuing "
2099                             "REQUEST SENSE", cmd->cdb[0]);
2100                 rc = scst_prepare_request_sense(cmd);
2101                 if (rc > 0) {
2102                         *pres = rc;
2103                         res = 1;
2104                         goto out;
2105                 } else {
2106                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
2107                                     "returning HARDWARE ERROR");
2108                         scst_set_cmd_error(cmd,
2109                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
2110                 }
2111         } else if (scst_check_sense(cmd)) {
2112                 *pres = SCST_CMD_STATE_RES_CONT_SAME;
2113                 res = 1;
2114                 goto out;
2115         }
2116
2117         type = cmd->dev->handler->type;
2118         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
2119             cmd->tgt_dev->acg_dev->rd_only_flag &&
2120             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
2121              type == TYPE_TAPE))
2122         {
2123                 int32_t length;
2124                 uint8_t *address;
2125
2126                 length = scst_get_buf_first(cmd, &address);
2127                 if (length <= 0)
2128                         goto out;
2129                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2130                         address[2] |= 0x80;   /* Write Protect*/
2131                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2132                         address[3] |= 0x80;   /* Write Protect*/
2133                 scst_put_buf(cmd, address);
2134         }
2135
2136         /* 
2137          * Check and clear NormACA option for the device, if necessary,
2138          * since we don't support ACA
2139          */
2140         if ((cmd->cdb[0] == INQUIRY) &&
2141             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
2142             (cmd->resp_data_len > SCST_INQ_BYTE3))
2143         {
2144                 uint8_t *buffer;
2145                 int buflen;
2146
2147                 /* ToDo: all pages ?? */
2148                 buflen = scst_get_buf_first(cmd, &buffer);
2149                 if (buflen > 0) {
2150                         if (buflen > SCST_INQ_BYTE3) {
2151 #ifdef EXTRACHECKS
2152                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2153                                         PRINT_INFO_PR("NormACA set for device: "
2154                                             "lun=%Ld, type 0x%02x", 
2155                                             (uint64_t)cmd->lun, buffer[0]);
2156                                 }
2157 #endif
2158                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2159                         } else
2160                                 scst_set_cmd_error(cmd,
2161                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
2162
2163                         scst_put_buf(cmd, buffer);
2164                 }
2165         }
2166
2167         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
2168                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
2169                                                 &cmd->tgt_dev->tgt_dev_flags)) {
2170                         struct scst_tgt_dev *tgt_dev_tmp;
2171                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
2172                               (uint64_t)cmd->lun, cmd->status);
2173                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
2174                                      sizeof(cmd->sense_buffer));
2175                         /* Clearing the reservation */
2176                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
2177                                             dev_tgt_dev_list_entry) {
2178                                 clear_bit(SCST_TGT_DEV_RESERVED, 
2179                                         &tgt_dev_tmp->tgt_dev_flags);
2180                         }
2181                         cmd->dev->dev_reserved = 0;
2182                 }
2183                 scst_unblock_dev(cmd->dev);
2184         }
2185         
2186         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
2187                      (cmd->cdb[0] == MODE_SELECT_10) ||
2188                      (cmd->cdb[0] == LOG_SELECT)))
2189         {
2190                 if (cmd->status == 0) {
2191                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2192                                 "setting the SELECT UA (lun=%Ld)", 
2193                                 (uint64_t)cmd->lun);
2194                         spin_lock_bh(&scst_temp_UA_lock);
2195                         if (cmd->cdb[0] == LOG_SELECT) {
2196                                 scst_set_sense(scst_temp_UA,
2197                                         sizeof(scst_temp_UA),
2198                                         UNIT_ATTENTION, 0x2a, 0x02);
2199                         } else {
2200                                 scst_set_sense(scst_temp_UA,
2201                                         sizeof(scst_temp_UA),
2202                                         UNIT_ATTENTION, 0x2a, 0x01);
2203                         }
2204                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2205                                 sizeof(scst_temp_UA), 1);
2206                         spin_unlock_bh(&scst_temp_UA_lock);
2207                 }
2208                 scst_unblock_dev(cmd->dev);
2209         }
2210
2211 out:
2212         TRACE_EXIT_RES(res);
2213         return res;
2214 }
2215
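/*
 * DEV_DONE processing: reschedules to a thread, if the dev handler's
 * dev_done() can't be called in atomic context, runs scst_done_cmd_check()
 * and then calls the handler's dev_done(), using its return value (or
 * XMIT_RESP by default) as the next cmd state.
 */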
2216 static int scst_dev_done(struct scst_cmd *cmd)
2217 {
2218         int res = SCST_CMD_STATE_RES_CONT_SAME;
2219         int state;
2220         int atomic = scst_cmd_atomic(cmd);
2221
2222         TRACE_ENTRY();
2223
2224         if (atomic && !cmd->dev->handler->dev_done_atomic) 
2225         {
2226                 TRACE_DBG("Dev handler %s dev_done() can not be "
2227                       "called in atomic context, rescheduling to the thread",
2228                       cmd->dev->handler->name);
2229                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2230                 goto out;
2231         }
2232
2233         if (scst_done_cmd_check(cmd, &res))
2234                 goto out;
2235
2236         state = SCST_CMD_STATE_XMIT_RESP;
2237         if (likely(!scst_is_cmd_local(cmd)) && 
2238             likely(cmd->dev->handler->dev_done != NULL))
2239         {
2240                 int rc;
2241                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2242                       cmd->dev->handler->name, cmd);
2243                 rc = cmd->dev->handler->dev_done(cmd);
2244                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2245                       cmd->dev->handler->name, rc);
2246                 if (rc != SCST_CMD_STATE_DEFAULT)
2247                         state = rc;
2248         }
2249
2250         switch (state) {
2251         case SCST_CMD_STATE_DEV_PARSE:
2252         case SCST_CMD_STATE_PREPARE_SPACE:
2253         case SCST_CMD_STATE_RDY_TO_XFER:
2254         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2255         case SCST_CMD_STATE_DEV_DONE:
2256         case SCST_CMD_STATE_XMIT_RESP:
2257         case SCST_CMD_STATE_FINISHED:
2258                 cmd->state = state;
2259                 res = SCST_CMD_STATE_RES_CONT_SAME;
2260                 break;
2261
2262         case SCST_CMD_STATE_NEED_THREAD_CTX:
2263                 TRACE_DBG("Dev handler %s dev_done() requested "
2264                       "thread context, rescheduling",
2265                       cmd->dev->handler->name);
2266                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2267                 break;
2268
2269         default:
2270                 if (state >= 0) {
2271                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2272                                 "invalid cmd state %d", 
2273                                 cmd->dev->handler->name, state);
2274                 } else {
2275                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2276                                 "error %d", cmd->dev->handler->name, 
2277                                 state);
2278                 }
2279                 scst_set_cmd_error(cmd,
2280                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2281                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2282                 res = SCST_CMD_STATE_RES_CONT_SAME;
2283                 break;
2284         }
2285
2286 out:
2287         TRACE_EXIT_HRES(res);
2288         return res;
2289 }
2290
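/*
 * Sends the response to the target driver via its xmit_response() callback:
 * removes the cmd from the session search list, handles the ABORTED and
 * NO_RESP flags and retries the transmission on SCST_TGT_RES_QUEUE_FULL.
 */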
2291 static int scst_xmit_response(struct scst_cmd *cmd)
2292 {
2293         int res, rc;
2294         int atomic = scst_cmd_atomic(cmd);
2295
2296         TRACE_ENTRY();
2297
2298         /*
2299          * Check here also in order to avoid unnecessary delays of other
2300          * commands.
2301          */
2302         if (unlikely(!cmd->sent_to_midlev) && (cmd->tgt_dev != NULL)) {
2303                 TRACE_SN("cmd %p was not sent to mid-lev (sn %ld)",
2304                         cmd, cmd->sn);
2305                 scst_unblock_deferred(cmd->tgt_dev, cmd);
2306                 cmd->sent_to_midlev = 1;
2307         }
2308
2309         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2310                 TRACE_DBG("%s", "xmit_response() can not be "
2311                       "called in atomic context, rescheduling to the thread");
2312                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2313                 goto out;
2314         }
2315
2316         /*
2317          * If we don't remove cmd from the search list here, before
2318          * submitting it for transmission, we will have a race: if for
2319          * some reason cmd's release is delayed after transmission and
2320          * the initiator sends a cmd with the same tag, it is possible that
2321          * a wrong cmd will be found by the find() functions.
2322          */
2323         spin_lock_irq(&cmd->sess->sess_list_lock);
2324         list_del(&cmd->search_cmd_list_entry);
2325         spin_unlock_irq(&cmd->sess->sess_list_lock);
2326
2327         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2328         smp_mb__after_set_bit();
2329
2330         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2331                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2332                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2333                                 "(tag %lld), returning TASK ABORTED", cmd, cmd->tag);
2334                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2335                 }
2336         }
2337
2338         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2339                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %lld), skipping",
2340                         cmd, cmd->tag);
2341                 cmd->state = SCST_CMD_STATE_FINISHED;
2342                 res = SCST_CMD_STATE_RES_CONT_SAME;
2343                 goto out;
2344         }
2345
2346 #ifdef DEBUG_TM
2347         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2348                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2349                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2350                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2351                         goto out;
2352                 }
2353                 TRACE_MGMT_DBG("Delaying cmd %p (tag %lld) for 1 second",
2354                         cmd, cmd->tag);
2355                 schedule_timeout_uninterruptible(HZ);
2356         }
2357 #endif
2358
2359         while (1) {
2360                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2361
2362                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2363                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2364
2365                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2366
2367 #if defined(DEBUG) || defined(TRACING)
2368                 if (cmd->sg) {
2369                         int i;
2370                         struct scatterlist *sg = cmd->sg;
2371                         TRACE(TRACE_SEND_BOT,
2372                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2373                               cmd->sg_cnt, sg, (void*)sg[0].page);
2374                         for(i = 0; i < cmd->sg_cnt; ++i) {
2375                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2376                                     "Xmitting sg", page_address(sg[i].page),
2377                                     sg[i].length);
2378                         }
2379                 }
2380 #endif
2381
2382 #ifdef DEBUG_RETRY
2383                 if (((scst_random() % 100) == 77))
2384                         rc = SCST_TGT_RES_QUEUE_FULL;
2385                 else
2386 #endif
2387                         rc = cmd->tgtt->xmit_response(cmd);
2388                 TRACE_DBG("xmit_response() returned %d", rc);
2389
2390                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2391                         goto out;
2392
2393                 /* Restore the previous state */
2394                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2395
2396                 switch (rc) {
2397                 case SCST_TGT_RES_QUEUE_FULL:
2398                 {
2399                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2400                                 break;
2401                         else
2402                                 continue;
2403                 }
2404
2405                 case SCST_TGT_RES_NEED_THREAD_CTX:
2406                 {
2407                         TRACE_DBG("Target driver %s xmit_response() "
2408                               "requested thread context, rescheduling",
2409                               cmd->tgtt->name);
2410                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2411                         break;
2412                 }
2413
2414                 default:
2415                         goto out_error;
2416                 }
2417                 break;
2418         }
2419
2420 out:
2421         /* Caution: cmd can be already dead here */
2422         TRACE_EXIT_HRES(res);
2423         return res;
2424
2425 out_error:
2426         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2427                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2428                         "fatal error", cmd->tgtt->name);
2429         } else {
2430                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2431                         "invalid value %d", cmd->tgtt->name, rc);
2432         }
2433         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2434         cmd->state = SCST_CMD_STATE_FINISHED;
2435         res = SCST_CMD_STATE_RES_CONT_SAME;
2436         goto out;
2437 }
2438
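/*
 * Called by the target driver when the response has been transmitted:
 * moves the cmd to the FINISHED state and redirects its further processing.
 */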
2439 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2440 {
2441         TRACE_ENTRY();
2442
2443         sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2444
2445         cmd->state = SCST_CMD_STATE_FINISHED;
2446         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2447
2448         TRACE_EXIT();
2449         return;
2450 }
2451
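/*
 * Final accounting for the cmd: updates the cmd memory counter and the
 * session's outstanding cmd count, then drops the cmd reference.
 */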
2452 static int scst_finish_cmd(struct scst_cmd *cmd)
2453 {
2454         int res;
2455
2456         TRACE_ENTRY();
2457
2458         if (cmd->mem_checked) {
2459                 spin_lock_bh(&scst_cmd_mem_lock);
2460                 scst_cur_cmd_mem -= cmd->bufflen;
2461                 spin_unlock_bh(&scst_cmd_mem_lock);
2462         }
2463
2464         atomic_dec(&cmd->sess->sess_cmd_count);
2465
2466         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2467                 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
2468                         "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
2469                         atomic_read(&scst_cmd_count));
2470         }
2471
2472         scst_cmd_put(cmd);
2473
2474         res = SCST_CMD_STATE_RES_CONT_NEXT;
2475
2476         TRACE_EXIT_HRES(res);
2477         return res;
2478 }
2479
2480 /*
2481  * No locks, but it must be externally serialized (see comment for
2482  * scst_cmd_init_done() in scsi_tgt.h)
2483  */
2484 static void scst_cmd_set_sn(struct scst_cmd *cmd)
2485 {
2486         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2487         unsigned long flags;
2488
2489         if (scst_is_implicit_hq(cmd)) {
2490                 TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "Implicit HQ cmd %p", cmd);
2491                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
2492         }
2493
2494         /* Optimized for lockless fast path */
2495
2496         scst_check_debug_sn(cmd);
2497
2498         switch(cmd->queue_type) {
2499         case SCST_CMD_QUEUE_SIMPLE:
2500         case SCST_CMD_QUEUE_UNTAGGED:
2501                 if (likely(tgt_dev->num_free_sn_slots >= 0)) {
2502                         if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
2503                                 tgt_dev->curr_sn++;
2504                                 TRACE_SN("Incremented curr_sn %ld",
2505                                         tgt_dev->curr_sn);
2506                         }
2507                         cmd->sn_slot = tgt_dev->cur_sn_slot;
2508                         cmd->sn = tgt_dev->curr_sn;
2509                         tgt_dev->prev_cmd_ordered = 0;
2510                 } else {
2511                         TRACE(TRACE_MINOR, "%s", "Not enough SN slots");
2512                         goto ordered;
2513                 }
2514                 break;
2515
2516         case SCST_CMD_QUEUE_ORDERED:
2517                 TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "ORDERED cmd %p "
2518                         "(op %x)", cmd, cmd->cdb[0]);
2519 ordered:
2520                 if (!tgt_dev->prev_cmd_ordered) {
2521                         spin_lock_irqsave(&tgt_dev->sn_lock, flags);
2522                         tgt_dev->num_free_sn_slots--;
2523                         smp_mb();
2524                         if ((tgt_dev->num_free_sn_slots >= 0) &&
2525                             (atomic_read(tgt_dev->cur_sn_slot) > 0)) {
2526                                 do {
2527                                         tgt_dev->cur_sn_slot++;
2528                                         if (tgt_dev->cur_sn_slot == 
2529                                                 tgt_dev->sn_slots +
2530                                                 ARRAY_SIZE(tgt_dev->sn_slots))
2531                                             tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
2532                                 } while(atomic_read(tgt_dev->cur_sn_slot) != 0);
2533                                 TRACE_SN("New cur SN slot %zd",
2534                                         tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
2535                         } else
2536                                 tgt_dev->num_free_sn_slots++;
2537                         spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
2538                 }
2539                 tgt_dev->prev_cmd_ordered = 1;
2540                 tgt_dev->curr_sn++;
2541                 cmd->sn = tgt_dev->curr_sn;
2542                 break;
2543
2544         case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
2545                 TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "HQ cmd %p "
2546                         "(op %x)", cmd, cmd->cdb[0]);
2547                 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
2548                 /* Add at the head, as required by SAM */
2549                 list_add(&cmd->sn_cmd_list_entry, &tgt_dev->hq_cmd_list);
2550                 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
2551                 break;
2552
2553         default:
2554                 PRINT_ERROR_PR("Unsupported queue type %d, treating it as "
2555                         "ORDERED", cmd->queue_type);
2556                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2557                 goto ordered;
2558         }
2559
2560         TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
2561                 "num_free_sn_slots %d, prev_cmd_ordered %ld, "
2562                 "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
2563                 atomic_read(tgt_dev->cur_sn_slot), 
2564                 tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
2565                 tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
2566
2567         cmd->no_sn = 0;
2568         return;
2569 }
2570
2571 /*
2572  * Returns 0 on success, > 0 when we need to wait for unblock,
2573  * < 0 if there is no device (lun) or device type handler.
2574  *
2575  * No locks, but might be on IRQ, protection is done by the
2576  * suspended activity.
2577  */
2578 static int scst_translate_lun(struct scst_cmd *cmd)
2579 {
2580         struct scst_tgt_dev *tgt_dev = NULL;
2581         int res;
2582
2583         TRACE_ENTRY();
2584
2585         __scst_get(1);
2586
2587         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2588                 struct list_head *sess_tgt_dev_list_head =
2589                         &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
2590                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2591                         (uint64_t)cmd->lun);
2592                 res = -1;
2593                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
2594                                 sess_tgt_dev_list_entry) {
2595                         if (tgt_dev->lun == cmd->lun) {
2596                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2597
2598                                 if (unlikely(tgt_dev->dev->handler == NULL)) {
2599                                         PRINT_INFO_PR("Dev handler for device "
2600                                           "%Ld is NULL, the device will not be "
2601                                           "visible remotely", (uint64_t)cmd->lun);
2602                                         break;
2603                                 }
2604                                 
2605                                 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
2606                                 cmd->tgt_dev = tgt_dev;
2607                                 cmd->dev = tgt_dev->dev;
2608
2609                                 res = 0;
2610                                 break;
2611                         }
2612                 }
2613                 if (res != 0) {
2614                         TRACE(TRACE_MINOR, "tgt_dev for lun %Ld not found, command to "
2615                                 "nonexistent LU?", (uint64_t)cmd->lun);
2616                         __scst_put();
2617                 }
2618         } else {
2619                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
2620                 __scst_put();
2621                 res = 1;
2622         }
2623
2624         TRACE_EXIT_RES(res);
2625         return res;
2626 }
2627
2628 /*
2629  * No locks, but might be on IRQ
2630  *
2631  * Returns 0 on success, > 0 when we need to wait for unblock,
2632  * < 0 if there is no device (lun) or device type handler.
2633  */
2634 static int __scst_init_cmd(struct scst_cmd *cmd)
2635 {
2636         int res = 0;
2637
2638         TRACE_ENTRY();
2639
2640         res = scst_translate_lun(cmd);
2641         if (likely(res == 0)) {
2642                 int cnt;
2643                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2644                 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
2645                 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
2646                         TRACE(TRACE_RETRY, "Too many pending commands in "
2647                                 "session, returning BUSY to initiator \"%s\"",
2648                                 (cmd->sess->initiator_name[0] == '\0') ?
2649                                   "Anonymous" : cmd->sess->initiator_name);
2650                         goto out_busy;
2651                 }
2652                 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
2653                 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
2654                         TRACE(TRACE_RETRY, "Too many pending device commands, "
2655                                 "returning BUSY to initiator \"%s\"",
2656                                 (cmd->sess->initiator_name[0] == '\0') ?
2657                                   "Anonymous" : cmd->sess->initiator_name);
2658                         goto out_busy;
2659                 }
2660                 if (!cmd->no_sn)
2661                         scst_cmd_set_sn(cmd);
2662         } else if (res < 0) {
2663                 TRACE_DBG("Finishing cmd %p", cmd);
2664                 scst_set_cmd_error(cmd,
2665                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2666                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2667         } else
2668                 goto out;
2669
2670 out:
2671         TRACE_EXIT_RES(res);
2672         return res;
2673
2674 out_busy:
2675         scst_set_busy(cmd);
2676         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2677         goto out;
2678 }
2679
2680 /* Called under scst_init_lock and IRQs disabled */
2681 static void scst_do_job_init(void)
2682 {
2683         struct scst_cmd *cmd;
2684         int susp;
2685
2686         TRACE_ENTRY();
2687
2688 restart:
2689         susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
2690         if (scst_init_poll_cnt > 0)
2691                 scst_init_poll_cnt--;
2692
2693         list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
2694                 int rc;
2695                 if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
2696                         continue;
2697                 if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2698                         spin_unlock_irq(&scst_init_lock);
2699                         rc = __scst_init_cmd(cmd);
2700                         spin_lock_irq(&scst_init_lock);
2701                         if (rc > 0) {
2702                                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, restarting");
2703                                 goto restart;
2704                         }
2705                 } else {
2706                         TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %lld)",
2707                                 cmd, cmd->tag);
2708                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2709                 }
2710
2711                 /*
2712                  * Deleting cmd from the init cmd list only after __scst_init_cmd()
2713                  * is necessary to keep the check in scst_init_cmd() correct and
2714                  * so preserve the order of commands.
2715                  *
2716                  * We don't care about the race where one command sees the init
2717                  * cmd list as non-empty and inserts itself into it, while another
2718                  * command at the same time sees the list as empty and proceeds
2719                  * directly: it could only affect commands from the same initiator
2720                  * to the same tgt_dev, and init_cmd_done() doesn't guarantee the
2721                  * order of such simultaneous calls anyway.
2722                  */
2724                 TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
2725                 list_del(&cmd->cmd_list_entry);
2726                 spin_unlock(&scst_init_lock);
2727
2728                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2729                 TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
2730                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2731                         list_add(&cmd->cmd_list_entry,
2732                                 &cmd->cmd_lists->active_cmd_list);
2733                 else
2734                         list_add_tail(&cmd->cmd_list_entry,
2735                                 &cmd->cmd_lists->active_cmd_list);
2736                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2737                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2738
2739                 spin_lock(&scst_init_lock);
2740                 goto restart;
2741         }
2742
2743         if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
2744                 goto restart;
2745
2746         TRACE_EXIT();
2747         return;
2748 }
2749
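     /* Wakeup condition for scst_init_cmd_thread() */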
2750 static inline int test_init_cmd_list(void)
2751 {
2752         int res = (!list_empty(&scst_init_cmd_list) &&
2753                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2754                   unlikely(kthread_should_stop()) ||
2755                   (scst_init_poll_cnt > 0);
2756         return res;
2757 }
2758
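     /* Kernel thread draining scst_init_cmd_list via scst_do_job_init() */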
2759 int scst_init_cmd_thread(void *arg)
2760 {
2761         TRACE_ENTRY();
2762
2763         current->flags |= PF_NOFREEZE;
2764
2765         spin_lock_irq(&scst_init_lock);
2766         while (!kthread_should_stop()) {
2767                 wait_queue_t wait;
2768                 init_waitqueue_entry(&wait, current);
2769
2770                 if (!test_init_cmd_list()) {
2771                         add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
2772                                                  &wait);
2773                         for (;;) {
2774                                 set_current_state(TASK_INTERRUPTIBLE);
2775                                 if (test_init_cmd_list())
2776                                         break;
2777                                 spin_unlock_irq(&scst_init_lock);
2778                                 schedule();
2779                                 spin_lock_irq(&scst_init_lock);
2780                         }
2781                         set_current_state(TASK_RUNNING);
2782                         remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
2783                 }
2784                 scst_do_job_init();
2785         }
2786         spin_unlock_irq(&scst_init_lock);
2787
2788         /*
2789          * If kthread_should_stop() is true, we are guaranteed to be in
2790          * the module unload path, so scst_init_cmd_list must be empty.
2791          */
2792         sBUG_ON(!list_empty(&scst_init_cmd_list));
2793
2794         TRACE_EXIT();
2795         return 0;
2796 }
2797
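     /*
      * Drives the command's state machine until the pass finishes
      * (SCST_CMD_STATE_RES_CONT_NEXT) or the command asks to be continued in
      * thread context (SCST_CMD_STATE_RES_NEED_THREAD), in which case it is
      * put back on the head of its active cmd list.
      */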
2798 /* Called with no locks held */
2799 void scst_process_active_cmd(struct scst_cmd *cmd, int context)
2800 {
2801         int res;
2802
2803         TRACE_ENTRY();
2804
2805         EXTRACHECKS_BUG_ON(in_irq());
2806
2807         cmd->atomic = (context == SCST_CONTEXT_DIRECT_ATOMIC);
2808
2809         do {
2810                 switch (cmd->state) {
2811                 case SCST_CMD_STATE_DEV_PARSE:
2812                         res = scst_parse_cmd(cmd);
2813                         break;
2814
2815                 case SCST_CMD_STATE_PREPARE_SPACE:
2816                         res = scst_prepare_space(cmd);
2817                         break;
2818
2819                 case SCST_CMD_STATE_RDY_TO_XFER:
2820                         res = scst_rdy_to_xfer(cmd);
2821                         break;
2822
2823                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2824                         if (tm_dbg_check_cmd(cmd) != 0) {
2825                                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2826                                 TRACE_MGMT_DBG("Skipping cmd %p (tag %lld), "
2827                                         "because of TM DBG delay", cmd,
2828                                         cmd->tag);
2829                                 break;
2830                         }
2831                         res = scst_send_to_midlev(cmd);
2832                         /* !! At this point cmd, sess & tgt_dev may already have been freed !! */
2833                         break;
2834
2835                 case SCST_CMD_STATE_DEV_DONE:
2836                         res = scst_dev_done(cmd);
2837                         break;
2838
2839                 case SCST_CMD_STATE_XMIT_RESP:
2840                         res = scst_xmit_response(cmd);
2841                         break;
2842
2843                 case SCST_CMD_STATE_FINISHED:
2844                         res = scst_finish_cmd(cmd);
2845                         break;
2846
2847                 default:
2848                         PRINT_ERROR_PR("cmd (%p) in state %d, but shouldn't be",
2849                                cmd, cmd->state);
2850                         sBUG();
2851                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2852                         break;
2853                 }
2854         } while (res == SCST_CMD_STATE_RES_CONT_SAME);
2855
2856         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2857                 /* Nothing to do */
2858         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2859                 spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2860                 switch (cmd->state) {
2861                 case SCST_CMD_STATE_DEV_PARSE:
2862                 case SCST_CMD_STATE_PREPARE_SPACE:
2863                 case SCST_CMD_STATE_RDY_TO_XFER:
2864                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2865                 case SCST_CMD_STATE_DEV_DONE:
2866                 case SCST_CMD_STATE_XMIT_RESP:
2867                 case SCST_CMD_STATE_FINISHED:
2868                         TRACE_DBG("Adding cmd %p to head of active cmd list", cmd);
2869                         list_add(&cmd->cmd_list_entry,
2870                                 &cmd->cmd_lists->active_cmd_list);
2871                         break;
2872 #ifdef EXTRACHECKS
2873                 /* states a cmd should never be in at this point */
2874                 case SCST_CMD_STATE_DEFAULT:
2875                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2876                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2877                                 "useful list (left on scst cmd list)", cmd, 
2878                                 cmd->state);
2879                         spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2880                         sBUG();
2881                         spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2882                         break;
2883 #endif
2884                 default:
2885                         break;
2886                 }
2887                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2888                 spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2889         } else
2890                 sBUG();
2891
2892         TRACE_EXIT();
2893         return;
2894 }
2895
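     /* Processes all commands on cmd_list via scst_process_active_cmd() */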
2896 /* Called under cmd_list_lock and IRQs disabled */
2897 static void scst_do_job_active(struct list_head *cmd_list,
2898         spinlock_t *cmd_list_lock, int context)
2899 {
2900         TRACE_ENTRY();
2901
2902 #ifdef EXTRACHECKS
2903         WARN_ON((context != SCST_CONTEXT_DIRECT_ATOMIC) && 
2904                 (context != SCST_CONTEXT_DIRECT));
2905 #endif
2906
2907         while (!list_empty(cmd_list)) {
2908                 struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
2909                                         cmd_list_entry);
2910                 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
2911                 list_del(&cmd->cmd_list_entry);
2912                 spin_unlock_irq(cmd_list_lock);
2913                 scst_process_active_cmd(cmd, context);
2914                 spin_lock_irq(cmd_list_lock);
2915         }
2916
2917         TRACE_EXIT();
2918         return;
2919 }
2920
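     /* Wakeup condition for scst_cmd_thread() */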
2921 static inline int test_cmd_lists(struct scst_cmd_lists *p_cmd_lists)
2922 {
2923         int res = !list_empty(&p_cmd_lists->active_cmd_list) ||
2924             unlikely(kthread_should_stop()) ||
2925             tm_dbg_is_release();
2926         return res;
2927 }
2928
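     /* Kernel thread processing the active cmd list of its scst_cmd_lists */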
2929 int scst_cmd_thread(void *arg)
2930 {
2931         struct scst_cmd_lists *p_cmd_lists = (struct scst_cmd_lists*)arg;
2932
2933         TRACE_ENTRY();
2934
2935 #if 0
2936         set_user_nice(current, 10);
2937 #endif
2938         current->flags |= PF_NOFREEZE;
2939
2940         spin_lock_irq(&p_cmd_lists->cmd_list_lock);
2941         while (!kthread_should_stop()) {
2942                 wait_queue_t wait;
2943                 init_waitqueue_entry(&wait, current);
2944
2945                 if (!test_cmd_lists(p_cmd_lists)) {
2946                         add_wait_queue_exclusive(&p_cmd_lists->cmd_list_waitQ,
2947                                 &wait);
2948                         for (;;) {
2949                                 set_current_state(TASK_INTERRUPTIBLE);
2950                                 if (test_cmd_lists(p_cmd_lists))
2951                                         break;
2952                                 spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
2953                                 schedule();
2954                                 spin_lock_irq(&p_cmd_lists->cmd_list_lock);
2955                         }
2956                         set_current_state(TASK_RUNNING);
2957                         remove_wait_queue(&p_cmd_lists->cmd_list_waitQ, &wait);
2958                 }
2959
2960                 if (tm_dbg_is_release()) {
2961                         spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
2962                         tm_dbg_check_released_cmds();
2963                         spin_lock_irq(&p_cmd_lists->cmd_list_lock);
2964                 }
2965
2966                 scst_do_job_active(&p_cmd_lists->active_cmd_list,
2967                         &p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT);
2968         }
2969         spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
2970
2971 #ifdef EXTRACHECKS
2972         /*
2973          * If kthread_should_stop() is true, we are either in the module
2974          * unload path, or there is at least one other thread left to
2975          * process the command lists.
2976          */
2977         if (p_cmd_lists == &scst_main_cmd_lists) {
2978                 sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
2979                          !list_empty(&scst_main_cmd_lists.active_cmd_list));
2980         }
2981 #endif
2982
2983         TRACE_EXIT();
2984         return 0;
2985 }
2986
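     /*
      * Tasklet handler: processes commands queued by scst_schedule_tasklet()
      * in SCST_CONTEXT_DIRECT_ATOMIC.
      */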
2987 void scst_cmd_tasklet(long p)
2988 {
2989         struct scst_tasklet *t = (struct scst_tasklet*)p;
2990
2991         TRACE_ENTRY();
2992
2993         spin_lock_irq(&t->tasklet_lock);
2994         scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock,
2995                 SCST_CONTEXT_DIRECT_ATOMIC);
2996         spin_unlock_irq(&t->tasklet_lock);
2997
2998         TRACE_EXIT();
2999         return;
3000 }
3001
3002 /*
3003  * Returns 0 on success, < 0 if no tgt_dev is found for the lun, or
3004  * > 0 if SCST_FLAG_SUSPENDED is set while SCST_FLAG_SUSPENDING is not.
3005  * No locks; protection is done by the suspended activity.
3006  */
3007 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
3008 {
3009         struct scst_tgt_dev *tgt_dev = NULL;
3010         struct list_head *sess_tgt_dev_list_head;
3011         int res = -1;
3012
3013         TRACE_ENTRY();
3014
3015         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
3016               (uint64_t)mcmd->lun);
3017
3018         __scst_get(1);
3019
3020         if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
3021                      !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
3022                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
3023                 __scst_put();
3024                 res = 1;
3025                 goto out;
3026         }
3027
3028         sess_tgt_dev_list_head =
3029                 &mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
3030         list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3031                         sess_tgt_dev_list_entry) {
3032                 if (tgt_dev->lun == mcmd->lun) {
3033                         TRACE_DBG("tgt_dev %p found", tgt_dev);
3034                         mcmd->mcmd_tgt_dev = tgt_dev;
3035                         res = 0;
3036                         break;
3037                 }
3038         }
3039         if (mcmd->mcmd_tgt_dev == NULL)
3040                 __scst_put();
3041
3042 out:
3043         TRACE_EXIT_HRES(res);
3044         return res;
3045 }
3046
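     /*
      * Called when a command an mcmd is waiting for completes: decrements
      * mcmd->cmd_wait_count and, once it reaches zero and mcmd is completed,
      * puts the mgmt cmd back on the active mgmt cmd list.
      */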
3047 /* No locks */
3048 void scst_complete_cmd_mgmt(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd)
3049 {
3050         TRACE_ENTRY();
3051
3052         spin_lock_irq(&scst_mcmd_lock);
3053
3054         TRACE_MGMT_DBG("cmd %p completed (tag %lld, mcmd %p, "
3055                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
3056                 mcmd->cmd_wait_count);
3057
3058         cmd->mgmt_cmnd = NULL;
3059
3060         if (cmd->completed)
3061                 mcmd->completed_cmd_count++;
3062
3063         mcmd->cmd_wait_count--;
3064         if (mcmd->cmd_wait_count > 0) {
3065                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
3066                         mcmd->cmd_wait_count);
3067                 goto out_unlock;
3068         }
3069
3070         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3071
3072         if (mcmd->completed) {
3073                 TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list",
3074                         mcmd);
3075                 list_add_tail(&mcmd->mgmt_cmd_list_entry,
3076                         &scst_active_mgmt_cmd_list);
3077         }
3078
3079         spin_unlock_irq(&scst_mcmd_lock);
3080
3081         wake_up(&scst_mgmt_cmd_list_waitQ);
3082
3083 out:
3084         TRACE_EXIT();
3085         return;
3086
3087 out_unlock:
3088         spin_unlock_irq(&scst_mcmd_lock);
3089         goto out;
3090 }
3091
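     /*
      * Calls the dev handler's task_mgmt_fn(), if there is one; returns its
      * result, or SCST_DEV_TM_NOT_COMPLETED if the handler has no such fn.
      */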
3092 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
3093         struct scst_tgt_dev *tgt_dev, int set_status)
3094 {
3095         int res = SCST_DEV_TM_NOT_COMPLETED;
3096         struct scst_dev_type *h = tgt_dev->dev->handler;
3097
3098         if (h->task_mgmt_fn) {
3099                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
3100                         h->name, mcmd->fn);
3101                 EXTRACHECKS_BUG_ON(in_irq());
3102                 res = h->task_mgmt_fn(mcmd, tgt_dev);
3103                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
3104                       h->name, res);
3105                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
3106                         mcmd->status = res;
3107         }
3108         return res;
3109 }
3110
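     /* Returns 1 for ABORT TASK, ABORT TASK SET and CLEAR TASK SET, 0 otherwise */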
3111 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
3112 {
3113         switch (mgmt_fn) {
3114                 case SCST_ABORT_TASK:
3115                 case SCST_ABORT_TASK_SET:
3116                 case SCST_CLEAR_TASK_SET:
3117                         return 1;
3118                 default:
3119                         return 0;
3120         }
3121 }
3122
3123 /*
3124  * Might be called under sess_list_lock with IRQs and BHs turned off.
3125  * No return value (see the deferred ABORT handling inside).
3126  */
3127 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
3128         int other_ini, int call_dev_task_mgmt_fn)
3129 {
3130         TRACE_ENTRY();
3131
3132         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %lld)", cmd, cmd->tag);
3133
3134         if (other_ini) {
3135                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3136                 smp_mb__after_set_bit();
3137         }
3138         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
3139         smp_mb__after_set_bit();
3140
3141         if (cmd->tgt_dev == NULL) {
3142                 unsigned long flags;
3143                 spin_lock_irqsave(&scst_init_lock, flags);
3144                 scst_init_poll_cnt++;
3145                 spin_unlock_irqrestore(&scst_init_lock, flags);
3146                 wake_up(&scst_init_cmd_list_waitQ);
3147         }
3148
3149         if (call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL)) {
3150                 EXTRACHECKS_BUG_ON(irqs_disabled());
3151                 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
3152         }
3153
3154         if (mcmd) {
3155                 unsigned long flags;
3156                 /*
3157                  * Delay the response until the command finishes, in
3158                  * order to guarantee that "no further responses from
3159                  * the task are sent to the SCSI initiator port" after
3160                  * the response for the TM function is sent (SAM). Plus,
3161                  * we must wait here to be sure that we won't receive
3162                  * duplicate commands with the same tag.
3163                  */
3164                 TRACE(TRACE_MGMT, "cmd %p (tag %lld) being executed/"
3165                         "xmitted (state %d), deferring ABORT...", cmd,
3166                         cmd->tag, cmd->state);
3167 #ifdef EXTRACHECKS
3168                 if (cmd->mgmt_cmnd) {
3169                         printk(KERN_ALERT "cmd %p (tag %lld, state %d) "
3170                                 "has non-NULL mgmt_cmnd %p!!! Current "
3171                                 "mcmd %p\n", cmd, cmd->tag, cmd->state,
3172                                 cmd->mgmt_cmnd, mcmd);
3173                 }
3174 #endif
3175                 sBUG_ON(cmd->mgmt_cmnd);
3176                 spin_lock_irqsave(&scst_mcmd_lock, flags);
3177                 mcmd->cmd_wait_count++;
3178                 spin_unlock_irqrestore(&scst_mcmd_lock, flags);
3179                 /* Either cmd can't die here, or sess_list_lock is already held */
3180                 cmd->mgmt_cmnd = mcmd;
3181         }
3182
3183         tm_dbg_release_cmd(cmd);
3184
3185         TRACE_EXIT();
3186         return;
3187 }
3188
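     /*
      * Marks mcmd completed; returns 0 if it can go to DONE immediately,
      * -1 if it has to wait for outstanding aborted commands to finish.
      */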
3189 /* No locks */
3190 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
3191 {
3192         int res;
3193         spin_lock_irq(&scst_mcmd_lock);
3194         if (mcmd->cmd_wait_count != 0) {
3195                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
3196                         "wait", mcmd->cmd_wait_count);
3197                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
3198                 res = -1;
3199         } else {
3200                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3201                 res = 0;
3202         }
3203         mcmd->completed = 1;
3204         spin_unlock_irq(&scst_mcmd_lock);
3205         return res;
3206 }
3207
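     /*
      * If cmd is aborted, puts it on its active cmd list and returns 1,
      * otherwise returns 0.
      */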
3208 static int __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd)
3209 {
3210         int res;
3211         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3212                 TRACE_MGMT_DBG("Adding aborted blocked cmd %p to active cmd "
3213                         "list", cmd);
3214                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3215                 list_add_tail(&cmd->cmd_list_entry,
3216                         &cmd->cmd_lists->active_cmd_list);
3217                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3218                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3219                 res = 1;
3220         } else
3221                 res = 0;
3222         return res;
3223 }
3224
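     /*
      * Moves aborted commands off the per-device blocked and deferred SN
      * lists onto their active cmd lists.
      */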
3225 static void scst_unblock_aborted_cmds(int scst_mutex_held)
3226 {
3227         struct scst_device *dev;
3228
3229         TRACE_ENTRY();
3230
3231         if (!scst_mutex_held)
3232                 down(&scst_mutex);
3233
3234         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3235                 struct scst_cmd *cmd, *tcmd;
3236                 struct scst_tgt_dev *tgt_dev;
3237                 spin_lock_bh(&dev->dev_lock);
3238                 local_irq_disable();
3239                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3240                                         blocked_cmd_list_entry) {
3241                         if (__scst_check_unblock_aborted_cmd(cmd))
3242                                 list_del(&cmd->blocked_cmd_list_entry);
3243                 }
3244                 local_irq_enable();
3245                 spin_unlock_bh(&dev->dev_lock);
3246
3247                 local_irq_disable();
3248                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3249                                          dev_tgt_dev_list_entry) {
3250                         spin_lock(&tgt_dev->sn_lock);
3251                         list_for_each_entry_safe(cmd, tcmd,
3252                                         &tgt_dev->deferred_cmd_list,
3253                                         sn_cmd_list_entry) {
3254                                 if (__scst_check_unblock_aborted_cmd(cmd)) {
3255                                         TRACE_MGMT_DBG("Deleting aborted SN "
3256                                                 "cmd %p from SN list", cmd);
3257                                         tgt_dev->def_cmd_count--;
3258                                         list_del(&cmd->sn_cmd_list_entry);
3259                                 }
3260                         }
3261                         spin_unlock(&tgt_dev->sn_lock);
3262                 }
3263                 local_irq_enable();
3264         }
3265
3266         if (!scst_mutex_held)
3267                 up(&scst_mutex);
3268
3269         TRACE_EXIT();
3270         return;
3271 }
3272
3273 /* Aborts all cmds belonging to tgt_dev (or its lun) in sess's search cmd list */
3274 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
3275         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
3276 {
3277         struct scst_cmd *cmd;
3278         struct scst_session *sess = tgt_dev->sess;
3279
3280         TRACE_ENTRY();
3281
3282         spin_lock_irq(&sess->sess_list_lock);
3283
3284         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
3285         list_for_each_entry(cmd, &sess->search_cmd_list, 
3286                         search_cmd_list_entry) {
3287                 if ((cmd->tgt_dev == tgt_dev) ||
3288                     ((cmd->tgt_dev == NULL) && 
3289                      (cmd->lun == tgt_dev->lun)))
3290                         scst_abort_cmd(cmd, mcmd, other_ini, 0);
3291         }
3292         spin_unlock_irq(&sess->sess_list_lock);
3293
3294         scst_unblock_aborted_cmds(scst_mutex_held);
3295
3296         TRACE_EXIT();
3297         return;
3298 }
3299
3300 /* Returns 0 if the command processing should be continued, <0 otherwise */
3301 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
3302 {
3303         int res;
3304         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3305         struct scst_device *dev = tgt_dev->dev;
3306
3307         TRACE(TRACE_MGMT, "Aborting task set (lun=%Ld, mcmd=%p)",
3308                 tgt_dev->lun, mcmd);
3309
3310         spin_lock_bh(&dev->dev_lock);
3311         __scst_block_dev(dev);
3312         spin_unlock_bh(&dev->dev_lock);
3313
3314         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
3315         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3316
3317         res = scst_set_mcmd_next_state(mcmd);
3318
3319         TRACE_EXIT_RES(res);
3320         return res;
3321 }
3322
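     /*
      * Returns 0 if the mgmt cmd may run now (and sets SCST_FLAG_TM_ACTIVE),
      * -1 if it has been queued on the delayed mgmt cmd list.
      */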
3323 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3324 {
3325         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags) && !mcmd->active) {
3326                 TRACE_MGMT_DBG("Adding mgmt cmd %p to delayed mgmt cmd list",
3327                         mcmd);
3328                 spin_lock_irq(&scst_mcmd_lock);
3329                 list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3330                         &scst_delayed_mgmt_cmd_list);
3331                 spin_unlock_irq(&scst_mcmd_lock);
3332                 return -1;
3333         } else {
3334                 mcmd->active = 1;
3335                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3336                 return 0;
3337         }
3338 }
3339
3340 /* Returns 0 if the command processing should be continued,
3341  * >0 if it should be requeued, <0 otherwise */
3342 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3343 {
3344         int res = 0;
3345
3346         TRACE_ENTRY();
3347
3348         res = scst_check_delay_mgmt_cmd(mcmd);
3349         if (res != 0)
3350                 goto out;
3351
3352         if (mcmd->fn == SCST_ABORT_TASK) {
3353                 struct scst_session *sess = mcmd->sess;
3354                 struct scst_cmd *cmd;
3355
3356                 spin_lock_irq(&sess->sess_list_lock);
3357                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3358                 if (cmd == NULL) {
3359                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3360                                 "tag %lld not found", mcmd->tag);
3361                         mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
3362                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3363                         spin_unlock_irq(&sess->sess_list_lock);
3364                         goto out;
3365                 }
3366                 scst_cmd_get(cmd);
3367                 spin_unlock_irq(&sess->sess_list_lock);
3368                 TRACE(TRACE_MGMT, "Cmd %p for tag %lld (sn %ld) found, "
3369                         "aborting it", cmd, mcmd->tag, cmd->sn);
3370                 mcmd->cmd_to_abort = cmd;
3371                 scst_abort_cmd(cmd, mcmd, 0, 1);
3372                 scst_unblock_aborted_cmds(0);
3373                 res = scst_set_mcmd_next_state(mcmd);
3374                 mcmd->cmd_to_abort = NULL; /* just in case */
3375                 scst_cmd_put(cmd);
3376         } else {
3377                 int rc;
3378                 rc = scst_mgmt_translate_lun(mcmd);
3379                 if (rc < 0) {
3380                         PRINT_ERROR_PR("Corresponding device for lun %Ld not "
3381                                 "found", (uint64_t)mcmd->lun);
3382                         mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
3383                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3384                 } else if (rc == 0)
3385                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
3386                 else
3387                         res = rc;
3388         }
3389
3390 out:
3391         TRACE_EXIT_RES(res);
3392         return res;
3393 }
3394
3395 /* Returns 0 if the command processing should be continued, <0 otherwise */
3396 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3397 {
3398         int res, rc;
3399         struct scst_device *dev, *d;
3400         struct scst_tgt_dev *tgt_dev;
3401         int cont, c;
3402         LIST_HEAD(host_devs);
3403
3404         TRACE_ENTRY();
3405
3406         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3407                 mcmd, atomic_read(&mcmd->sess->sess_cmd_count));
3408
3409         down(&scst_mutex);
3410
3411         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3412                 int found = 0;
3413
3414                 spin_lock_bh(&dev->dev_lock);
3415                 __scst_block_dev(dev);
3416                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3417                 spin_unlock_bh(&dev->dev_lock);
3418
3419                 cont = 0;
3420                 c = 0;
3421                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3422                         dev_tgt_dev_list_entry) 
3423                 {
3424                         cont = 1;
3425                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3426                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3427                                 c = 1;
3428                         else if ((rc < 0) &&
3429                                  (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
3430                                 mcmd->status = rc;
3431                 }
3432                 if (cont && !c)
3433                         continue;
3434                 
3435                 if (dev->scsi_dev == NULL)
3436                         continue;
3437
3438                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3439                         if (dev->scsi_dev->host->host_no ==
3440                                     d->scsi_dev->host->host_no) 
3441                         {
3442                                 found = 1;
3443                                 break;
3444                         }
3445                 }
3446                 if (!found)
3447                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3448         }
3449
3450         /*
3451          * We assume here that for all commands already sent to the devices
3452          * the completion callbacks will be called on/after scsi_reset_provider().
3453          */
3454
3455         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3456                 /* dev->scsi_dev must be non-NULL here */
3457                 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3458                       dev->scsi_dev->host->host_no);
3459                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3460                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3461                       dev->scsi_dev->host->host_no,
3462                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3463                 if ((rc != SUCCESS) &&
3464                     (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
3465                         /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3466                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3467                 }
3468         }
3469
3470         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3471                 if (dev->scsi_dev != NULL)
3472                         dev->scsi_dev->was_reset = 0;
3473         }
3474
3475         up(&scst_mutex);
3476
3477         tm_dbg_task_mgmt("TARGET RESET", 0);
3478         res = scst_set_mcmd_next_state(mcmd);
3479
3480         TRACE_EXIT_RES(res);
3481         return res;
3482 }
3483
3484 /* Returns 0 if the command processing should be continued, <0 otherwise */
3485 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3486 {
3487         int res, rc;
3488         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3489         struct scst_device *dev = tgt_dev->dev;
3490
3491         TRACE_ENTRY();
3492
3493         TRACE(TRACE_MGMT, "Resetting lun %Ld (mcmd %p)", tgt_dev->lun, mcmd);
3494
3495         spin_lock_bh(&dev->dev_lock);
3496         __scst_block_dev(dev);
3497         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3498         spin_unlock_bh(&dev->dev_lock);
3499
3500         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3501         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3502                 goto out_tm_dbg;
3503
3504         if (dev->scsi_dev != NULL) {
3505                 TRACE(TRACE_MGMT, "Resetting device on host %d",
3506                       dev->scsi_dev->host->host_no);
3507                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3508                 if ((rc != SUCCESS) && (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
3509                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3510                 dev->scsi_dev->was_reset = 0;
3511         }
3512
3513 out_tm_dbg:
3514         tm_dbg_task_mgmt("LUN RESET", 0);
3515         res = scst_set_mcmd_next_state(mcmd);
3516
3517         TRACE_EXIT_RES(res);
3518         return res;
3519 }
3520
3521 /* Returns 0 if the command processing should be continued, <0 otherwise */
3522 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3523         int nexus_loss)
3524 {
3525         int res;
3526         int i;
3527         struct scst_session *sess = mcmd->sess;
3528         struct scst_tgt_dev *tgt_dev;
3529
3530         TRACE_ENTRY();
3531
3532         if (nexus_loss) {
3533                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3534                         mcmd);
3535         } else {
3536                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3537                         mcmd);
3538         }
3539
3540         down(&scst_mutex);
3541         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
3542                 struct list_head *sess_tgt_dev_list_head =
3543                         &sess->sess_tgt_dev_list_hash[i];
3544                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3545                                 sess_tgt_dev_list_entry) {
3546                         struct scst_device *dev = tgt_dev->dev;
3547                         int rc;
3548         
3549                         spin_lock_bh(&dev->dev_lock);
3550