1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43         unsigned long *pflags, int left_locked)
44 {
45         int res;
46
47         TRACE_ENTRY();
48
49         TRACE_DBG("Moving cmd %p to cmd list", cmd);
50         list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
51
52         /* This is an inline func., so unneeded code will be optimized out */
53         if (pflags)
54                 spin_unlock_irqrestore(&scst_list_lock, *pflags);
55         else
56                 spin_unlock_irq(&scst_list_lock);
57
58         res = __scst_process_active_cmd(cmd, context, left_locked);
59
60         TRACE_EXIT_RES(res);
61         return res;
62 }
63
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We suppose that other CPU(s) are rather idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  *  Must not be called in parallel with scst_unregister_session() for the 
87  * same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both a wrong LUN and a wrong CDB, defer the error reporting to
115          * scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
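/*
 * Illustrative sketch (not part of the original file): how a target driver
 * typically hands a newly received SCSI command to SCST.  The function
 * my_tgt_rx_pdu() and the struct my_pdu fields are hypothetical; only
 * scst_rx_cmd(), scst_cmd_init_done() and the SCST_CONTEXT_* constants come
 * from this file.
 */
#if 0
static void my_tgt_rx_pdu(struct scst_session *sess, struct my_pdu *pdu,
        int from_irq)
{
        struct scst_cmd *cmd;

        /* atomic != 0, since this may run in softirq/IRQ context */
        cmd = scst_rx_cmd(sess, pdu->lun, pdu->lun_len,
                          pdu->cdb, pdu->cdb_len, 1);
        if (cmd == NULL) {
                my_tgt_reject_pdu(pdu); /* hypothetical out-of-memory path */
                return;
        }

        cmd->tag = pdu->tag;

        /*
         * In hard IRQ context only SCST_CONTEXT_TASKLET or
         * SCST_CONTEXT_THREAD are allowed, see the check at the top of
         * scst_cmd_init_done() below.
         */
        scst_cmd_init_done(cmd, from_irq ? SCST_CONTEXT_TASKLET :
                                           SCST_CONTEXT_DIRECT);
}
#endif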
132
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
134 {
135         int res = 0;
136         unsigned long flags = 0;
137         struct scst_session *sess = cmd->sess;
138
139         TRACE_ENTRY();
140
141         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142         TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
143                 (uint64_t)cmd->lun, cmd->cdb_len);
144         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
145                 cmd->cdb, cmd->cdb_len);
146
147         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
149         {
150                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
152                         cmd->tgtt->name);
153                 pref_context = SCST_CONTEXT_TASKLET;
154         }
155
156         spin_lock_irqsave(&scst_list_lock, flags);
157
158         /* Do it here, under scst_list_lock, to save an extra lock or atomic op */
159         sess->sess_cmd_count++;
160
161         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
162
163         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164                 switch(sess->init_phase) {
165                 case SCST_SESS_IPH_SUCCESS:
166                         break;
167                 case SCST_SESS_IPH_INITING:
168                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169                         list_add_tail(&cmd->cmd_list_entry, 
170                                 &sess->init_deferred_cmd_list);
171                         goto out_unlock_flags;
172                 case SCST_SESS_IPH_FAILED:
173                         scst_set_busy(cmd);
174                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
175                         TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176                         list_add_tail(&cmd->cmd_list_entry, 
177                                 &scst_active_cmd_list);
178                         goto active;
179                 default:
180                         BUG();
181                 }
182         }
183
184         if (unlikely(cmd->lun == (lun_t)-1)) {
185                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186                 scst_set_cmd_error(cmd,
187                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
191                 goto active;
192         }
193
194         if (unlikely(cmd->cdb_len == 0)) {
195                 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196                 scst_set_cmd_error(cmd,
197                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
201                 goto active;
202         }
203
204         cmd->state = SCST_CMD_STATE_INIT;
205
206         TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207         list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
208
209         switch (pref_context) {
210         case SCST_CONTEXT_DIRECT:
211         case SCST_CONTEXT_DIRECT_ATOMIC:
212                 res = scst_do_job_init(&scst_init_cmd_list);
213                 if (res > 0)
214                         goto out_unlock_flags;
215                 break;
216
217         case SCST_CONTEXT_THREAD:
218                 goto out_thread_unlock_flags;
219
220         case SCST_CONTEXT_TASKLET:
221                 scst_schedule_tasklet();
222                 goto out_unlock_flags;
223
224         default:
225                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
226                             pref_context);
227                 goto out_thread_unlock_flags;
228         }
229
230 active:
231         switch (pref_context) {
232         case SCST_CONTEXT_DIRECT:
233         case SCST_CONTEXT_DIRECT_ATOMIC:
234                 scst_process_active_cmd(cmd, pref_context, &flags, 0);
235                 break;
236
237         case SCST_CONTEXT_THREAD:
238                 goto out_thread_unlock_flags;
239
240         case SCST_CONTEXT_TASKLET:
241                 scst_schedule_tasklet();
242                 goto out_unlock_flags;
243
244         default:
245                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
246                             pref_context);
247                 goto out_thread_unlock_flags;
248         }
249
250 out:
251         TRACE_EXIT();
252         return;
253
254 out_unlock_flags:
255         spin_unlock_irqrestore(&scst_list_lock, flags);
256         goto out;
257
258 out_thread_unlock_flags:
259         cmd->non_atomic_only = 1;
260         spin_unlock_irqrestore(&scst_list_lock, flags);
261         wake_up(&scst_list_waitQ);
262         goto out;
263 }
264
265 static int scst_parse_cmd(struct scst_cmd *cmd)
266 {
267         int res = SCST_CMD_STATE_RES_CONT_SAME;
268         int state;
269         struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270         struct scst_device *dev = cmd->dev;
271         struct scst_info_cdb cdb_info;
272         int atomic = scst_cmd_atomic(cmd);
273         int set_dir = 1;
274
275         TRACE_ENTRY();
276
277         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278                 TRACE_DBG("ABORTED set, returning ABORTED "
279                         "for cmd %p", cmd);
280                 goto out_xmit;
281         }
282
283         if (atomic && !dev->handler->parse_atomic) {
284                 TRACE_DBG("Dev handler %s parse() can not be "
285                       "called in atomic context, rescheduling to the thread",
286                       dev->handler->name);
287                 res = SCST_CMD_STATE_RES_NEED_THREAD;
288                 goto out;
289         }
290
291         /*
292          * The expected transfer length and direction supplied by the SCSI
293          * transport via the target driver are untrusted, so we prefer to
294          * derive them from the CDB. Additionally, not all transports can
295          * supply the expected values at all.
296          */
297
298         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
299                         &cdb_info) != 0)) 
300         {
301                 static int t;
302                 if (t < 10) {
303                         t++;
304                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305                                 "Should you update scst_scsi_op_table?",
306                                 cmd->cdb[0], dev->handler->name);
307                 }
308                 if (scst_cmd_is_expected_set(cmd)) {
309                         TRACE(TRACE_SCSI, "Using initiator supplied values: "
310                                 "direction %d, transfer_len %d",
311                                 cmd->expected_data_direction,
312                                 cmd->expected_transfer_len);
313                         cmd->data_direction = cmd->expected_data_direction;
314                         cmd->bufflen = cmd->expected_transfer_len;
315                         /* Restore (most probably) lost CDB length */
316                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317                         if (cmd->cdb_len == -1) {
318                                 PRINT_ERROR_PR("Unable to get CDB length for "
319                                         "opcode 0x%02x. Returning INVALID "
320                                         "OPCODE", cmd->cdb[0]);
321                                 scst_set_cmd_error(cmd,
322                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
323                                 goto out_xmit;
324                         }
325                 }
326                 else {
327                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328                              "target %s did not supply expected values. "
329                              "Returning INVALID OPCODE.", cmd->cdb[0], 
330                              dev->handler->name, cmd->tgtt->name);
331                         scst_set_cmd_error(cmd,
332                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
333                         goto out_xmit;
334                 }
335         } else {
336                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337                         "set %s), transfer_len=%d (expected len %d), flags=%d",
338                         cdb_info.op_name, cdb_info.direction,
339                         cmd->expected_data_direction,
340                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341                         cdb_info.transfer_len, cmd->expected_transfer_len,
342                         cdb_info.flags);
343
344                 /* Restore (most probably) lost CDB length */
345                 cmd->cdb_len = cdb_info.cdb_len;
346
347                 cmd->data_direction = cdb_info.direction;
348                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349                         cmd->bufflen = cdb_info.transfer_len;
350                 /* else cmd->bufflen stays 0, as it was initialized */
351         }
352
353         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354                 PRINT_ERROR_PR("NACA bit in the CDB control byte is not supported "
355                             "(opcode 0x%02x)", cmd->cdb[0]);
356                 scst_set_cmd_error(cmd,
357                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
358                 goto out_xmit;
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362                 PRINT_ERROR_PR("Linked commands are not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (likely(!scst_is_cmd_local(cmd))) {
370                 TRACE_DBG("Calling dev handler %s parse(%p)",
371                       dev->handler->name, cmd);
372                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373                 state = dev->handler->parse(cmd, &cdb_info);
374                 TRACE_DBG("Dev handler %s parse() returned %d",
375                         dev->handler->name, state);
376
377                 if (state == SCST_CMD_STATE_DEFAULT)
378                         state = SCST_CMD_STATE_PREPARE_SPACE;
379         }
380         else
381                 state = SCST_CMD_STATE_PREPARE_SPACE;
382
383         if (scst_cmd_is_expected_set(cmd)) {
384                 if (cmd->expected_transfer_len < cmd->bufflen) {
385                         TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
386                                 "cmd->bufflen(%d), using expected_transfer_len "
387                                 "instead", cmd->expected_transfer_len,
388                                 cmd->bufflen);
389                         cmd->bufflen = cmd->expected_transfer_len;
390                 }
391         }
392
393         if (cmd->data_len == -1)
394                 cmd->data_len = cmd->bufflen;
395
396 #ifdef EXTRACHECKS
397         if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
398                 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
399                         (state != SCST_CMD_STATE_DEV_PARSE)) ||
400                     ((cmd->bufflen != 0) && 
401                         (cmd->data_direction == SCST_DATA_NONE)) ||
402                     ((cmd->bufflen == 0) && 
403                         (cmd->data_direction != SCST_DATA_NONE)) ||
404                     ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
405                         (state > SCST_CMD_STATE_PREPARE_SPACE))) 
406                 {
407                         PRINT_ERROR_PR("Dev handler %s parse() returned "
408                                        "invalid cmd data_direction %d, "
409                                        "bufflen %zd or state %d (opcode 0x%x)",
410                                        dev->handler->name, 
411                                        cmd->data_direction, cmd->bufflen,
412                                        state, cmd->cdb[0]);
413                         goto out_error;
414                 }
415         }
416 #endif
417
418         switch (state) {
419         case SCST_CMD_STATE_PREPARE_SPACE:
420         case SCST_CMD_STATE_DEV_PARSE:
421         case SCST_CMD_STATE_RDY_TO_XFER:
422         case SCST_CMD_STATE_SEND_TO_MIDLEV:
423         case SCST_CMD_STATE_DEV_DONE:
424         case SCST_CMD_STATE_XMIT_RESP:
425         case SCST_CMD_STATE_FINISHED:
426                 cmd->state = state;
427                 res = SCST_CMD_STATE_RES_CONT_SAME;
428                 break;
429
430         case SCST_CMD_STATE_REINIT:
431                 cmd->tgt_dev_saved = tgt_dev_saved;
432                 cmd->state = state;
433                 res = SCST_CMD_STATE_RES_RESTART;
434                 set_dir = 0;
435                 break;
436
437         case SCST_CMD_STATE_NEED_THREAD_CTX:
438                 TRACE_DBG("Dev handler %s parse() requested thread "
439                       "context, rescheduling", dev->handler->name);
440                 res = SCST_CMD_STATE_RES_NEED_THREAD;
441                 set_dir = 0;
442                 break;
443
444         default:
445                 if (state >= 0) {
446                         PRINT_ERROR_PR("Dev handler %s parse() returned "
447                              "invalid cmd state %d (opcode %d)", 
448                              dev->handler->name, state, cmd->cdb[0]);
449                 } else {
450                         PRINT_ERROR_PR("Dev handler %s parse() returned "
451                                 "error %d (opcode %d)", dev->handler->name, 
452                                 state, cmd->cdb[0]);
453                 }
454                 goto out_error;
455         }
456
457         if ((cmd->resp_data_len == -1) && set_dir) {
458                 if (cmd->data_direction == SCST_DATA_READ)
459                         cmd->resp_data_len = cmd->bufflen;
460                 else
461                         cmd->resp_data_len = 0;
462         }
463         
464 out:
465         TRACE_EXIT_HRES(res);
466         return res;
467
468 out_error:
469         /* dev_done() will be called as part of the regular cmd's finish */
470         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
471         cmd->state = SCST_CMD_STATE_DEV_DONE;
472         res = SCST_CMD_STATE_RES_CONT_SAME;
473         goto out;
474
475 out_xmit:
476         cmd->state = SCST_CMD_STATE_XMIT_RESP;
477         res = SCST_CMD_STATE_RES_CONT_SAME;
478         goto out;
479 }
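/*
 * Illustrative sketch (not part of the original file): a minimal dev handler
 * parse() callback as called by scst_parse_cmd() above.  my_disk_parse() is
 * hypothetical; scst_parse_cmd() has already filled cmd->data_direction and
 * cmd->bufflen from scst_get_cdb_info(), so the common case is simply to
 * accept the defaults and continue with SCST_CMD_STATE_PREPARE_SPACE.
 */
#if 0
static int my_disk_parse(struct scst_cmd *cmd, struct scst_info_cdb *info)
{
        if ((info->flags & SCST_UNKNOWN_LENGTH) &&
            scst_cmd_is_expected_set(cmd)) {
                /* Open-ended transfer: fall back to the transport's value */
                cmd->bufflen = cmd->expected_transfer_len;
        }
        return SCST_CMD_STATE_DEFAULT;
}
#endif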
480
481 void scst_cmd_mem_work_fn(void *p)
482 {
483         TRACE_ENTRY();
484
485         spin_lock_bh(&scst_cmd_mem_lock);
486
487         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
488         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
489                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
490                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
491         } else {
492                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
493                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
494         }
495         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
496
497         spin_unlock_bh(&scst_cmd_mem_lock);
498
499         TRACE_EXIT();
500         return;
501 }
502
503 int scst_check_mem(struct scst_cmd *cmd)
504 {
505         int res = 0;
506
507         TRACE_ENTRY();
508
509         if (cmd->mem_checked)
510                 goto out;
511
512         spin_lock_bh(&scst_cmd_mem_lock);
513
514         scst_cur_cmd_mem += cmd->bufflen;
515         cmd->mem_checked = 1;
516         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
517                 goto out_unlock;
518
519         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
520                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
521                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
522                 (cmd->sess->initiator_name[0] == '\0') ?
523                   "Anonymous" : cmd->sess->initiator_name,
524                 scst_cur_max_cmd_mem >> 10);
525
526         scst_cur_cmd_mem -= cmd->bufflen;
527         cmd->mem_checked = 0;
528         scst_set_busy(cmd);
529         cmd->state = SCST_CMD_STATE_XMIT_RESP;
530         res = 1;
531
532 out_unlock:
533         spin_unlock_bh(&scst_cmd_mem_lock);
534
535 out:
536         TRACE_EXIT_RES(res);
537         return res;
538 }
539
540 static void scst_low_cur_max_cmd_mem(void)
541 {
542         TRACE_ENTRY();
543
544         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
545                 cancel_delayed_work(&scst_cmd_mem_work);
546                 flush_scheduled_work();
547                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
548         }
549
550         spin_lock_bh(&scst_cmd_mem_lock);
551
552         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
553                                 (scst_cur_cmd_mem >> 2);
554         if (scst_cur_max_cmd_mem < 16*1024*1024)
555                 scst_cur_max_cmd_mem = 16*1024*1024;
556
557         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
558                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
559                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
560                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
561         }
562
563         spin_unlock_bh(&scst_cmd_mem_lock);
564
565         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
566
567         TRACE_EXIT();
568         return;
569 }
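/*
 * Worked example of the throttling above (illustrative numbers): if commands
 * hold 200 MB when an allocation fails, scst_low_cur_max_cmd_mem() drops the
 * limit to 200/2 + 200/4 = 150 MB (but never below 16 MB).  Each
 * SCST_CMD_MEM_TIMEOUT, scst_cmd_mem_work_fn() then raises the limit by 1/8:
 * 150 -> ~169 -> ~190 MB ... until it is clamped back to scst_max_cmd_mem.
 * While the limit is exceeded, scst_check_mem() answers new commands with
 * BUSY/QUEUE FULL.
 */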
570
571 static int scst_prepare_space(struct scst_cmd *cmd)
572 {
573         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
574
575         TRACE_ENTRY();
576
577         if (cmd->data_direction == SCST_DATA_NONE) {
578                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
579                 goto out;
580         }
581
582         r = scst_check_mem(cmd);
583         if (unlikely(r != 0))
584                 goto out;
585
586         if (cmd->data_buf_tgt_alloc) {
587                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
588                 r = cmd->tgtt->alloc_data_buf(cmd);
589                 cmd->data_buf_alloced = (r == 0);
590         } else
591                 r = scst_alloc_space(cmd);
592
593         if (r != 0) {
594                 if (scst_cmd_atomic(cmd)) {
595                         TRACE_MEM("%s", "Atomic memory allocation failed, "
596                               "rescheduling to the thread");
597                         res = SCST_CMD_STATE_RES_NEED_THREAD;
598                         goto out;
599                 } else
600                         goto out_no_space;
601         }
602
603         switch (cmd->data_direction) {
604         case SCST_DATA_WRITE:
605                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
606                 break;
607
608         default:
609                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
610                 break;
611         }
612
613 out:
614         TRACE_EXIT_HRES(res);
615         return res;
616
617 out_no_space:
618         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
619                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
620         scst_low_cur_max_cmd_mem();
621         scst_set_busy(cmd);
622         cmd->state = SCST_CMD_STATE_DEV_DONE;
623         res = SCST_CMD_STATE_RES_CONT_SAME;
624         goto out;
625 }
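/*
 * Illustrative sketch (not part of the original file): a target driver that
 * owns its own DMA-able buffers can set cmd->data_buf_tgt_alloc and provide
 * tgtt->alloc_data_buf(), so scst_prepare_space() above skips
 * scst_alloc_space().  my_alloc_data_buf() and my_sg_pool_get() are
 * hypothetical; the contract assumed here is "fill cmd->sg/cmd->sg_cnt and
 * return 0 on success".
 */
#if 0
static int my_alloc_data_buf(struct scst_cmd *cmd)
{
        cmd->sg = my_sg_pool_get(cmd->bufflen, &cmd->sg_cnt);
        return (cmd->sg != NULL) ? 0 : -ENOMEM;
}
#endif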
626
627 /* No locks */
628 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
629 {
630         struct scst_tgt *tgt = cmd->sess->tgt;
631         int res = 0;
632         unsigned long flags;
633
634         TRACE_ENTRY();
635
636         spin_lock_irqsave(&tgt->tgt_lock, flags);
637         tgt->retry_cmds++;
638         smp_mb();
639         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
640               tgt->retry_cmds);
641         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
642                 /* At least one cmd finished, so try again */
643                 tgt->retry_cmds--;
644                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
645                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
646                       "retry_cmds=%d)", finished_cmds,
647                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
648                 res = -1;
649                 goto out_unlock_tgt;
650         }
651
652         TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
653         /* IRQ already off */
654         spin_lock(&scst_list_lock);
655         list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
656         spin_unlock(&scst_list_lock);
657
658         if (!tgt->retry_timer_active) {
659                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
660                 add_timer(&tgt->retry_timer);
661                 tgt->retry_timer_active = 1;
662         }
663
664 out_unlock_tgt:
665         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
666
667         TRACE_EXIT_RES(res);
668         return res;
669 }
670
671 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
672 {
673         int res, rc;
674         int atomic = scst_cmd_atomic(cmd);
675
676         TRACE_ENTRY();
677
678         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
679         {
680                 TRACE_DBG("ABORTED set, returning ABORTED for "
681                         "cmd %p", cmd);
682                 goto out_dev_done;
683         }
684
685         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
686                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
687                       "called in atomic context, rescheduling to the thread");
688                 res = SCST_CMD_STATE_RES_NEED_THREAD;
689                 goto out;
690         }
691
692         while (1) {
693                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
694
695                 res = SCST_CMD_STATE_RES_CONT_NEXT;
696                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
697
698                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
699 #ifdef DEBUG_RETRY
700                 if (((scst_random() % 100) == 75))
701                         rc = SCST_TGT_RES_QUEUE_FULL;
702                 else
703 #endif
704                         rc = cmd->tgtt->rdy_to_xfer(cmd);
705                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
706
707                 if (likely(rc == SCST_TGT_RES_SUCCESS))
708                         goto out;
709
710                 /* Restore the previous state */
711                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
712
713                 switch (rc) {
714                 case SCST_TGT_RES_QUEUE_FULL:
715                 {
716                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
717                                 break;
718                         else
719                                 continue;
720                 }
721
722                 case SCST_TGT_RES_NEED_THREAD_CTX:
723                 {
724                         TRACE_DBG("Target driver %s "
725                               "rdy_to_xfer() requested thread "
726                               "context, rescheduling", cmd->tgtt->name);
727                         res = SCST_CMD_STATE_RES_NEED_THREAD;
728                         break;
729                 }
730
731                 default:
732                         goto out_error_rc;
733                 }
734                 break;
735         }
736
737 out:
738         TRACE_EXIT_HRES(res);
739         return res;
740
741 out_error_rc:
742         if (rc == SCST_TGT_RES_FATAL_ERROR) {
743                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
744                      "fatal error", cmd->tgtt->name);
745         } else {
746                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
747                             "value %d", cmd->tgtt->name, rc);
748         }
749         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
750
751 out_dev_done:
752         cmd->state = SCST_CMD_STATE_DEV_DONE;
753         res = SCST_CMD_STATE_RES_CONT_SAME;
754         goto out;
755 }
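/*
 * Illustrative sketch (not part of the original file): the target driver side
 * of the WRITE data phase.  rdy_to_xfer() only starts the transfer; when the
 * data has arrived, the driver reports it with scst_rx_data() (defined
 * below).  my_start_data_in() and my_data_in_done() are hypothetical.
 */
#if 0
static int my_rdy_to_xfer(struct scst_cmd *cmd)
{
        if (my_start_data_in(cmd) != 0)         /* e.g. out of HW resources */
                return SCST_TGT_RES_QUEUE_FULL; /* SCST will retry the cmd */
        return SCST_TGT_RES_SUCCESS;
}

/* Completion callback of the hypothetical transfer started above.
 * Assumed to run outside hard IRQ context; from an IRQ handler
 * SCST_CONTEXT_TASKLET would have to be used instead. */
static void my_data_in_done(struct scst_cmd *cmd, int error)
{
        scst_rx_data(cmd, error ? SCST_RX_STATUS_ERROR : SCST_RX_STATUS_SUCCESS,
                     SCST_CONTEXT_DIRECT);
}
#endif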
756
757 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
758         int check_retries)
759 {
760         unsigned long flags;
761         int rc;
762
763         TRACE_ENTRY();
764
765         TRACE_DBG("Context: %d", context);
766
767         switch(context) {
768         case SCST_CONTEXT_DIRECT:
769         case SCST_CONTEXT_DIRECT_ATOMIC:
770                 if (check_retries)
771                         scst_check_retries(cmd->tgt, 0);
772                 cmd->non_atomic_only = 0;
773                 rc = __scst_process_active_cmd(cmd, context, 0);
774                 if (rc == SCST_CMD_STATE_RES_NEED_THREAD)
775                         goto out_thread;
776                 break;
777
778         default:
779                 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
780                             context);
781                 /* fall through */
782         case SCST_CONTEXT_THREAD:
783                 if (check_retries)
784                         scst_check_retries(cmd->tgt, 1);
785                 goto out_thread;
786
787         case SCST_CONTEXT_TASKLET:
788                 if (check_retries)
789                         scst_check_retries(cmd->tgt, 1);
790                 cmd->non_atomic_only = 0;
791                 spin_lock_irqsave(&scst_list_lock, flags);
792                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
793                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
794                 spin_unlock_irqrestore(&scst_list_lock, flags);
795                 scst_schedule_tasklet();
796                 break;
797         }
798 out:
799         TRACE_EXIT();
800         return;
801
802 out_thread:
803         cmd->non_atomic_only = 1;
804         spin_lock_irqsave(&scst_list_lock, flags);
805         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
806         list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
807         spin_unlock_irqrestore(&scst_list_lock, flags);
808         wake_up(&scst_list_waitQ);
809         goto out;
810 }
811
812 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
813 {
814         TRACE_ENTRY();
815
816         TRACE_DBG("Preferred context: %d", pref_context);
817         TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
818         cmd->non_atomic_only = 0;
819
820         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
821                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
822         {
823                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
824                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
825                         cmd->tgtt->name);
826                 pref_context = SCST_CONTEXT_TASKLET;
827         }
828
829         switch (status) {
830         case SCST_RX_STATUS_SUCCESS:
831                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
832                 break;
833
834         case SCST_RX_STATUS_ERROR_SENSE_SET:
835                 cmd->state = SCST_CMD_STATE_DEV_DONE;
836                 break;
837
838         case SCST_RX_STATUS_ERROR_FATAL:
839                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
840                 /* fall through */
841         case SCST_RX_STATUS_ERROR:
842                 scst_set_cmd_error(cmd,
843                            SCST_LOAD_SENSE(scst_sense_hardw_error));
844                 cmd->state = SCST_CMD_STATE_DEV_DONE;
845                 break;
846
847         default:
848                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
849                         status);
850                 cmd->state = SCST_CMD_STATE_DEV_DONE;
851                 break;
852         }
853
854         scst_proccess_redirect_cmd(cmd, pref_context, 1);
855
856         TRACE_EXIT();
857         return;
858 }
859
860 /* No locks supposed to be held */
861 static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
862         int rq_sense_len, int *next_state)
863 {
864         int sense_valid;
865         struct scst_device *dev = cmd->dev;
866         int dbl_ua_possible, ua_sent = 0;
867
868         TRACE_ENTRY();
869
870         /* If we had an internal bus reset behind us, set the command error UA */
871         if ((dev->scsi_dev != NULL) &&
872             unlikely(cmd->host_status == DID_RESET) &&
873             scst_is_ua_command(cmd))
874         {
875                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
876                       dev->scsi_dev->was_reset, cmd->host_status);
877                 scst_set_cmd_error(cmd,
878                    SCST_LOAD_SENSE(scst_sense_reset_UA));
879                 /* just in case */
880                 cmd->ua_ignore = 0;
881                 /* It looks like it is safe to clear was_reset here */
882                 dev->scsi_dev->was_reset = 0;
883                 smp_mb();
884         }
885
886         if (rq_sense != NULL) {
887                 sense_valid = SCST_SENSE_VALID(rq_sense);
888                 if (sense_valid) {
889                         memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
890                         /* 
891                          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
892                          * in init_scst()
893                          */
894                         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
895                 }
896         } else
897                 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
898
899         dbl_ua_possible = dev->dev_double_ua_possible;
900         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
901         if (unlikely(dbl_ua_possible)) {
902                 spin_lock_bh(&dev->dev_lock);
903                 barrier(); /* to reread dev_double_ua_possible */
904                 dbl_ua_possible = dev->dev_double_ua_possible;
905                 if (dbl_ua_possible)
906                         ua_sent = dev->dev_reset_ua_sent;
907                 else
908                         spin_unlock_bh(&dev->dev_lock);
909         }
910
911         if (sense_valid) {
912                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
913                              sizeof(cmd->sense_buffer));
914                 /* Check Unit Attention Sense Key */
915                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
916                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
917                                 if (dbl_ua_possible) 
918                                 {
919                                         if (ua_sent) {
920                                                 TRACE(TRACE_MGMT, "%s", 
921                                                         "Double UA detected");
922                                                 /* Do retry */
923                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
924                                                         "(tag %d)", cmd, cmd->tag);
925                                                 cmd->status = 0;
926                                                 cmd->masked_status = 0;
927                                                 cmd->msg_status = 0;
928                                                 cmd->host_status = DID_OK;
929                                                 cmd->driver_status = 0;
930                                                 memset(cmd->sense_buffer, 0,
931                                                         sizeof(cmd->sense_buffer));
932                                                 cmd->retry = 1;
933                                                 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
934                                                 /* 
935                                                  * Dev is still blocked by this cmd, so
936                                                  * it's OK to clear SCST_DEV_SERIALIZED
937                                                  * here.
938                                                  */
939                                                 dev->dev_double_ua_possible = 0;
940                                                 dev->dev_serialized = 0;
941                                                 dev->dev_reset_ua_sent = 0;
942                                                 goto out_unlock;
943                                         } else
944                                                 dev->dev_reset_ua_sent = 1;
945                                 }
946                         }
947                         if (cmd->ua_ignore == 0) {
948                                 if (unlikely(dbl_ua_possible)) {
949                                         __scst_process_UA(dev, cmd,
950                                                 cmd->sense_buffer,
951                                                 sizeof(cmd->sense_buffer), 0);
952                                 } else {
953                                         scst_process_UA(dev, cmd,
954                                                 cmd->sense_buffer,
955                                                 sizeof(cmd->sense_buffer), 0);
956                                 }
957                         }
958                 }
959         }
960
961         if (unlikely(dbl_ua_possible)) {
962                 if (ua_sent && scst_is_ua_command(cmd)) {
963                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
964                         dev->dev_double_ua_possible = 0;
965                         dev->dev_serialized = 0;
966                         dev->dev_reset_ua_sent = 0;
967                 }
968                 spin_unlock_bh(&dev->dev_lock);
969         }
970
971 out:
972         TRACE_EXIT();
973         return;
974
975 out_unlock:
976         spin_unlock_bh(&dev->dev_lock);
977         goto out;
978 }
979
980 static int scst_check_auto_sense(struct scst_cmd *cmd)
981 {
982         int res = 0;
983
984         TRACE_ENTRY();
985
986         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
987             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
988              SCST_NO_SENSE(cmd->sense_buffer)))
989         {
990                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
991                       "cmd->status=%x, cmd->masked_status=%x, "
992                       "cmd->msg_status=%x, cmd->host_status=%x, "
993                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
994                       cmd->msg_status, cmd->host_status, cmd->driver_status);
995                 res = 1;
996         } else if (unlikely(cmd->host_status)) {
997                 if ((cmd->host_status == DID_REQUEUE) ||
998                     (cmd->host_status == DID_IMM_RETRY) ||
999                     (cmd->host_status == DID_SOFT_ERROR)) {
1000                         scst_set_busy(cmd);
1001                 } else {
1002                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
1003                                 "received, returning HARDWARE ERROR instead",
1004                                 cmd->host_status);
1005                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1006                 }
1007         }
1008
1009         TRACE_EXIT_RES(res);
1010         return res;
1011 }
1012
1013 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1014         const uint8_t *rq_sense, int rq_sense_len, int resid,
1015         int *next_state)
1016 {
1017         unsigned char type;
1018
1019         TRACE_ENTRY();
1020
1021         cmd->status = result & 0xff;
1022         cmd->masked_status = status_byte(result);
1023         cmd->msg_status = msg_byte(result);
1024         cmd->host_status = host_byte(result);
1025         cmd->driver_status = driver_byte(result);
1026         if (unlikely(resid != 0)) {
1027 #ifdef EXTRACHECKS
1028                 if ((resid < 0) || (resid >= cmd->resp_data_len)) {
1029                         PRINT_ERROR_PR("Wrong resid %d (cmd->resp_data_len=%d)",
1030                                 resid, cmd->resp_data_len);
1031                 } else
1032 #endif
1033                         scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
1034         }
1035
1036         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
1037               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
1038               "cmd->driver_status=%x", result, cmd->status, resid,
1039               cmd->masked_status, cmd->msg_status, cmd->host_status,
1040               cmd->driver_status);
1041
1042         cmd->completed = 1;
1043
1044         scst_dec_on_dev_cmd(cmd);
1045
1046         type = cmd->dev->handler->type;
1047         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1048             cmd->tgt_dev->acg_dev->rd_only_flag &&
1049             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1050              type == TYPE_TAPE)) {
1051                 int32_t length;
1052                 uint8_t *address;
1053
1054                 length = scst_get_buf_first(cmd, &address);
1055                 TRACE_DBG("length %d", length);
1056                 if (unlikely(length <= 0)) {
1057                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1058                                 __func__);
1059                         goto next;
1060                 }
1061                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1062                         address[2] |= 0x80;   /* Write Protect*/
1063                 }
1064                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1065                         address[3] |= 0x80;   /* Write Protect*/
1066                 }
1067                 scst_put_buf(cmd, address);
1068         }
1069
1070 next:
1071         scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
1072
1073         TRACE_EXIT();
1074         return;
1075 }
1076
1077 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1078 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1079                                             struct scsi_request **req)
1080 {
1081         struct scst_cmd *cmd = NULL;
1082
1083         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1084                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1085
1086         if (cmd == NULL) {
1087                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1088                 if (*req)
1089                         scsi_release_request(*req);
1090         }
1091
1092         return cmd;
1093 }
1094
1095 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1096 {
1097         struct scsi_request *req = NULL;
1098         struct scst_cmd *cmd;
1099         int next_state;
1100
1101         TRACE_ENTRY();
1102
1103         WARN_ON(in_irq());
1104
1105         cmd = scst_get_cmd(scsi_cmd, &req);
1106         if (cmd == NULL)
1107                 goto out;
1108
1109         next_state = SCST_CMD_STATE_DEV_DONE;
1110         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1111                 sizeof(req->sr_sense_buffer), scsi_cmd->resid, &next_state);
1112
1113         /* Clear out request structure */
1114         req->sr_use_sg = 0;
1115         req->sr_sglist_len = 0;
1116         req->sr_bufflen = 0;
1117         req->sr_buffer = NULL;
1118         req->sr_underflow = 0;
1119         req->sr_request->rq_disk = NULL; /* disown request blk */
1120
1121         cmd->bufflen = req->sr_bufflen; //??
1122
1123         scst_release_request(cmd);
1124
1125         cmd->state = next_state;
1126         cmd->non_atomic_only = 0;
1127
1128         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1129
1130 out:
1131         TRACE_EXIT();
1132         return;
1133 }
1134 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1135 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1136 {
1137         struct scst_cmd *cmd;
1138         int next_state;
1139
1140         TRACE_ENTRY();
1141
1142         WARN_ON(in_irq());
1143
1144         cmd = (struct scst_cmd *)data;
1145         if (cmd == NULL)
1146                 goto out;
1147
1148         next_state = SCST_CMD_STATE_DEV_DONE;
1149         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid,
1150                 &next_state);
1151
1152         cmd->state = next_state;
1153         cmd->non_atomic_only = 0;
1154
1155         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1156
1157 out:
1158         TRACE_EXIT();
1159         return;
1160 }
1161 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1162
1163 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1164 {
1165         TRACE_ENTRY();
1166
1167         BUG_ON(in_irq());
1168
1169         scst_dec_on_dev_cmd(cmd);
1170
1171         if (next_state == SCST_CMD_STATE_DEFAULT)
1172                 next_state = SCST_CMD_STATE_DEV_DONE;
1173
1174 #if defined(DEBUG) || defined(TRACING)
1175         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1176                 if (cmd->sg) {
1177                         int i;
1178                         struct scatterlist *sg = cmd->sg;
1179                         TRACE(TRACE_RECV_TOP, 
1180                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1181                               cmd->sg_cnt, sg, (void*)sg[0].page);
1182                         for(i = 0; i < cmd->sg_cnt; ++i) {
1183                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1184                                         "Exec'd sg", page_address(sg[i].page),
1185                                         sg[i].length);
1186                         }
1187                 }
1188         }
1189 #endif
1190
1191
1192 #ifdef EXTRACHECKS
1193         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1194             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1195             (next_state != SCST_CMD_STATE_FINISHED)) 
1196         {
1197                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1198                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1199                 scst_set_cmd_error(cmd,
1200                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1201                 next_state = SCST_CMD_STATE_DEV_DONE;
1202         }
1203
1204         if (scst_check_auto_sense(cmd)) {
1205                 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1206                         "opcode %d", cmd->cdb[0]);
1207         }
1208 #endif
1209
1210         scst_check_sense(cmd, NULL, 0, &next_state);
1211
1212         cmd->state = next_state;
1213         cmd->non_atomic_only = 0;
1214
1215         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1216
1217         TRACE_EXIT();
1218         return;
1219 }
1220
1221 static int scst_report_luns_local(struct scst_cmd *cmd)
1222 {
1223         int res = SCST_EXEC_COMPLETED;
1224         int dev_cnt = 0;
1225         int buffer_size;
1226         struct scst_tgt_dev *tgt_dev = NULL;
1227         uint8_t *buffer;
1228         int offs, overflow = 0;
1229
1230         TRACE_ENTRY();
1231
1232         cmd->status = 0;
1233         cmd->masked_status = 0;
1234         cmd->msg_status = 0;
1235         cmd->host_status = DID_OK;
1236         cmd->driver_status = 0;
1237
1238         if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
1239                 PRINT_ERROR_PR("Unsupported SELECT REPORT value %x in REPORT "
1240                         "LUNS command", cmd->cdb[2]);
1241                 goto out_err;
1242         }
1243
1244         buffer_size = scst_get_buf_first(cmd, &buffer);
1245         if (unlikely(buffer_size <= 0))
1246                 goto out_err;
1247
1248         if (buffer_size < 16)
1249                 goto out_put_err;
1250
1251         memset(buffer, 0, buffer_size);
1252         offs = 8;
1253
1254         /* sess->sess_tgt_dev_list is protected by suspended activity */
1255         list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1256                             sess_tgt_dev_list_entry) 
1257         {
1258                 if (!overflow) {
1259                         if (offs >= buffer_size) {
1260                                 scst_put_buf(cmd, buffer);
1261                                 buffer_size = scst_get_buf_next(cmd, &buffer);
1262                                 if (buffer_size > 0) {
1263                                         memset(buffer, 0, buffer_size);
1264                                         offs = 0;
1265                                 } else {
1266                                         overflow = 1;
1267                                         goto inc_dev_cnt;
1268                                 }
1269                         }
1270                         if ((buffer_size - offs) < 8) {
1271                                 PRINT_ERROR_PR("Buffer allocated for REPORT "
1272                                         "LUNS command is too small to fit an 8-"
1273                                         "byte entry (buffer_size=%d)",
1274                                         buffer_size);
1275                                 goto out_put_hw_err;
1276                         }
1277                         buffer[offs] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1278                         buffer[offs+1] = tgt_dev->acg_dev->lun & 0xff;
1279                         offs += 8;
1280                 }
1281 inc_dev_cnt:
1282                 dev_cnt++;
1283         }
1284         if (!overflow)
1285                 scst_put_buf(cmd, buffer);
1286
1287         /* Set the response header */
1288         buffer_size = scst_get_buf_first(cmd, &buffer);
1289         if (unlikely(buffer_size <= 0))
1290                 goto out_err;
1291         dev_cnt *= 8;
1292         buffer[0] = (dev_cnt >> 24) & 0xff;
1293         buffer[1] = (dev_cnt >> 16) & 0xff;
1294         buffer[2] = (dev_cnt >> 8) & 0xff;
1295         buffer[3] = dev_cnt & 0xff;
1296         scst_put_buf(cmd, buffer);
1297
1298         dev_cnt += 8;
1299         if (dev_cnt < cmd->resp_data_len)
1300                 scst_set_resp_data_len(cmd, dev_cnt);
1301
1302 out_done:
1303         cmd->completed = 1;
1304
1305         /* Report the result */
1306         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1307
1308         TRACE_EXIT_RES(res);
1309         return res;
1310         
1311 out_put_err:
1312         scst_put_buf(cmd, buffer);
1313
1314 out_err:
1315         scst_set_cmd_error(cmd,
1316                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1317         goto out_done;
1318
1319 out_put_hw_err:
1320         scst_put_buf(cmd, buffer);
1321         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1322         goto out_done;
1323 }
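/*
 * Example of the response layout built above, e.g. for LUNs 0 and 1
 * (illustrative):
 *
 *   bytes  0- 3: LUN list length = 2 * 8 = 16  (00 00 00 10)
 *   bytes  4- 7: reserved (zero)
 *   bytes  8-15: 00 00 00 00 00 00 00 00   <- LUN 0
 *   bytes 16-23: 00 01 00 00 00 00 00 00   <- LUN 1
 *
 * Only the first two bytes of each entry are filled in, which corresponds to
 * the single-level peripheral device addressing format for LUNs < 256.
 */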
1324
1325 static int scst_pre_select(struct scst_cmd *cmd)
1326 {
1327         int res = SCST_EXEC_NOT_COMPLETED;
1328
1329         TRACE_ENTRY();
1330
1331         if (scst_cmd_atomic(cmd)) {
1332                 res = SCST_EXEC_NEED_THREAD;
1333                 goto out;
1334         }
1335
1336         scst_block_dev(cmd->dev, 1);
1337         /* Device will be unblocked in scst_done_cmd_check() */
1338
1339         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1340                 int rc = scst_set_pending_UA(cmd);
1341                 if (rc == 0) {
1342                         res = SCST_EXEC_COMPLETED;
1343                         cmd->completed = 1;
1344                         /* Report the result */
1345                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1346                         goto out;
1347                 }
1348         }
1349
1350 out:
1351         TRACE_EXIT_RES(res);
1352         return res;
1353 }
1354
1355 static inline void scst_report_reserved(struct scst_cmd *cmd)
1356 {
1357         TRACE_ENTRY();
1358
1359         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1360         cmd->completed = 1;
1361         /* Report the result */
1362         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1363
1364         TRACE_EXIT();
1365         return;
1366 }
1367
1368 static int scst_reserve_local(struct scst_cmd *cmd)
1369 {
1370         int res = SCST_EXEC_NOT_COMPLETED;
1371         struct scst_device *dev;
1372         struct scst_tgt_dev *tgt_dev_tmp;
1373
1374         TRACE_ENTRY();
1375
1376         if (scst_cmd_atomic(cmd)) {
1377                 res = SCST_EXEC_NEED_THREAD;
1378                 goto out;
1379         }
1380
1381         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1382                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1383                      "(lun=%Ld)", (uint64_t)cmd->lun);
1384                 scst_set_cmd_error(cmd,
1385                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1386                 cmd->completed = 1;
1387                 res = SCST_EXEC_COMPLETED;
1388                 goto out;
1389         }
1390
1391         dev = cmd->dev;
1392         scst_block_dev(dev, 1);
1393         /* Device will be unblocked in scst_done_cmd_check() */
1394
1395         spin_lock_bh(&dev->dev_lock);
1396
1397         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1398                 scst_report_reserved(cmd);
1399                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1400                 res = SCST_EXEC_COMPLETED;
1401                 goto out_unlock;
1402         }
1403
1404         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1405                             dev_tgt_dev_list_entry) 
1406         {
1407                 if (cmd->tgt_dev != tgt_dev_tmp)
1408                         set_bit(SCST_TGT_DEV_RESERVED, 
1409                                 &tgt_dev_tmp->tgt_dev_flags);
1410         }
1411         dev->dev_reserved = 1;
1412
1413 out_unlock:
1414         spin_unlock_bh(&dev->dev_lock);
1415         
1416 out:
1417         TRACE_EXIT_RES(res);
1418         return res;
1419 }
1420
1421 static int scst_release_local(struct scst_cmd *cmd)
1422 {
1423         int res = SCST_EXEC_NOT_COMPLETED;
1424         struct scst_tgt_dev *tgt_dev_tmp;
1425         struct scst_device *dev;
1426
1427         TRACE_ENTRY();
1428
1429         dev = cmd->dev;
1430
1431         scst_block_dev(dev, 1);
1432         cmd->blocking = 1;
1433         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1434
1435         spin_lock_bh(&dev->dev_lock);
1436
1437         /* 
1438          * The device could be RELEASED behind us, if the RESERVING session
1439          * is closed (see scst_free_tgt_dev()), but this doesn't actually
1440          * matter, so take the lock anyway and don't retest the DEV_RESERVED bits
1441          */
1442         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1443                 res = SCST_EXEC_COMPLETED;
1444                 cmd->status = 0;
1445                 cmd->masked_status = 0;
1446                 cmd->msg_status = 0;
1447                 cmd->host_status = DID_OK;
1448                 cmd->driver_status = 0;
1449         } else {
1450                 list_for_each_entry(tgt_dev_tmp,
1451                                     &dev->dev_tgt_dev_list,
1452                                     dev_tgt_dev_list_entry) 
1453                 {
1454                         clear_bit(SCST_TGT_DEV_RESERVED, 
1455                                 &tgt_dev_tmp->tgt_dev_flags);
1456                 }
1457                 dev->dev_reserved = 0;
1458         }
1459
1460         spin_unlock_bh(&dev->dev_lock);
1461
1462         if (res == SCST_EXEC_COMPLETED) {
1463                 cmd->completed = 1;
1464                 /* Report the result */
1465                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1466         }
1467
1468         TRACE_EXIT_RES(res);
1469         return res;
1470 }
1471
1472 /* 
1473  * The result of cmd execution, if any, should be reported 
1474  * via scst_cmd_done_local() 
1475  */
1476 static int scst_pre_exec(struct scst_cmd *cmd)
1477 {
1478         int res = SCST_EXEC_NOT_COMPLETED, rc;
1479         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1480
1481         TRACE_ENTRY();
1482
1483         /* Reservation check before Unit Attention; the opcodes listed below are exempt from reservation conflicts */
1484         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))) {
1485                 if ((cmd->cdb[0] != INQUIRY) && (cmd->cdb[0] != REPORT_LUNS) &&
1486                     (cmd->cdb[0] != RELEASE) && (cmd->cdb[0] != RELEASE_10) &&
1487                     (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1488                     (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1489                     (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
1490                 {
1491                         scst_report_reserved(cmd);
1492                         res = SCST_EXEC_COMPLETED;
1493                         goto out;
1494                 }
1495         }
1496
1497         /* If there was an internal bus reset, report a reset Unit Attention for this command */
1498         if ((cmd->dev->scsi_dev != NULL) &&
1499             unlikely(cmd->dev->scsi_dev->was_reset)) {
1500                 if (scst_is_ua_command(cmd)) 
1501                 {
1502                         struct scst_device *dev = cmd->dev;
1503                         int done = 0;
1504                         /* Prevent more than one cmd from being triggered by was_reset */
1505                         spin_lock_bh(&dev->dev_lock);
1506                         barrier(); /* to reread was_reset */
1507                         if (dev->scsi_dev->was_reset) {
1508                                 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1509                                 scst_set_cmd_error(cmd,
1510                                            SCST_LOAD_SENSE(scst_sense_reset_UA));
1511                                 /* It looks like it is safe to clear was_reset here */
1512                                 dev->scsi_dev->was_reset = 0;
1513                                 smp_mb();
1514                                 done = 1;
1515                         }
1516                         spin_unlock_bh(&dev->dev_lock);
1517
1518                         if (done)
1519                                 goto out_done;
1520                 }
1521         }
1522
1523         if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING, 
1524                         &cmd->tgt_dev->tgt_dev_flags))) {
1525                 if (scst_is_ua_command(cmd)) 
1526                 {
1527                         rc = scst_set_pending_UA(cmd);
1528                         if (rc == 0)
1529                                 goto out_done;
1530                 }
1531         }
1532
1533         /* Check READ_ONLY device status */
1534         if (tgt_dev->acg_dev->rd_only_flag &&
1535             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1536              cmd->cdb[0] == WRITE_10 ||
1537              cmd->cdb[0] == WRITE_12 ||
1538              cmd->cdb[0] == WRITE_16 ||
1539              cmd->cdb[0] == WRITE_VERIFY ||
1540              cmd->cdb[0] == WRITE_VERIFY_12 ||
1541              cmd->cdb[0] == WRITE_VERIFY_16 ||
1542              (cmd->dev->handler->type == TYPE_TAPE &&
1543               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1544         {
1545                 scst_set_cmd_error(cmd,
1546                            SCST_LOAD_SENSE(scst_sense_data_protect));
1547                 goto out_done;
1548         }
1549 out:
1550         TRACE_EXIT_RES(res);
1551         return res;
1552
1553 out_done:
1554         res = SCST_EXEC_COMPLETED;
1555         cmd->completed = 1;
1556         /* Report the result */
1557         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1558         goto out;
1559 }
1560
1561 /* 
1562  * The result of cmd execution, if any, should be reported 
1563  * via scst_cmd_done_local() 
1564  */
1565 static inline int scst_local_exec(struct scst_cmd *cmd)
1566 {
1567         int res = SCST_EXEC_NOT_COMPLETED;
1568
1569         TRACE_ENTRY();
1570
1571         /*
1572          * When adding new commands here, don't forget to update
1573          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1574          */
1575
1576         switch (cmd->cdb[0]) {
1577         case MODE_SELECT:
1578         case MODE_SELECT_10:
1579         case LOG_SELECT:
1580                 res = scst_pre_select(cmd);
1581                 break;
1582         case RESERVE:
1583         case RESERVE_10:
1584                 res = scst_reserve_local(cmd);
1585                 break;
1586         case RELEASE:
1587         case RELEASE_10:
1588                 res = scst_release_local(cmd);
1589                 break;
1590         case REPORT_LUNS:
1591                 res = scst_report_luns_local(cmd);
1592                 break;
1593         }
1594
1595         TRACE_EXIT_RES(res);
1596         return res;
1597 }
1598
1599 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1600 {
1601         int rc = SCST_EXEC_NOT_COMPLETED;
1602
1603         TRACE_ENTRY();
1604
1605         cmd->sent_to_midlev = 1;
1606         cmd->state = SCST_CMD_STATE_EXECUTING;
1607         cmd->scst_cmd_done = scst_cmd_done_local;
1608
1609         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1610         smp_mb__after_set_bit();
1611
1612         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1613                 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1614                 goto out_aborted;
1615         }
1616
1617         rc = scst_pre_exec(cmd);
1618         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1619         if (rc != SCST_EXEC_NOT_COMPLETED) {
1620                 if (rc == SCST_EXEC_COMPLETED)
1621                         goto out;
1622                 else if (rc == SCST_EXEC_NEED_THREAD)
1623                         goto out_clear;
1624                 else
1625                         goto out_rc_error;
1626         }
1627
1628         rc = scst_local_exec(cmd);
1629         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1630         if (rc != SCST_EXEC_NOT_COMPLETED) {
1631                 if (rc == SCST_EXEC_COMPLETED)
1632                         goto out;
1633                 else if (rc == SCST_EXEC_NEED_THREAD)
1634                         goto out_clear;
1635                 else
1636                         goto out_rc_error;
1637         }
1638
1639         if (cmd->dev->handler->exec) {
1640                 struct scst_device *dev = cmd->dev;
1641                 TRACE_DBG("Calling dev handler %s exec(%p)",
1642                       dev->handler->name, cmd);
1643                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1644                 cmd->scst_cmd_done = scst_cmd_done_local;
1645                 rc = dev->handler->exec(cmd);
1646                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1647                 TRACE_DBG("Dev handler %s exec() returned %d",
1648                       dev->handler->name, rc);
1649                 if (rc == SCST_EXEC_COMPLETED)
1650                         goto out;
1651                 else if (rc == SCST_EXEC_NEED_THREAD)
1652                         goto out_clear;
1653                 else if (rc != SCST_EXEC_NOT_COMPLETED)
1654                         goto out_rc_error;
1655         }
1656
1657         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1658         
1659         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1660                 PRINT_ERROR_PR("Command for virtual device must be "
1661                         "processed by device handler (lun %Ld)!",
1662                         (uint64_t)cmd->lun);
1663                 goto out_error;
1664         }
1665
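        /*
         * On pre-2.6.18 kernels the command goes out via a scsi_request
         * and scst_do_req(); on newer kernels scst_exec_req() is used.
         */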
1666 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1667         if (unlikely(scst_alloc_request(cmd) != 0)) {
1668                 if (scst_cmd_atomic(cmd)) {
1669                         rc = SCST_EXEC_NEED_THREAD;
1670                         goto out_clear;
1671                 } else {
1672                         PRINT_INFO_PR("%s", "Unable to allocate request, "
1673                                 "sending BUSY status");
1674                         goto out_busy;
1675                 }
1676         }
1677         
1678         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1679                     (void *)cmd->scsi_req->sr_buffer,
1680                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1681                     cmd->retries);
1682 #else
1683         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1684                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1685                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1686                         scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
1687         if (unlikely(rc != 0)) {
1688                 if (scst_cmd_atomic(cmd)) {
1689                         rc = SCST_EXEC_NEED_THREAD;
1690                         goto out_clear;
1691                 } else {
1692                         PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1693                         goto out_error;
1694                 }
1695         }
1696 #endif
1697
1698         rc = SCST_EXEC_COMPLETED;
1699
1700 out:
1701         TRACE_EXIT();
1702         return rc;
1703
1704 out_clear:
1705         /* Restore the state */
1706         cmd->sent_to_midlev = 0;
1707         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1708         goto out;
1709
1710 out_rc_error:
1711         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1712                     "invalid code %d", cmd->dev->handler->name, rc);
1713         /* fall through */
1714
1715 out_error:
1716         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1717         cmd->completed = 1;
1718         cmd->state = SCST_CMD_STATE_DEV_DONE;
1719         rc = SCST_EXEC_COMPLETED;
1720         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1721         goto out;
1722
1723 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1724 out_busy:
1725         scst_set_busy(cmd);
1726         cmd->completed = 1;
1727         cmd->state = SCST_CMD_STATE_DEV_DONE;
1728         rc = SCST_EXEC_COMPLETED;
1729         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1730         goto out;
1731 #endif
1732
1733 out_aborted:
1734         rc = SCST_EXEC_COMPLETED;
1735         /* Report the result. The cmd is not completed */
1736         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1737         goto out;
1738 }
1739
1740 static int scst_send_to_midlev(struct scst_cmd *cmd)
1741 {
1742         int res, rc;
1743         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1744         struct scst_device *dev = cmd->dev;
1745         int expected_sn;
1746         int count;
1747         int atomic = scst_cmd_atomic(cmd);
1748
1749         TRACE_ENTRY();
1750
1751         res = SCST_CMD_STATE_RES_CONT_NEXT;
1752
1753         if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1754                 TRACE_DBG("Dev handler %s exec() can not be "
1755                       "called in atomic context, rescheduling to the thread",
1756                       dev->handler->name);
1757                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1758                 goto out;
1759         }
1760
1761         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1762                 goto out;
1763
1764         scst_inc_cmd_count(); /* protect dev & tgt_dev */
1765
1766         if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1767                 rc = scst_do_send_to_midlev(cmd);
1768                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1769                 if (rc == SCST_EXEC_NEED_THREAD) {
1770                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1771                               "thread context, rescheduling");
1772                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1773                         scst_dec_on_dev_cmd(cmd);
1774                         goto out_dec_cmd_count;
1775                 } else {
1776                         BUG_ON(rc != SCST_EXEC_COMPLETED);
1777                         goto out_unplug;
1778                 }
1779         }
1780
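        /*
         * Commands must reach the mid-level in SN order. If this cmd's SN
         * is not the expected one, put it on the deferred list; it will be
         * picked up once expected_sn catches up with it.
         */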
1781         expected_sn = tgt_dev->expected_sn;
1782         if (cmd->sn != expected_sn) {
1783                 spin_lock_bh(&tgt_dev->sn_lock);
1784                 tgt_dev->def_cmd_count++;
1785                 smp_mb();
1786                 barrier(); /* to reread expected_sn */
1787                 expected_sn = tgt_dev->expected_sn;
1788                 if (cmd->sn != expected_sn) {
1789                         scst_dec_on_dev_cmd(cmd);
1790                         TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1791                               "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1792                         list_add_tail(&cmd->sn_cmd_list_entry,
1793                                       &tgt_dev->deferred_cmd_list);
1794                         spin_unlock_bh(&tgt_dev->sn_lock);
1795                         /* !! At this point cmd can be already freed !! */
1796                         goto out_dec_cmd_count;
1797                 } else {
1798                         TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1799                               "expected_sn %d, continuing", expected_sn);
1800                         tgt_dev->def_cmd_count--;
1801                         spin_unlock_bh(&tgt_dev->sn_lock);
1802                 }
1803         }
1804
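        /*
         * Execute this cmd, then keep draining deferred commands that
         * become eligible as expected_sn advances.
         */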
1805         count = 0;
1806         while(1) {
1807                 rc = scst_do_send_to_midlev(cmd);
1808                 if (rc == SCST_EXEC_NEED_THREAD) {
1809                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1810                               "thread context, rescheduling");
1811                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1812                         scst_dec_on_dev_cmd(cmd);
1813                         if (count != 0)
1814                                 goto out_unplug;
1815                         else
1816                                 goto out_dec_cmd_count;
1817                 }
1818                 BUG_ON(rc != SCST_EXEC_COMPLETED);
1819                 /* !! At this point cmd can be already freed !! */
1820                 count++;
1821                 expected_sn = __scst_inc_expected_sn(tgt_dev);
1822                 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1823                 if (cmd == NULL)
1824                         break;
1825                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1826                         break;
1827         }
1828
1829 out_unplug:
1830         if (dev->scsi_dev != NULL)
1831                 generic_unplug_device(dev->scsi_dev->request_queue);
1832
1833 out_dec_cmd_count:
1834         scst_dec_cmd_count();
1835         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1836
1837 out:
1838         TRACE_EXIT_HRES(res);
1839         return res;
1840 }
1841
1842 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1843 {
1844         int res = 0, rc;
1845         unsigned char type;
1846
1847         TRACE_ENTRY();
1848
1849         if (unlikely(cmd->cdb[0] == REQUEST_SENSE)) {
1850                 if (cmd->internal)
1851                         cmd = scst_complete_request_sense(cmd);
1852         } else if (unlikely(scst_check_auto_sense(cmd))) {
1853                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1854                             "without sense data (opcode 0x%x), issuing "
1855                             "REQUEST SENSE", cmd->cdb[0]);
1856                 rc = scst_prepare_request_sense(cmd);
1857                 if (rc > 0) {
1858                         *pres = rc;
1859                         res = 1;
1860                         goto out;
1861                 } else {
1862                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1863                                     "returning HARDWARE ERROR");
1864                         scst_set_cmd_error(cmd,
1865                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1866                 }
1867         }
1868
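        /*
         * For devices exported read-only, force the Write Protect bit in
         * MODE SENSE / MODE SENSE(10) responses of disk- and tape-like types.
         */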
1869         type = cmd->dev->handler->type;
1870         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1871             cmd->tgt_dev->acg_dev->rd_only_flag &&
1872             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1873              type == TYPE_TAPE))
1874         {
1875                 int32_t length;
1876                 uint8_t *address;
1877
1878                 length = scst_get_buf_first(cmd, &address);
1879                 if (length <= 0)
1880                         goto out;
1881                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1882                         address[2] |= 0x80;   /* Write Protect*/
1883                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1884                         address[3] |= 0x80;   /* Write Protect*/
1885                 scst_put_buf(cmd, address);
1886         }
1887
1888         /* 
1889          * Check and clear NormACA option for the device, if necessary,
1890          * since we don't support ACA
1891          */
1892         if ((cmd->cdb[0] == INQUIRY) &&
1893             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1894             (cmd->resp_data_len > SCST_INQ_BYTE3))
1895         {
1896                 uint8_t *buffer;
1897                 int buflen;
1898
1899                 /* ToDo: all pages ?? */
1900                 buflen = scst_get_buf_first(cmd, &buffer);
1901                 if (buflen > 0) {
1902                         if (buflen > SCST_INQ_BYTE3) {
1903 #ifdef EXTRACHECKS
1904                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1905                                         PRINT_INFO_PR("NormACA set for device: "
1906                                             "lun=%Ld, type 0x%02x", 
1907                                             (uint64_t)cmd->lun, buffer[0]);
1908                                 }
1909 #endif
1910                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1911                         } else
1912                                 scst_set_cmd_error(cmd,
1913                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1914
1915                         scst_put_buf(cmd, buffer);
1916                 }
1917         }
1918
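        /*
         * If a RESERVE failed on the real device, clear our software
         * reservation state; in any case undo the blocking done in
         * scst_reserve_local().
         */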
1919         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
1920                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
1921                                                 &cmd->tgt_dev->tgt_dev_flags)) {
1922                         struct scst_tgt_dev *tgt_dev_tmp;
1923                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
1924                               (uint64_t)cmd->lun, cmd->masked_status);
1925                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1926                                      sizeof(cmd->sense_buffer));
1927                         /* Clearing the reservation */
1928                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
1929                                             dev_tgt_dev_list_entry) {
1930                                 clear_bit(SCST_TGT_DEV_RESERVED, 
1931                                         &tgt_dev_tmp->tgt_dev_flags);
1932                         }
1933                         cmd->dev->dev_reserved = 0;
1934                 }
1935                 scst_unblock_dev(cmd->dev);
1936         }
1937         
1938         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
1939                      (cmd->cdb[0] == MODE_SELECT_10) ||
1940                      (cmd->cdb[0] == LOG_SELECT)))
1941         {
1942                 if (cmd->status == 0) {
1943                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
1944                                 "setting the SELECT UA (lun=%Ld)", 
1945                                 (uint64_t)cmd->lun);
1946                         spin_lock_bh(&scst_temp_UA_lock);
1947                         if (cmd->cdb[0] == LOG_SELECT) {
1948                                 scst_set_sense(scst_temp_UA,
1949                                         sizeof(scst_temp_UA),
1950                                         UNIT_ATTENTION, 0x2a, 0x02);
1951                         } else {
1952                                 scst_set_sense(scst_temp_UA,
1953                                         sizeof(scst_temp_UA),
1954                                         UNIT_ATTENTION, 0x2a, 0x01);
1955                         }
1956                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
1957                                 sizeof(scst_temp_UA), 1);
1958                         spin_unlock_bh(&scst_temp_UA_lock);
1959                 }
1960                 scst_unblock_dev(cmd->dev);
1961         }
1962
1963 out:
1964         TRACE_EXIT_RES(res);
1965         return res;
1966 }
1967
1968 static int scst_dev_done(struct scst_cmd *cmd)
1969 {
1970         int res = SCST_CMD_STATE_RES_CONT_SAME;
1971         int state;
1972         int atomic = scst_cmd_atomic(cmd);
1973
1974         TRACE_ENTRY();
1975
1976         if (atomic && !cmd->dev->handler->dev_done_atomic &&
1977             cmd->dev->handler->dev_done) 
1978         {
1979                 TRACE_DBG("Dev handler %s dev_done() can not be "
1980                       "called in atomic context, rescheduling to the thread",
1981                       cmd->dev->handler->name);
1982                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1983                 goto out;
1984         }
1985
1986         if (scst_done_cmd_check(cmd, &res))
1987                 goto out;
1988
1989         state = SCST_CMD_STATE_XMIT_RESP;
1990         if (likely(!scst_is_cmd_local(cmd)) && 
1991             likely(cmd->dev->handler->dev_done != NULL))
1992         {
1993                 int rc;
1994                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
1995                       cmd->dev->handler->name, cmd);
1996                 rc = cmd->dev->handler->dev_done(cmd);
1997                 TRACE_DBG("Dev handler %s dev_done() returned %d",
1998                       cmd->dev->handler->name, rc);
1999                 if (rc != SCST_CMD_STATE_DEFAULT)
2000                         state = rc;
2001         }
2002
2003         switch (state) {
2004         case SCST_CMD_STATE_REINIT:
2005                 cmd->state = state;
2006                 res = SCST_CMD_STATE_RES_RESTART;
2007                 break;
2008
2009         case SCST_CMD_STATE_DEV_PARSE:
2010         case SCST_CMD_STATE_PREPARE_SPACE:
2011         case SCST_CMD_STATE_RDY_TO_XFER:
2012         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2013         case SCST_CMD_STATE_DEV_DONE:
2014         case SCST_CMD_STATE_XMIT_RESP:
2015         case SCST_CMD_STATE_FINISHED:
2016                 cmd->state = state;
2017                 res = SCST_CMD_STATE_RES_CONT_SAME;
2018                 break;
2019
2020         case SCST_CMD_STATE_NEED_THREAD_CTX:
2021                 TRACE_DBG("Dev handler %s dev_done() requested "
2022                       "thread context, rescheduling",
2023                       cmd->dev->handler->name);
2024                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2025                 break;
2026
2027         default:
2028                 if (state >= 0) {
2029                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2030                                 "invalid cmd state %d", 
2031                                 cmd->dev->handler->name, state);
2032                 } else {
2033                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2034                                 "error %d", cmd->dev->handler->name, 
2035                                 state);
2036                 }
2037                 scst_set_cmd_error(cmd,
2038                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2039                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2040                 res = SCST_CMD_STATE_RES_CONT_SAME;
2041                 break;
2042         }
2043
2044 out:
2045         TRACE_EXIT_HRES(res);
2046         return res;
2047 }
2048
2049 static int scst_xmit_response(struct scst_cmd *cmd)
2050 {
2051         int res, rc;
2052         int atomic = scst_cmd_atomic(cmd);
2053
2054         TRACE_ENTRY();
2055
2056         /* 
2057          * Check here also in order to avoid unnecessary delays of other
2058          * commands.
2059          */
2060         if (unlikely(cmd->sent_to_midlev == 0) &&
2061             (cmd->tgt_dev != NULL))
2062         {
2063                 TRACE(TRACE_SCSI_SERIALIZING,
2064                       "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
2065                 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
2066                 cmd->sent_to_midlev = 1;
2067         }
2068
2069         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2070                 TRACE_DBG("%s", "xmit_response() can not be "
2071                       "called in atomic context, rescheduling to the thread");
2072                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2073                 goto out;
2074         }
2075
2076         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2077         smp_mb__after_set_bit();
2078
2079         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2080                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2081                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2082                                 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
2083                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2084                 }
2085         }
2086
2087         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2088                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
2089                         cmd, cmd->tag);
2090                 cmd->state = SCST_CMD_STATE_FINISHED;
2091                 res = SCST_CMD_STATE_RES_CONT_SAME;
2092                 goto out;
2093         }
2094
2095 #ifdef DEBUG_TM
2096         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2097                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2098                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2099                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2100                         goto out;
2101                 }
2102                 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2103                         cmd, cmd->tag);
2104                 schedule_timeout_uninterruptible(HZ);
2105         }
2106 #endif
2107
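        /*
         * finished_cmds is snapshotted before each xmit_response() attempt;
         * on SCST_TGT_RES_QUEUE_FULL the cmd is either queued for a later
         * retry or retried here, depending on scst_queue_retry_cmd().
         */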
2108         while (1) {
2109                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2110
2111                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2112                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2113
2114                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2115
2116 #if defined(DEBUG) || defined(TRACING)
2117                 if (cmd->sg) {
2118                         int i;
2119                         struct scatterlist *sg = cmd->sg;
2120                         TRACE(TRACE_SEND_BOT,
2121                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2122                               cmd->sg_cnt, sg, (void*)sg[0].page);
2123                         for(i = 0; i < cmd->sg_cnt; ++i) {
2124                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2125                                     "Xmitting sg", page_address(sg[i].page),
2126                                     sg[i].length);
2127                         }
2128                 }
2129 #endif
2130
2131 #ifdef DEBUG_RETRY
2132                 if (((scst_random() % 100) == 77))
2133                         rc = SCST_TGT_RES_QUEUE_FULL;
2134                 else
2135 #endif
2136                         rc = cmd->tgtt->xmit_response(cmd);
2137                 TRACE_DBG("xmit_response() returned %d", rc);
2138
2139                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2140                         goto out;
2141
2142                 /* Restore the previous state */
2143                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2144
2145                 switch (rc) {
2146                 case SCST_TGT_RES_QUEUE_FULL:
2147                 {
2148                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2149                                 break;
2150                         else
2151                                 continue;
2152                 }
2153
2154                 case SCST_TGT_RES_NEED_THREAD_CTX:
2155                 {
2156                         TRACE_DBG("Target driver %s xmit_response() "
2157                               "requested thread context, rescheduling",
2158                               cmd->tgtt->name);
2159                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2160                         break;
2161                 }
2162
2163                 default:
2164                         goto out_error;
2165                 }
2166                 break;
2167         }
2168
2169 out:
2170         /* Caution: cmd can be already dead here */
2171         TRACE_EXIT_HRES(res);
2172         return res;
2173
2174 out_error:
2175         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2176                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2177                         "fatal error", cmd->tgtt->name);
2178         } else {
2179                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2180                         "invalid value %d", cmd->tgtt->name, rc);
2181         }
2182         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2183         cmd->state = SCST_CMD_STATE_FINISHED;
2184         res = SCST_CMD_STATE_RES_CONT_SAME;
2185         goto out;
2186 }
2187
2188 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2189 {
2190         TRACE_ENTRY();
2191
2192         BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2193
2194         cmd->state = SCST_CMD_STATE_FINISHED;
2195         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2196
2197         TRACE_EXIT();
2198         return;
2199 }
2200
2201 static int scst_finish_cmd(struct scst_cmd *cmd)
2202 {
2203         int res;
2204
2205         TRACE_ENTRY();
2206
2207         if (cmd->mem_checked) {
2208                 spin_lock_bh(&scst_cmd_mem_lock);
2209                 scst_cur_cmd_mem -= cmd->bufflen;
2210                 spin_unlock_bh(&scst_cmd_mem_lock);
2211         }
2212
2213         spin_lock_irq(&scst_list_lock);
2214
2215         TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2216         list_del(&cmd->cmd_list_entry);
2217
2218         if (cmd->mgmt_cmnd)
2219                 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2220
2221         if (likely(cmd->tgt_dev != NULL))
2222                 cmd->tgt_dev->cmd_count--;
2223
2224         cmd->sess->sess_cmd_count--;
2225
2226         list_del(&cmd->search_cmd_list_entry);
2227
2228         spin_unlock_irq(&scst_list_lock);
2229
2230         scst_free_cmd(cmd);
2231
2232         res = SCST_CMD_STATE_RES_CONT_NEXT;
2233
2234         TRACE_EXIT_HRES(res);
2235         return res;
2236 }
2237
2238 /*
2239  * Returns 0 on success, > 0 when we need to wait for unblock,
2240  * < 0 if there is no device (lun) or device type handler.
2241  * Called under scst_list_lock and IRQs disabled
2242  */
2243 static int scst_translate_lun(struct scst_cmd *cmd)
2244 {
2245         struct scst_tgt_dev *tgt_dev = NULL;
2246         int res = 0;
2247
2248         TRACE_ENTRY();
2249
2250         scst_inc_cmd_count();   
2251
2252         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2253                 res = -1;
2254                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2255                       (uint64_t)cmd->lun);
2256                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2257                                     sess_tgt_dev_list_entry) 
2258                 {
2259                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2260                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2261
2262                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2263                                         PRINT_INFO_PR("Dev handler for device "
2264                                           "%Ld is NULL, the device will not be "
2265                                           "visible remotely", (uint64_t)cmd->lun);
2266                                         break;
2267                                 }
2268                                 
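                                /*
                                 * A cmd in REINIT was already counted and
                                 * sequenced on its previous tgt_dev; drop
                                 * that accounting before reassigning it.
                                 */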
2269                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2270                                         cmd->tgt_dev_saved->cmd_count--;
2271                                         TRACE(TRACE_SCSI_SERIALIZING,
2272                                               "SCST_CMD_STATE_REINIT: "
2273                                               "incrementing expected_sn on tgt_dev_saved %p",
2274                                               cmd->tgt_dev_saved);
2275                                         scst_inc_expected_sn_unblock(
2276                                                 cmd->tgt_dev_saved, cmd, 1);
2277                                 }
2278                                 cmd->tgt_dev = tgt_dev;
2279                                 tgt_dev->cmd_count++;
2280                                 cmd->dev = tgt_dev->acg_dev->dev;
2281
2282                                 /* ToDo: cmd->queue_type */
2283
2284                                 /* scst_list_lock is enough to protect that */
2285                                 cmd->sn = tgt_dev->next_sn;
2286                                 tgt_dev->next_sn++;
2287
2288                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2289                                         "cmd->sn: %d", cmd->sn);
2290
2291                                 res = 0;
2292                                 break;
2293                         }
2294                 }
2295                 if (res != 0) {
2296                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2297                                 "a nonexistent LU?", (uint64_t)cmd->lun);
2298                         scst_dec_cmd_count();
2299                 }
2300         } else {
2301                 if ( !cmd->sess->waiting) {
2302                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2303                               cmd->sess);
2304                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2305                                       &scst_dev_wait_sess_list);
2306                         cmd->sess->waiting = 1;
2307                 }
2308                 scst_dec_cmd_count();
2309                 res = 1;
2310         }
2311
2312         TRACE_EXIT_RES(res);
2313         return res;
2314 }
2315
2316 /* Called under scst_list_lock and IRQs disabled */
2317 static int scst_process_init_cmd(struct scst_cmd *cmd)
2318 {
2319         int res = 0;
2320
2321         TRACE_ENTRY();
2322
2323         res = scst_translate_lun(cmd);
2324         if (likely(res == 0)) {
2325                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2326                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
2327                         TRACE(TRACE_RETRY, "Too many pending commands in "
2328                                 "session, returning BUSY to initiator \"%s\"",
2329                                 (cmd->sess->initiator_name[0] == '\0') ?
2330                                   "Anonymous" : cmd->sess->initiator_name);
2331                         scst_set_busy(cmd);
2332                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2333                 }
2334                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2335                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2336         } else if (res < 0) {
2337                 TRACE_DBG("Finishing cmd %p", cmd);
2338                 scst_set_cmd_error(cmd,
2339                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2340                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2341                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2342                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2343         }
2344
2345         TRACE_EXIT_RES(res);
2346         return res;
2347 }
2348
2349 /* 
2350  * Called under scst_list_lock and IRQs disabled
2351  * We don't drop it anywhere inside, because command execution
2352  * has to be serialized, i.e. commands must be executed in the order
2353  * of their arrival, and that order is established inside scst_translate_lun().
2354  */
2355 static int scst_do_job_init(struct list_head *init_cmd_list)
2356 {
2357         int res = 1;
2358
2359         TRACE_ENTRY();
2360
2361         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2362                 while (!list_empty(init_cmd_list)) {
2363                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2364                                                           typeof(*cmd),
2365                                                           cmd_list_entry);
2366                         res = scst_process_init_cmd(cmd);
2367                         if (res > 0)
2368                                 break;
2369                 }
2370         }
2371
2372         TRACE_EXIT_RES(res);
2373         return res;
2374 }
2375
2376 /* Called with no locks held */
2377 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2378         int left_locked)
2379 {
2380         int res;
2381
2382         TRACE_ENTRY();
2383
2384 #ifdef EXTRACHECKS
2385         BUG_ON(in_irq());
2386 #endif
2387
2388         cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2389                         SCST_CONTEXT_DIRECT_ATOMIC);
2390         cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2391
2392         do {
2393                 switch (cmd->state) {
2394                 case SCST_CMD_STATE_DEV_PARSE:
2395                         res = scst_parse_cmd(cmd);
2396                         break;
2397
2398                 case SCST_CMD_STATE_PREPARE_SPACE:
2399                         res = scst_prepare_space(cmd);
2400                         break;
2401
2402                 case SCST_CMD_STATE_RDY_TO_XFER:
2403                         res = scst_rdy_to_xfer(cmd);
2404                         break;
2405
2406                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2407                         res = scst_send_to_midlev(cmd);
2408                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2409                         break;
2410
2411                 case SCST_CMD_STATE_DEV_DONE:
2412                         res = scst_dev_done(cmd);
2413                         break;
2414
2415                 case SCST_CMD_STATE_XMIT_RESP:
2416                         res = scst_xmit_response(cmd);
2417                         break;
2418
2419                 case SCST_CMD_STATE_FINISHED:
2420                         res = scst_finish_cmd(cmd);
2421                         break;
2422
2423                 default:
2424                         PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2425                                cmd, cmd->state);
2426                         BUG();
2427                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2428                         break;
2429                 }
2430         } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2431
2432         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2433                 if (left_locked)
2434                         spin_lock_irq(&scst_list_lock);
2435         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2436                 spin_lock_irq(&scst_list_lock);
2437
2438                 switch (cmd->state) {
2439                 case SCST_CMD_STATE_DEV_PARSE:
2440                 case SCST_CMD_STATE_PREPARE_SPACE:
2441                 case SCST_CMD_STATE_RDY_TO_XFER:
2442                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2443                 case SCST_CMD_STATE_DEV_DONE:
2444                 case SCST_CMD_STATE_XMIT_RESP:
2445                 case SCST_CMD_STATE_FINISHED:
2446                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2447                         list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2448                         break;
2449 #ifdef EXTRACHECKS
2450                 /* states a cmd should not be in at this point */
2451                 case SCST_CMD_STATE_DEFAULT:
2452                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2453                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2454                                 "useful list (left on scst cmd list)", cmd, 
2455                                 cmd->state);
2456                         spin_unlock_irq(&scst_list_lock);
2457                         BUG();
2458                         spin_lock_irq(&scst_list_lock);
2459                         break;
2460 #endif
2461                 default:
2462                         break;
2463                 }
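                /* Mark the cmd thread-only and kick a thread to pick it up */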
2464                 cmd->non_atomic_only = 1;
2465                 if (!left_locked)
2466                         spin_unlock_irq(&scst_list_lock);
2467                 wake_up(&scst_list_waitQ);
2468         } else if (res == SCST_CMD_STATE_RES_RESTART) {
2469                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2470                         spin_lock_irq(&scst_list_lock);
2471                         TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2472                         list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2473                         if (!left_locked)
2474                                 spin_unlock_irq(&scst_list_lock);
2475                 } else
2476                         BUG();
2477         } else
2478                 BUG();
2479
2480         TRACE_EXIT_RES(res);
2481         return res;
2482 }
2483
2484 /* Called under scst_list_lock and IRQs disabled */
2485 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2486 {
2487         int res;
2488         struct scst_cmd *cmd;
2489         int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2490                         SCST_CONTEXT_DIRECT_ATOMIC);
2491
2492         TRACE_ENTRY();
2493
2494 #ifdef EXTRACHECKS
2495         {
2496                 int c = (context & ~SCST_PROCESSIBLE_ENV);
2497                 WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) && 
2498                         (c != SCST_CONTEXT_DIRECT));
2499         }
2500 #endif
2501
2502         tm_dbg_check_released_cmds();
2503
2504 restart:
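        /*
         * Restart from the list head whenever scst_list_lock may have been
         * dropped during processing, since the list could have changed.
         */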
2505         list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2506                 if (atomic && cmd->non_atomic_only) {
2507                         TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2508                         continue;
2509                 }
2510                 if (tm_dbg_check_cmd(cmd) != 0)
2511                         goto restart;
2512                 res = scst_process_active_cmd(cmd, context, NULL, 1);
2513                 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2514                         goto restart;
2515                 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2516                         goto restart;
2517                 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2518                         break;
2519                 } else
2520                         BUG();
2521         }
2522
2523         TRACE_EXIT();
2524         return;
2525 }
2526
2527 static inline int test_cmd_lists(void)
2528 {
2529         int res = !list_empty(&scst_active_cmd_list) ||
2530             (!list_empty(&scst_init_cmd_list) &&
2531              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2532             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2533             unlikely(scst_shut_threads_count > 0) ||
2534             tm_dbg_is_release();
2535         return res;
2536 }
2537
2538 int scst_cmd_thread(void *arg)
2539 {
2540         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2541         int n;
2542
2543         TRACE_ENTRY();
2544
2545         spin_lock(&lock);
2546         n = scst_thread_num++;
2547         spin_unlock(&lock);
2548         daemonize("scsi_tgt%d", n);
2549         recalc_sigpending();
2550         set_user_nice(current, 10);
2551         current->flags |= PF_NOFREEZE;
2552
2553         spin_lock_irq(&scst_list_lock);
2554         while (1) {
2555                 wait_queue_t wait;
2556                 init_waitqueue_entry(&wait, current);
2557
2558                 if (!test_cmd_lists()) {
2559                         add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2560                         for (;;) {
2561                                 set_current_state(TASK_INTERRUPTIBLE);
2562                                 if (test_cmd_lists())
2563                                         break;
2564                                 spin_unlock_irq(&scst_list_lock);
2565                                 schedule();
2566                                 spin_lock_irq(&scst_list_lock);
2567                         }
2568                         set_current_state(TASK_RUNNING);
2569                         remove_wait_queue(&scst_list_waitQ, &wait);
2570                 }
2571
2572                 scst_do_job_init(&scst_init_cmd_list);
2573                 scst_do_job_active(&scst_active_cmd_list,
2574                                    SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
2575
2576                 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2577                     list_empty(&scst_cmd_list) &&
2578                     list_empty(&scst_active_cmd_list) &&
2579                     list_empty(&scst_init_cmd_list)) {
2580                         break;
2581                 }
2582                 
2583                 if (unlikely(scst_shut_threads_count > 0)) {
2584                         scst_shut_threads_count--;
2585                         break;
2586                 }
2587         }
2588         spin_unlock_irq(&scst_list_lock);
2589
2590         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2591                 smp_mb__after_atomic_dec();
2592                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2593                 up(scst_shutdown_mutex);
2594         }
2595
2596         TRACE_EXIT();
2597         return 0;
2598 }
2599
2600 void scst_cmd_tasklet(long p)
2601 {
2602         TRACE_ENTRY();
2603
2604         spin_lock_irq(&scst_list_lock);
2605
2606         scst_do_job_init(&scst_init_cmd_list);
2607         scst_do_job_active(&scst_active_cmd_list, 
2608                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2609
2610         spin_unlock_irq(&scst_list_lock);
2611
2612         TRACE_EXIT();
2613         return;
2614 }
2615
2616 /*
2617  * Returns 0 on success, < 0 if there is no such LUN for this session, or
2618  * > 0 if SCST_FLAG_SUSPENDED is set.
2619  */
2620 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2621 {
2622         struct scst_tgt_dev *tgt_dev = NULL;
2623         int res = -1;
2624
2625         TRACE_ENTRY();
2626
2627         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2628               (uint64_t)mcmd->lun);
2629
2630         spin_lock_irq(&scst_list_lock);
2631         scst_inc_cmd_count();   
2632         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2633                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2634                                     sess_tgt_dev_list_entry) 
2635                 {
2636                         if (tgt_dev->acg_dev->lun == mcmd->lun) {
2637                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2638                                 mcmd->mcmd_tgt_dev = tgt_dev;
2639                                 res = 0;
2640                                 break;
2641                         }
2642                 }
2643                 if (mcmd->mcmd_tgt_dev == NULL)
2644                         scst_dec_cmd_count();
2645         } else {
2646                 if ( !mcmd->sess->waiting) {
2647                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2648                               mcmd->sess);
2649                         list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2650                                       &scst_dev_wait_sess_list);
2651                         mcmd->sess->waiting = 1;
2652                 }
2653                 scst_dec_cmd_count();
2654                 res = 1;
2655         }
2656         spin_unlock_irq(&scst_list_lock);
2657
2658         TRACE_EXIT_HRES(res);
2659         return res;
2660 }
2661
2662 /* Called under scst_list_lock and IRQ off */
2663 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2664         struct scst_mgmt_cmd *mcmd)
2665 {
2666         TRACE_ENTRY();
2667
2668         TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2669                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2670                 mcmd->cmd_wait_count);
2671
2672         cmd->mgmt_cmnd = NULL;
2673
2674         if (cmd->completed)
2675                 mcmd->completed_cmd_count++;
2676
2677         mcmd->cmd_wait_count--;
2678         if (mcmd->cmd_wait_count > 0) {
2679                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2680                         mcmd->cmd_wait_count);
2681                 goto out;
2682         }
2683
2684         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2685
2686         if (mcmd->completed) {
2687                 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2688                         mcmd);
2689                 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2690                         &scst_active_mgmt_cmd_list);
2691         }
2692
2693         wake_up(&scst_mgmt_cmd_list_waitQ);
2694
2695 out:
2696         TRACE_EXIT();
2697         return;
2698 }
2699
2700 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2701         struct scst_tgt_dev *tgt_dev, int set_status)
2702 {
2703         int res = SCST_DEV_TM_NOT_COMPLETED;
2704         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2705                 int irq = irqs_disabled();
2706                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2707                         tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2708 #ifdef EXTRACHECKS
2709                 BUG_ON(in_irq());
2710 #endif
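                /* Call the handler with BHs off, unless IRQs are already disabled */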
2711                 if (!irq)
2712                         local_bh_disable();
2713                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2714                         tgt_dev);
2715                 if (!irq)
2716                         local_bh_enable();
2717                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2718                       tgt_dev->acg_dev->dev->handler->name, res);
2719                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2720                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2721                                                 SCST_MGMT_STATUS_SUCCESS :
2722                                                 SCST_MGMT_STATUS_FAILED;
2723                 }
2724         }
2725         return res;
2726 }
2727
2728 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2729 {
2730         switch(mgmt_fn) {
2731                 case SCST_ABORT_TASK:
2732                 case SCST_ABORT_TASK_SET:
2733                 case SCST_CLEAR_TASK_SET:
2734                         return 1;
2735                 default:
2736                         return 0;
2737         }
2738 }
2739
2740 /* 
2741  * Called under scst_list_lock and IRQ off (to protect cmd
2742  * from being destroyed), with BHs off as well. If the abort has to be
2743  * deferred, the cmd is linked to mcmd via cmd->mgmt_cmnd.
2744  */
2745 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2746         int other_ini, int call_dev_task_mgmt_fn)
2747 {
2748         TRACE_ENTRY();
2749
2750         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2751
2752         if (other_ini) {
2753                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2754                 smp_mb__after_set_bit();
2755         }
2756         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2757         smp_mb__after_set_bit();
2758
2759         if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2760                  scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2761
2762         if (mcmd) {
2763                 int defer;
2764                 if (cmd->tgtt->tm_sync_reply)
2765                         defer = 1;
2766                 else {
2767                         if (scst_is_strict_mgmt_fn(mcmd->fn))
2768                                 defer = test_bit(SCST_CMD_EXECUTING,
2769                                         &cmd->cmd_flags);
2770                         else
2771                                 defer = test_bit(SCST_CMD_XMITTING,
2772                                         &cmd->cmd_flags);
2773                 }
2774
2775                 if (defer) {
2776                         /*
2777                          * Delay the response until the command's finish in
2778                          * order to guarantee that "no further responses from
2779                          * the task are sent to the SCSI initiator port" after
2780                          * response from the TM function is sent (SAM)
2781                          */
2782                         TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2783                                 "xmitted (state %d), deferring ABORT...", cmd,
2784                                 cmd->tag, cmd->state);
2785 #ifdef EXTRACHECKS
2786                         if (cmd->mgmt_cmnd) {
2787                                 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2788                                         "has non-NULL mgmt_cmnd %p!!! Current "
2789                                         "mcmd %p\n", cmd, cmd->tag, cmd->state,
2790                                         cmd->mgmt_cmnd, mcmd);
2791                         }
2792 #endif
2793                         BUG_ON(cmd->mgmt_cmnd);
2794                         mcmd->cmd_wait_count++;
2795                         cmd->mgmt_cmnd = mcmd;
2796                 }
2797         }
2798
2799         tm_dbg_release_cmd(cmd);
2800
2801         TRACE_EXIT();
2802         return;
2803 }
2804
2805 /* Called under scst_list_lock and IRQ off */
2806 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2807 {
2808         int res;
2809         if (mcmd->cmd_wait_count != 0) {
2810                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2811                         "wait", mcmd->cmd_wait_count);
2812                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2813                 res = -1;
2814         } else {
2815                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2816                 res = 0;
2817         }
2818         mcmd->completed = 1;
2819         return res;
2820 }
2821
2822 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2823 {
2824         struct scst_device *dev;
2825         int wake = 0;
2826
2827         TRACE_ENTRY();
2828
2829         if (!scst_mutex_held)
2830                 down(&scst_mutex);
2831
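        /*
         * Move every aborted cmd from each device's blocked list onto the
         * active cmd list and wake up a processing thread.
         */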
2832         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2833                 struct scst_cmd *cmd, *tcmd;
2834                 spin_lock_bh(&dev->dev_lock);
2835                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2836                                         blocked_cmd_list_entry) {
2837                         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2838                                 list_del(&cmd->blocked_cmd_list_entry);
2839                                 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2840                                         "to active cmd list", cmd);
2841                                 spin_lock_irq(&scst_list_lock);
2842                                 list_move_tail(&cmd->cmd_list_entry,
2843                                         &scst_active_cmd_list);
2844                                 spin_unlock_irq(&scst_list_lock);
2845                                 wake = 1;
2846                         }
2847                 }
2848                 spin_unlock_bh(&dev->dev_lock);
2849         }
2850
2851         if (!scst_mutex_held)
2852                 up(&scst_mutex);
2853
2854         if (wake)
2855                 wake_up(&scst_list_waitQ);
2856
2857         TRACE_EXIT();
2858         return;
2859 }
2860
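/*
 * Aborted commands sitting on a device's blocked_cmd_list are pushed back
 * to scst_active_cmd_list here so that the regular command threads can
 * notice the abort and complete them without waiting for the device to be
 * unblocked.
 */
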
2861 /* Aborts all cmds of tgt_dev; scst_mutex_held - whether scst_mutex is held */
2862 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2863         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2864 {
2865         struct scst_cmd *cmd;
2866         struct scst_session *sess = tgt_dev->sess;
2867
2868         TRACE_ENTRY();
2869
2870         local_bh_disable();
2871         spin_lock_irq(&scst_list_lock);
2872
2873         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2874         list_for_each_entry(cmd, &sess->search_cmd_list, 
2875                         search_cmd_list_entry) {
2876                 if ((cmd->tgt_dev == NULL) &&
2877                     (cmd->lun != tgt_dev->acg_dev->lun))
2878                         continue;
2879                 if ((cmd->tgt_dev != NULL) && (cmd->tgt_dev != tgt_dev))
2880                         continue;
2881                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2882         }
2883         spin_unlock_irq(&scst_list_lock);
2884         local_bh_enable();
2885
2886         scst_unblock_aborted_cmds(scst_mutex_held);
2887
2888         TRACE_EXIT();
2889         return;
2890 }
2891
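/*
 * Matching rule used above: a command is aborted if it has already been
 * assigned to this tgt_dev, or if it has not been translated yet
 * (cmd->tgt_dev == NULL) but is addressed to the same LUN.
 */
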
2892 /* Returns 0 if the command processing should be continued, <0 otherwise */
2893 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2894 {
2895         int res;
2896         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2897         struct scst_device *dev = tgt_dev->acg_dev->dev;
2898
2899         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2900                 tgt_dev->acg_dev->lun, mcmd);
2901
2902         spin_lock_bh(&dev->dev_lock);
2903         __scst_block_dev(dev);
2904         spin_unlock_bh(&dev->dev_lock);
2905
2906         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2907         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2908
2909         res = scst_set_mcmd_next_state(mcmd);
2910
2911         TRACE_EXIT_RES(res);
2912         return res;
2913 }
2914
2915 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2916 {
2917         /*
2918          * No need for special protection for SCST_FLAG_TM_ACTIVE, since
2919          * we can only be called from a single thread.
2920          */
2921         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2922                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2923                         mcmd);
2924                 if (!locked)
2925                         spin_lock_irq(&scst_list_lock);
2926                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
2927                         &scst_delayed_mgmt_cmd_list);
2928                 if (!locked)
2929                         spin_unlock_irq(&scst_list_lock);
2930                 return -1;
2931         } else {
2932                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
2933                 return 0;
2934         }
2935 }
2936
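/*
 * SCST_FLAG_TM_ACTIVE thus serializes TM processing: only one mgmt command
 * is handled at a time, and any that arrive meanwhile are parked on
 * scst_delayed_mgmt_cmd_list until scst_mgmt_cmd_send_done() clears the
 * flag and re-activates the first delayed one.
 */
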
2937 /* Returns 0 if the command processing should be continued,
2938  * >0 if it should be requeued, <0 otherwise */
2939 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
2940 {
2941         int res = 0;
2942
2943         TRACE_ENTRY();
2944
2945         res = scst_check_delay_mgmt_cmd(mcmd, 1);
2946         if (res != 0)
2947                 goto out;
2948
2949         if (mcmd->fn == SCST_ABORT_TASK) {
2950                 struct scst_session *sess = mcmd->sess;
2951                 struct scst_cmd *cmd;
2952
2953                 local_bh_disable();
2954                 spin_lock_irq(&scst_list_lock);
2955                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
2956                 if (cmd == NULL) {
2957                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
2958                                 "tag %d not found", mcmd->tag);
2959                         mcmd->status = SCST_MGMT_STATUS_FAILED;
2960                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2961                 } else {
2962                         TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
2963                                 "aborting it", cmd, mcmd->tag, cmd->sn);
2964                         mcmd->cmd_to_abort = cmd;
2965                         scst_abort_cmd(cmd, mcmd, 0, 1);
2966                         res = scst_set_mcmd_next_state(mcmd);
2967                         mcmd->cmd_to_abort = NULL; /* just in case */
2968                 }
2969                 spin_unlock_irq(&scst_list_lock);
2970                 local_bh_enable();
2971         } else {
2972                 int rc;
2973                 rc = scst_mgmt_translate_lun(mcmd);
2974                 if (rc < 0) {
2975                         PRINT_ERROR_PR("Corresponding device for lun %Ld not "
2976                                 "found", (uint64_t)mcmd->lun);
2977                         mcmd->status = SCST_MGMT_STATUS_FAILED;
2978                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2979                 } else if (rc == 0)
2980                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
2981                 else
2982                         res = rc;
2983         }
2984
2985 out:
2986         TRACE_EXIT_RES(res);
2987         return res;
2988 }
2989
2990 /* Returns 0 if the command processing should be continued, <0 otherwise */
2991 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
2992 {
2993         int res, rc;
2994         struct scst_device *dev, *d;
2995         struct scst_tgt_dev *tgt_dev;
2996         int cont, c;
2997         LIST_HEAD(host_devs);
2998
2999         TRACE_ENTRY();
3000
3001         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3002                 mcmd, mcmd->sess->sess_cmd_count);
3003
3004         down(&scst_mutex);
3005
3006         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3007                 int found = 0;
3008
3009                 spin_lock_bh(&dev->dev_lock);
3010                 __scst_block_dev(dev);
3011                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3012                 spin_unlock_bh(&dev->dev_lock);
3013
3014                 cont = 0;
3015                 c = 0;
3016                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3017                         dev_tgt_dev_list_entry) 
3018                 {
3019                         cont = 1;
3020                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3021                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3022                                 c = 1;
3023                         else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3024                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3025                 }
3026                 if (cont && !c)
3027                         continue;
3028                 
3029                 if (dev->scsi_dev == NULL)
3030                         continue;
3031
3032                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3033                         if (dev->scsi_dev->host->host_no ==
3034                                     d->scsi_dev->host->host_no) 
3035                         {
3036                                 found = 1;
3037                                 break;
3038                         }
3039                 }
3040                 if (!found)
3041                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3042         }
3043
3044         /*
3045          * We assume here that for all commands already sent to the devices,
3046          * completion callbacks will be called on/after scsi_reset_provider().
3047          */
3048
3049         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3050                 /* dev->scsi_dev must be non-NULL here */
3051                 TRACE(TRACE_MGMT, "Resetting host %d bus",
3052                       dev->scsi_dev->host->host_no);
3053                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3054                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3055                       dev->scsi_dev->host->host_no,
3056                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3057                 if (rc != SUCCESS) {
3058                         /* Reset failed even after any escalation done inside scsi_reset_provider() */
3059                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3060                 }
3061         }
3062
3063         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3064                 if (dev->scsi_dev != NULL)
3065                         dev->scsi_dev->was_reset = 0;
3066         }
3067
3068         up(&scst_mutex);
3069
3070         spin_lock_irq(&scst_list_lock);
3071         tm_dbg_task_mgmt("TARGET RESET");
3072         res = scst_set_mcmd_next_state(mcmd);
3073         spin_unlock_irq(&scst_list_lock);
3074
3075         TRACE_EXIT_RES(res);
3076         return res;
3077 }
3078
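/*
 * The host_devs list above exists so that at most one bus reset is issued
 * per SCSI host: devices sharing a host_no are collapsed into a single
 * scsi_reset_provider(SCSI_TRY_RESET_BUS) call.
 */
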
3079 /* Returns 0 if the command processing should be continued, <0 otherwise */
3080 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3081 {
3082         int res, rc;
3083         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3084         struct scst_device *dev = tgt_dev->acg_dev->dev;
3085
3086         TRACE_ENTRY();
3087
3088         TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3089                 mcmd);
3090
3091         spin_lock_bh(&dev->dev_lock);
3092         __scst_block_dev(dev);
3093         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3094         spin_unlock_bh(&dev->dev_lock);
3095
3096         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3097         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3098                 goto out_tm_dbg;
3099
3100         if (dev->scsi_dev != NULL) {
3101                 TRACE(TRACE_MGMT, "Resetting SCSI device (host %d)",
3102                       dev->scsi_dev->host->host_no);
3103                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3104                 if (rc != SUCCESS)
3105                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3106                 dev->scsi_dev->was_reset = 0;
3107         }
3108
3109 out_tm_dbg:
3110         spin_lock_irq(&scst_list_lock);
3111         tm_dbg_task_mgmt("LUN RESET");
3112         res = scst_set_mcmd_next_state(mcmd);
3113         spin_unlock_irq(&scst_list_lock);
3114
3115         TRACE_EXIT_RES(res);
3116         return res;
3117 }
3118
3119 /* Returns 0 if the command processing should be continued, <0 otherwise */
3120 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3121         int nexus_loss)
3122 {
3123         int res;
3124         struct scst_session *sess = mcmd->sess;
3125         struct scst_tgt_dev *tgt_dev;
3126
3127         TRACE_ENTRY();
3128
3129         if (nexus_loss) {
3130                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3131                         mcmd);
3132         } else {
3133                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3134                         mcmd);
3135         }
3136
3137         down(&scst_mutex);
3138         list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3139                 sess_tgt_dev_list_entry) 
3140         {
3141                 struct scst_device *dev = tgt_dev->acg_dev->dev;
3142                 int rc;
3143
3144                 spin_lock_bh(&dev->dev_lock);
3145                 __scst_block_dev(dev);
3146                 spin_unlock_bh(&dev->dev_lock);
3147
3148                 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3149                 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3150                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3151
3152                 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3153                 if (nexus_loss)
3154                         scst_reset_tgt_dev(tgt_dev, 1);
3155         }
3156         up(&scst_mutex);
3157
3158         spin_lock_irq(&scst_list_lock);
3159         res = scst_set_mcmd_next_state(mcmd);
3160         spin_unlock_irq(&scst_list_lock);
3161
3162         TRACE_EXIT_RES(res);
3163         return res;
3164 }
3165
3166 /* Returns 0 if the command processing should be continued, <0 otherwise */
3167 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3168         int nexus_loss)
3169 {
3170         int res;
3171         struct scst_tgt *tgt = mcmd->sess->tgt;
3172         struct scst_session *sess;
3173         struct scst_device *dev;
3174         struct scst_tgt_dev *tgt_dev;
3175
3176         TRACE_ENTRY();
3177
3178         if (nexus_loss) {
3179                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3180                         mcmd);
3181         } else {
3182                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3183                         mcmd);
3184         }
3185
3186         down(&scst_mutex);
3187
3188         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3189                 spin_lock_bh(&dev->dev_lock);
3190                 __scst_block_dev(dev);
3191                 spin_unlock_bh(&dev->dev_lock);
3192         }
3193
3194         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3195                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3196                         sess_tgt_dev_list_entry) 
3197                 {
3198                         int rc;
3199
3200                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3201                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3202                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3203
3204                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3205                         if (nexus_loss)
3206                                 scst_reset_tgt_dev(tgt_dev, 1);
3207                 }
3208         }
3209
3210         up(&scst_mutex);
3211
3212         spin_lock_irq(&scst_list_lock);
3213         res = scst_set_mcmd_next_state(mcmd);
3214         spin_unlock_irq(&scst_list_lock);
3215
3216         TRACE_EXIT_RES(res);
3217         return res;
3218 }
3219
3220 /* Returns 0 if the command processing should be continued, <0 otherwise */
3221 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3222 {
3223         int res = 0;
3224
3225         TRACE_ENTRY();
3226
3227         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3228
3229         switch (mcmd->fn) {
3230         case SCST_ABORT_TASK_SET:
3231         case SCST_CLEAR_TASK_SET:
3232                 res = scst_abort_task_set(mcmd);
3233                 break;
3234
3235         case SCST_LUN_RESET:
3236                 res = scst_lun_reset(mcmd);
3237                 break;
3238
3239         case SCST_TARGET_RESET:
3240                 res = scst_target_reset(mcmd);
3241                 break;
3242
3243         case SCST_ABORT_ALL_TASKS_SESS:
3244                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3245                 break;
3246
3247         case SCST_NEXUS_LOSS_SESS:
3248                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3249                 break;
3250
3251         case SCST_ABORT_ALL_TASKS:
3252                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3253                 break;
3254
3255         case SCST_NEXUS_LOSS:
3256                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3257                 break;
3258
3259         case SCST_CLEAR_ACA:
3260                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3261                 /* Nothing to do (yet) */
3262                 break;
3263
3264         default:
3265                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3266                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3267                 break;
3268         }
3269
3270         TRACE_EXIT_RES(res);
3271         return res;
3272 }
3273
3274 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3275 {
3276         struct scst_device *dev;
3277         struct scst_tgt_dev *tgt_dev;
3278
3279         TRACE_ENTRY();
3280
3281         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3282         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3283                 struct scst_mgmt_cmd *m;
3284                 spin_lock_irq(&scst_list_lock);
3285                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3286                                 mgmt_cmd_list_entry);
3287                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3288                         "cmd list", m);
3289                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3290                 spin_unlock_irq(&scst_list_lock);
3291         }
3292
3293         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3294         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3295                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3296
3297         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3298                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3299                       mcmd->sess->tgt->tgtt->name);
3300                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3301                 TRACE_MGMT_DBG("Target driver %s task_mgmt_fn_done() returned",
3302                       mcmd->sess->tgt->tgtt->name);
3303         }
3304
3305         switch (mcmd->fn) {
3306         case SCST_ABORT_TASK_SET:
3307         case SCST_CLEAR_TASK_SET:
3308         case SCST_LUN_RESET:
3309                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3310                 break;
3311
3312         case SCST_TARGET_RESET:
3313         case SCST_ABORT_ALL_TASKS:
3314         case SCST_NEXUS_LOSS:
3315                 down(&scst_mutex);
3316                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3317                         scst_unblock_dev(dev);
3318                 }
3319                 up(&scst_mutex);
3320                 break;
3321
3322         case SCST_NEXUS_LOSS_SESS:
3323         case SCST_ABORT_ALL_TASKS_SESS:
3324                 down(&scst_mutex);
3325                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3326                                 sess_tgt_dev_list_entry) {
3327                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3328                 }
3329                 up(&scst_mutex);
3330                 break;
3331
3332         case SCST_CLEAR_ACA:
3333         default:
3334                 break;
3335         }
3336
3337         mcmd->tgt_priv = NULL;
3338
3339         TRACE_EXIT();
3340         return;
3341 }
3342
3343 /* Returns >0 if the cmd should be requeued */
3344 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3345 {
3346         int res = 0;
3347
3348         TRACE_ENTRY();
3349
3350         TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3351
3352         while (1) {
3353                 switch (mcmd->state) {
3354                 case SCST_MGMT_CMD_STATE_INIT:
3355                         res = scst_mgmt_cmd_init(mcmd);
3356                         if (res)
3357                                 goto out;
3358                         break;
3359
3360                 case SCST_MGMT_CMD_STATE_READY:
3361                         if (scst_mgmt_cmd_exec(mcmd))
3362                                 goto out;
3363                         break;
3364
3365                 case SCST_MGMT_CMD_STATE_DONE:
3366                         scst_mgmt_cmd_send_done(mcmd);
3367                         break;
3368
3369                 case SCST_MGMT_CMD_STATE_FINISHED:
3370                         goto out_free;
3371
3372 #ifdef EXTRACHECKS
3373                 case SCST_MGMT_CMD_STATE_EXECUTING:
3374                         BUG();
3375 #endif
3376
3377                 default:
3378                         PRINT_ERROR_PR("Unknown state %d of management command",
3379                                     mcmd->state);
3380                         res = -1;
3381                         goto out_free;
3382                 }
3383         }
3384
3385 out:
3386         TRACE_EXIT_RES(res);
3387         return res;
3388
3389 out_free:
3390         scst_free_mgmt_cmd(mcmd, 1);
3391         goto out;
3392 }
3393
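/*
 * State machine driven by scst_process_mgmt_cmd():
 *
 *   INIT -> READY -> [EXECUTING while deferred cmds are outstanding] ->
 *   DONE -> FINISHED
 *
 * INIT resolves the LUN (ABORT TASK instead looks the command up by tag
 * and may go straight to EXECUTING/DONE), READY runs the actual TM
 * function, DONE notifies the target driver via task_mgmt_fn_done() and
 * unblocks the affected devices, and FINISHED frees the mgmt command.
 */
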
3394 static inline int test_mgmt_cmd_list(void)
3395 {
3396         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3397                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3398                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3399         return res;
3400 }
3401
3402 int scst_mgmt_cmd_thread(void *arg)
3403 {
3404         struct scst_mgmt_cmd *mcmd;
3405
3406         TRACE_ENTRY();
3407
3408         daemonize("scsi_tgt_mc");
3409         recalc_sigpending();
3410         current->flags |= PF_NOFREEZE;
3411
3412         spin_lock_irq(&scst_list_lock);
3413         while (1) {
3414                 wait_queue_t wait;
3415                 init_waitqueue_entry(&wait, current);
3416
3417                 if (!test_mgmt_cmd_list()) {
3418                         add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3419                                                  &wait);
3420                         for (;;) {
3421                                 set_current_state(TASK_INTERRUPTIBLE);
3422                                 if (test_mgmt_cmd_list())
3423                                         break;
3424                                 spin_unlock_irq(&scst_list_lock);
3425                                 schedule();
3426                                 spin_lock_irq(&scst_list_lock);
3427                         }
3428                         set_current_state(TASK_RUNNING);
3429                         remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3430                 }
3431
3432                 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3433                        !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3434                 {
3435                         int rc;
3436                         mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3437                                           typeof(*mcmd), mgmt_cmd_list_entry);
3438                         TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3439                               mcmd);
3440                         list_move_tail(&mcmd->mgmt_cmd_list_entry,
3441                                        &scst_mgmt_cmd_list);
3442                         spin_unlock_irq(&scst_list_lock);
3443                         rc = scst_process_mgmt_cmd(mcmd);
3444                         spin_lock_irq(&scst_list_lock);
3445                         if (rc > 0) {
3446                                 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3447                                         "of active mgmt cmd list", mcmd);
3448                                 list_move(&mcmd->mgmt_cmd_list_entry,
3449                                        &scst_active_mgmt_cmd_list);
3450                         }
3451                 }
3452
3453                 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3454                     list_empty(&scst_active_mgmt_cmd_list)) 
3455                 {
3456                         break;
3457                 }
3458         }
3459         spin_unlock_irq(&scst_list_lock);
3460
3461         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3462                 smp_mb__after_atomic_dec();
3463                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3464                 up(scst_shutdown_mutex);
3465         }
3466
3467         TRACE_EXIT();
3468         return 0;
3469 }
3470
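/*
 * The thread above sleeps on scst_mgmt_cmd_list_waitQ under scst_list_lock,
 * drops the lock around the actual work in scst_process_mgmt_cmd(), and
 * puts a mgmt command back at the head of the active list when processing
 * reports that it should be retried (rc > 0).
 */
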
3471 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3472         *sess, int fn, int atomic, void *tgt_priv)
3473 {
3474         struct scst_mgmt_cmd *mcmd = NULL;
3475
3476         TRACE_ENTRY();
3477
3478         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3479                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3480                             "(target %s)", sess->tgt->tgtt->name);
3481                 goto out;
3482         }
3483
3484         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3485         if (mcmd == NULL)
3486                 goto out;
3487
3488         mcmd->sess = sess;
3489         mcmd->fn = fn;
3490         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3491         mcmd->tgt_priv = tgt_priv;
3492
3493 out:
3494         TRACE_EXIT();
3495         return mcmd;
3496 }
3497
3498 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3499         struct scst_mgmt_cmd *mcmd)
3500 {
3501         unsigned long flags;
3502         int res = 0;
3503
3504         TRACE_ENTRY();
3505
3506         scst_sess_get(sess);
3507
3508         spin_lock_irqsave(&scst_list_lock, flags);
3509
3510         sess->sess_cmd_count++;
3511
3512 #ifdef EXTRACHECKS
3513         if (unlikely(sess->shutting_down)) {
3514                 PRINT_ERROR_PR("%s",
3515                         "New mgmt cmd while shutting down the session");
3516                 BUG();
3517         }
3518 #endif
3519
3520         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3521                 switch(sess->init_phase) {
3522                 case SCST_SESS_IPH_INITING:
3523                         TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
3524                                 mcmd);
3525                         list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3526                                 &sess->init_deferred_mcmd_list);
3527                         goto out_unlock;
3528                 case SCST_SESS_IPH_SUCCESS:
3529                         break;
3530                 case SCST_SESS_IPH_FAILED:
3531                         res = -1;
3532                         goto out_unlock;
3533                 default:
3534                         BUG();
3535                 }
3536         }
3537
3538         TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3539         list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3540
3541         spin_unlock_irqrestore(&scst_list_lock, flags);
3542
3543         wake_up(&scst_mgmt_cmd_list_waitQ);
3544
3545 out:
3546         TRACE_EXIT();
3547         return res;
3548
3549 out_unlock:
3550         spin_unlock_irqrestore(&scst_list_lock, flags);
3551         goto out;
3552 }
3553
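/*
 * Mgmt commands that arrive while the session is still initializing
 * (SCST_SESS_IPH_INITING) are parked on the session's
 * init_deferred_mcmd_list; presumably the session-init completion path
 * moves them to the active list once the session becomes ready.
 */
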
3554 /* 
3555  * Must not be called in parallel with scst_unregister_session() for the
3556  * same sess
3557  */
3558 int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
3559                         const uint8_t *lun, int lun_len, int atomic,
3560                         void *tgt_priv)
3561 {
3562         int res = -EFAULT;
3563         struct scst_mgmt_cmd *mcmd = NULL;
3564
3565         TRACE_ENTRY();
3566
3567         if (unlikely(fn == SCST_ABORT_TASK)) {
3568                 PRINT_ERROR_PR("%s() for ABORT TASK called", __FUNCTION__);
3569                 res = -EINVAL;
3570                 goto out;
3571         }
3572
3573         mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_priv);
3574         if (mcmd == NULL)
3575                 goto out;
3576
3577         mcmd->lun = scst_unpack_lun(lun, lun_len);
3578         if (mcmd->lun == (lun_t)-1)
3579                 goto out_free;
3580
3581         TRACE(TRACE_MGMT, "sess=%p, lun=%Ld", sess, (uint64_t)mcmd->lun);
3582
3583         if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
3584                 goto out_free;
3585
3586         res = 0;
3587
3588 out:
3589         TRACE_EXIT_RES(res);
3590         return res;
3591
3592 out_free:
3593         scst_free_mgmt_cmd(mcmd, 0);
3594         mcmd = NULL;
3595         goto out;
3596 }
3597
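/*
 * Usage sketch (illustrative only, not part of the build): a target driver
 * that received, for example, a LUN RESET from its initiator could call
 * scst_rx_mgmt_fn_lun() from a context that may sleep roughly like this;
 * "my_sess", "lun_buf" and "lun_buf_len" are placeholders supplied by the
 * driver:
 *
 *	if (scst_rx_mgmt_fn_lun(my_sess, SCST_LUN_RESET, lun_buf,
 *				lun_buf_len, 0, NULL) != 0) {
 *		// could not queue the TM request, fail it to the initiator
 *	}
 *
 * The final status is reported asynchronously through the driver's
 * task_mgmt_fn_done() callback, which must be provided (see the check in
 * scst_pre_rx_mgmt_cmd() above).
 */
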
3598 /* 
3599  * Must not be called in parallel with scst_unregister_session() for the
3600  * same sess
3601  */
3602 int scst_rx_mgmt_fn_tag(struct scst_session *sess, int fn, uint32_t tag,
3603                        int atomic, void *tgt_priv)
3604 {
3605         int res = -EFAULT;
3606         struct scst_mgmt_cmd *mcmd = NULL;
3607
3608         TRACE_ENTRY();
3609
3610         if (unlikely(fn != SCST_ABORT_TASK)) {