1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43         unsigned long *pflags, int left_locked)
44 {
45         int res;
46
47         TRACE_ENTRY();
48
49         TRACE_DBG("Moving cmd %p to cmd list", cmd);
50         list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
51
52         /* This is an inline func., so unneeded code will be optimized out */
53         if (pflags)
54                 spin_unlock_irqrestore(&scst_list_lock, *pflags);
55         else
56                 spin_unlock_irq(&scst_list_lock);
57
58         res = __scst_process_active_cmd(cmd, context, left_locked);
59
60         TRACE_EXIT_RES(res);
61         return res;
62 }
63
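/*
 * Schedules the per-CPU SCST tasklet; used when SCST_CONTEXT_TASKLET
 * processing is requested (e.g. for commands completed from IRQ context).
 */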
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We suppose that other CPU(s) are rather idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  *  Must not be called in parallel with scst_unregister_session() for the
87  * same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both a wrong LUN and a wrong CDB, defer the error reporting
115          * to scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
132
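/*
 * Called by target drivers once a command obtained from scst_rx_cmd() has
 * been filled in (tag, expected transfer values, etc.). A minimal sketch of
 * a driver's receive path, with hypothetical variable names and a
 * non-atomic context assumed:
 *
 *	cmd = scst_rx_cmd(sess, lun_buf, lun_len, cdb_buf, cdb_len, 0);
 *	if (cmd == NULL)
 *		return -ENOMEM;
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
 *
 * The function accounts the command in the session, defers or fails it if
 * the session is still initializing, and then queues it for processing in
 * the requested context (direct, thread or tasklet).
 */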
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
134 {
135         int res = 0;
136         unsigned long flags = 0;
137         struct scst_session *sess = cmd->sess;
138
139         TRACE_ENTRY();
140
141         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142         TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
143                 (uint64_t)cmd->lun, cmd->cdb_len);
144         TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Recieving CDB",
145                 cmd->cdb, cmd->cdb_len);
146
147         if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
149         {
150                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
152                         cmd->tgtt->name);
153                 pref_context = SCST_CONTEXT_TASKLET;
154         }
155
156         spin_lock_irqsave(&scst_list_lock, flags);
157
158         /* Let's do it here, this will save us a lock or an atomic operation */
159         sess->sess_cmd_count++;
160
161         list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
162
163         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164                 switch(sess->init_phase) {
165                 case SCST_SESS_IPH_SUCCESS:
166                         break;
167                 case SCST_SESS_IPH_INITING:
168                         TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169                         list_add_tail(&cmd->cmd_list_entry, 
170                                 &sess->init_deferred_cmd_list);
171                         goto out_unlock_flags;
172                 case SCST_SESS_IPH_FAILED:
173                         scst_set_busy(cmd);
174                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
175                         TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176                         list_add_tail(&cmd->cmd_list_entry, 
177                                 &scst_active_cmd_list);
178                         goto active;
179                 default:
180                         BUG();
181                 }
182         }
183
184         if (unlikely(cmd->lun == (lun_t)-1)) {
185                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186                 scst_set_cmd_error(cmd,
187                         SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
191                 goto active;
192         }
193
194         if (unlikely(cmd->cdb_len == 0)) {
195                 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196                 scst_set_cmd_error(cmd,
197                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200                 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
201                 goto active;
202         }
203
204         cmd->state = SCST_CMD_STATE_INIT;
205
206         TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207         list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
208
209         switch (pref_context) {
210         case SCST_CONTEXT_DIRECT:
211         case SCST_CONTEXT_DIRECT_ATOMIC:
212                 res = scst_do_job_init(&scst_init_cmd_list);
213                 if (res > 0)
214                         goto out_unlock_flags;
215                 break;
216
217         case SCST_CONTEXT_THREAD:
218                 goto out_thread_unlock_flags;
219
220         case SCST_CONTEXT_TASKLET:
221                 scst_schedule_tasklet();
222                 goto out_unlock_flags;
223
224         default:
225                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
226                             pref_context);
227                 goto out_thread_unlock_flags;
228         }
229
230 active:
231         switch (pref_context) {
232         case SCST_CONTEXT_DIRECT:
233         case SCST_CONTEXT_DIRECT_ATOMIC:
234                 scst_process_active_cmd(cmd, pref_context, &flags, 0);
235                 break;
236
237         case SCST_CONTEXT_THREAD:
238                 goto out_thread_unlock_flags;
239
240         case SCST_CONTEXT_TASKLET:
241                 scst_schedule_tasklet();
242                 goto out_unlock_flags;
243
244         default:
245                 PRINT_ERROR_PR("Context %x is undefined, using thread one",
246                             pref_context);
247                 goto out_thread_unlock_flags;
248         }
249
250 out:
251         TRACE_EXIT();
252         return;
253
254 out_unlock_flags:
255         spin_unlock_irqrestore(&scst_list_lock, flags);
256         goto out;
257
258 out_thread_unlock_flags:
259         cmd->non_atomic_only = 1;
260         spin_unlock_irqrestore(&scst_list_lock, flags);
261         wake_up(&scst_list_waitQ);
262         goto out;
263 }
264
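/*
 * Translates the CDB into data direction and transfer length using
 * scst_get_cdb_info() and, for non-local commands, the dev handler's
 * parse() callback. For unknown opcodes it falls back to the
 * initiator-supplied expected values, when available.
 */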
265 static int scst_parse_cmd(struct scst_cmd *cmd)
266 {
267         int res = SCST_CMD_STATE_RES_CONT_SAME;
268         int state;
269         struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270         struct scst_device *dev = cmd->dev;
271         struct scst_info_cdb cdb_info;
272         int atomic = scst_cmd_atomic(cmd);
273         int set_dir = 1;
274
275         TRACE_ENTRY();
276
277         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278                 TRACE_DBG("ABORTED set, returning ABORTED "
279                         "for cmd %p", cmd);
280                 goto out_xmit;
281         }
282
283         if (atomic && !dev->handler->parse_atomic) {
284                 TRACE_DBG("Dev handler %s parse() can not be "
285                       "called in atomic context, rescheduling to the thread",
286                       dev->handler->name);
287                 res = SCST_CMD_STATE_RES_NEED_THREAD;
288                 goto out;
289         }
290
291         /*
292          * Expected transfer data supplied by the SCSI transport via the
293          * target driver are untrusted, so we prefer to fetch them from the CDB.
294          * Additionally, not all transports support supplying the expected
295          * transfer data.
296          */
297
298         if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
299                         &cdb_info) != 0)) 
300         {
301                 static int t;
302                 if (t < 10) {
303                         t++;
304                         PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305                                 "Should you update scst_scsi_op_table?",
306                                 cmd->cdb[0], dev->handler->name);
307                 }
308                 if (scst_cmd_is_expected_set(cmd)) {
309                         TRACE(TRACE_MINOR, "Using initiator supplied values: "
310                                 "direction %d, transfer_len %d",
311                                 cmd->expected_data_direction,
312                                 cmd->expected_transfer_len);
313                         cmd->data_direction = cmd->expected_data_direction;
314                         cmd->bufflen = cmd->expected_transfer_len;
315                         /* Restore (most probably) lost CDB length */
316                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317                         if (cmd->cdb_len == -1) {
318                                 PRINT_ERROR_PR("Unable to get CDB length for "
319                                         "opcode 0x%02x. Returning INVALID "
320                                         "OPCODE", cmd->cdb[0]);
321                                 scst_set_cmd_error(cmd,
322                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
323                                 goto out_xmit;
324                         }
325                 }
326                 else {
327                         PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328                              "target %s not supplied expected values. "
329                              "Returning INVALID OPCODE.", cmd->cdb[0], 
330                              dev->handler->name, cmd->tgtt->name);
331                         scst_set_cmd_error(cmd,
332                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
333                         goto out_xmit;
334                 }
335         } else {
336                 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337                         "set %s), transfer_len=%d (expected len %d), flags=%d",
338                         cdb_info.op_name, cdb_info.direction,
339                         cmd->expected_data_direction,
340                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341                         cdb_info.transfer_len, cmd->expected_transfer_len,
342                         cdb_info.flags);
343
344                 /* Restore (most probably) lost CDB length */
345                 cmd->cdb_len = cdb_info.cdb_len;
346
347                 cmd->data_direction = cdb_info.direction;
348                 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349                         cmd->bufflen = cdb_info.transfer_len;
350                 /* else cmd->bufflen remains 0, as it was initialized */
351         }
352
353         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354                 PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
355                             "(opcode 0x%02x)", cmd->cdb[0]);
356                 scst_set_cmd_error(cmd,
357                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
358                 goto out_xmit;
359         }
360
361         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362                 PRINT_ERROR_PR("Linked commands are not supported "
363                             "(opcode 0x%02x)", cmd->cdb[0]);
364                 scst_set_cmd_error(cmd,
365                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
366                 goto out_xmit;
367         }
368
369         if (likely(!scst_is_cmd_local(cmd))) {
370                 TRACE_DBG("Calling dev handler %s parse(%p)",
371                       dev->handler->name, cmd);
372                 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373                 state = dev->handler->parse(cmd, &cdb_info);
374                 TRACE_DBG("Dev handler %s parse() returned %d",
375                         dev->handler->name, state);
376
377                 if (cmd->data_len == -1)
378                         cmd->data_len = cmd->bufflen;
379
380                 if (state == SCST_CMD_STATE_DEFAULT)
381                         state = SCST_CMD_STATE_PREPARE_SPACE;
382         }
383         else
384                 state = SCST_CMD_STATE_PREPARE_SPACE;
385
386 #ifdef EXTRACHECKS
387         if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
388                 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
389                         (state != SCST_CMD_STATE_DEV_PARSE)) ||
390                     ((cmd->bufflen != 0) && 
391                         (cmd->data_direction == SCST_DATA_NONE)) ||
392                     ((cmd->bufflen == 0) && 
393                         (cmd->data_direction != SCST_DATA_NONE)) ||
394                     ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
395                         (state > SCST_CMD_STATE_PREPARE_SPACE))) 
396                 {
397                         PRINT_ERROR_PR("Dev handler %s parse() returned "
398                                        "invalid cmd data_direction %d, "
399                                        "bufflen %zd or state %d (opcode 0x%x)",
400                                        dev->handler->name, 
401                                        cmd->data_direction, cmd->bufflen,
402                                        state, cmd->cdb[0]);
403                         goto out_error;
404                 }
405         }
406 #endif
407
408         switch (state) {
409         case SCST_CMD_STATE_PREPARE_SPACE:
410         case SCST_CMD_STATE_DEV_PARSE:
411         case SCST_CMD_STATE_RDY_TO_XFER:
412         case SCST_CMD_STATE_SEND_TO_MIDLEV:
413         case SCST_CMD_STATE_DEV_DONE:
414         case SCST_CMD_STATE_XMIT_RESP:
415         case SCST_CMD_STATE_FINISHED:
416                 cmd->state = state;
417                 res = SCST_CMD_STATE_RES_CONT_SAME;
418                 break;
419
420         case SCST_CMD_STATE_REINIT:
421                 cmd->tgt_dev_saved = tgt_dev_saved;
422                 cmd->state = state;
423                 res = SCST_CMD_STATE_RES_RESTART;
424                 set_dir = 0;
425                 break;
426
427         case SCST_CMD_STATE_NEED_THREAD_CTX:
428                 TRACE_DBG("Dev handler %s parse() requested thread "
429                       "context, rescheduling", dev->handler->name);
430                 res = SCST_CMD_STATE_RES_NEED_THREAD;
431                 set_dir = 0;
432                 break;
433
434         default:
435                 if (state >= 0) {
436                         PRINT_ERROR_PR("Dev handler %s parse() returned "
437                              "invalid cmd state %d (opcode %d)", 
438                              dev->handler->name, state, cmd->cdb[0]);
439                 } else {
440                         PRINT_ERROR_PR("Dev handler %s parse() returned "
441                                 "error %d (opcode %d)", dev->handler->name, 
442                                 state, cmd->cdb[0]);
443                 }
444                 goto out_error;
445         }
446
447         if ((cmd->resp_data_len == -1) && set_dir) {
448                 if (cmd->data_direction == SCST_DATA_READ)
449                         cmd->resp_data_len = cmd->bufflen;
450                 else
451                          cmd->resp_data_len = 0;
452         }
453         
454 out:
455         TRACE_EXIT_HRES(res);
456         return res;
457
458 out_error:
459         /* dev_done() will be called as part of the regular cmd's finish */
460         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
461         cmd->state = SCST_CMD_STATE_DEV_DONE;
462         res = SCST_CMD_STATE_RES_CONT_SAME;
463         goto out;
464
465 out_xmit:
466         cmd->state = SCST_CMD_STATE_XMIT_RESP;
467         res = SCST_CMD_STATE_RES_CONT_SAME;
468         goto out;
469 }
470
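/*
 * Delayed work that gradually raises scst_cur_max_cmd_mem back toward
 * scst_max_cmd_mem after it was lowered by scst_low_cur_max_cmd_mem(),
 * re-arming itself until the configured maximum is restored.
 */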
471 void scst_cmd_mem_work_fn(void *p)
472 {
473         TRACE_ENTRY();
474
475         spin_lock_bh(&scst_cmd_mem_lock);
476
477         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
478         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
479                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
480                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
481         } else {
482                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
483                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
484         }
485         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
486
487         spin_unlock_bh(&scst_cmd_mem_lock);
488
489         TRACE_EXIT();
490         return;
491 }
492
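/*
 * Accounts cmd->bufflen against the global command memory limit. Returns 0
 * if the command may proceed, or 1 after setting BUSY/QUEUE FULL status and
 * switching the command to the XMIT_RESP state when the limit is exceeded.
 */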
493 int scst_check_mem(struct scst_cmd *cmd)
494 {
495         int res = 0;
496
497         TRACE_ENTRY();
498
499         if (cmd->mem_checked)
500                 goto out;
501
502         spin_lock_bh(&scst_cmd_mem_lock);
503
504         scst_cur_cmd_mem += cmd->bufflen;
505         cmd->mem_checked = 1;
506         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
507                 goto out_unlock;
508
509         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
510                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
511                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
512                 (cmd->sess->initiator_name[0] == '\0') ?
513                   "Anonymous" : cmd->sess->initiator_name,
514                 scst_cur_max_cmd_mem >> 10);
515
516         scst_cur_cmd_mem -= cmd->bufflen;
517         cmd->mem_checked = 0;
518         scst_set_busy(cmd);
519         cmd->state = SCST_CMD_STATE_XMIT_RESP;
520         res = 1;
521
522 out_unlock:
523         spin_unlock_bh(&scst_cmd_mem_lock);
524
525 out:
526         TRACE_EXIT_RES(res);
527         return res;
528 }
529
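/*
 * Called after an allocation failure: shrinks the current command memory
 * limit to roughly 3/4 of what is in use now (but not below 16 MB) and
 * schedules scst_cmd_mem_work to restore it gradually.
 */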
530 static void scst_low_cur_max_cmd_mem(void)
531 {
532         TRACE_ENTRY();
533
534         if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
535                 cancel_delayed_work(&scst_cmd_mem_work);
536                 flush_scheduled_work();
537                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
538         }
539
540         spin_lock_bh(&scst_cmd_mem_lock);
541
542         scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
543                                 (scst_cur_cmd_mem >> 2);
544         if (scst_cur_max_cmd_mem < 16*1024*1024)
545                 scst_cur_max_cmd_mem = 16*1024*1024;
546
547         if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
548                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
549                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
550                 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
551         }
552
553         spin_unlock_bh(&scst_cmd_mem_lock);
554
555         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
556
557         TRACE_EXIT();
558         return;
559 }
560
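/*
 * Allocates the data buffer for the command, either via the target driver's
 * alloc_data_buf() or via scst_alloc_space(), and advances the state to
 * RDY_TO_XFER for writes or SEND_TO_MIDLEV otherwise.
 */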
561 static int scst_prepare_space(struct scst_cmd *cmd)
562 {
563         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
564
565         TRACE_ENTRY();
566
567         if (cmd->data_direction == SCST_DATA_NONE) {
568                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
569                 goto out;
570         }
571
572         r = scst_check_mem(cmd);
573         if (unlikely(r != 0))
574                 goto out;
575
576         if (cmd->data_buf_tgt_alloc) {
577                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
578                 r = cmd->tgtt->alloc_data_buf(cmd);
579                 cmd->data_buf_alloced = (r == 0);
580         } else
581                 r = scst_alloc_space(cmd);
582
583         if (r != 0) {
584                 if (scst_cmd_atomic(cmd)) {
585                         TRACE_MEM("%s", "Atomic memory allocation failed, "
586                               "rescheduling to the thread");
587                         res = SCST_CMD_STATE_RES_NEED_THREAD;
588                         goto out;
589                 } else
590                         goto out_no_space;
591         }
592
593         switch (cmd->data_direction) {
594         case SCST_DATA_WRITE:
595                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
596                 break;
597
598         default:
599                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
600                 break;
601         }
602
603 out:
604         TRACE_EXIT_HRES(res);
605         return res;
606
607 out_no_space:
608         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
609                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
610         scst_low_cur_max_cmd_mem();
611         scst_set_busy(cmd);
612         cmd->state = SCST_CMD_STATE_DEV_DONE;
613         res = SCST_CMD_STATE_RES_CONT_SAME;
614         goto out;
615 }
616
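/*
 * Handles SCST_TGT_RES_QUEUE_FULL from the target driver: moves the command
 * to the target's retry list and arms the retry timer, unless some commands
 * have finished in the meantime, in which case -1 is returned so the caller
 * retries immediately.
 */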
617 /* No locks */
618 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
619 {
620         struct scst_tgt *tgt = cmd->sess->tgt;
621         int res = 0;
622         unsigned long flags;
623
624         TRACE_ENTRY();
625
626         spin_lock_irqsave(&tgt->tgt_lock, flags);
627         tgt->retry_cmds++;
628         smp_mb();
629         TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
630               tgt->retry_cmds);
631         if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
632                 /* At least one cmd finished, so try again */
633                 tgt->retry_cmds--;
634                 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
635                       "(finished_cmds=%d, tgt->finished_cmds=%d, "
636                       "retry_cmds=%d)", finished_cmds,
637                       atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
638                 res = -1;
639                 goto out_unlock_tgt;
640         }
641
642         TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
643         /* IRQ already off */
644         spin_lock(&scst_list_lock);
645         list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
646         spin_unlock(&scst_list_lock);
647
648         if (!tgt->retry_timer_active) {
649                 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
650                 add_timer(&tgt->retry_timer);
651                 tgt->retry_timer_active = 1;
652         }
653
654 out_unlock_tgt:
655         spin_unlock_irqrestore(&tgt->tgt_lock, flags);
656
657         TRACE_EXIT_RES(res);
658         return res;
659 }
660
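/*
 * Asks the target driver to start receiving WRITE data via rdy_to_xfer().
 * SCST_TGT_RES_QUEUE_FULL leads to a delayed retry through
 * scst_queue_retry_cmd(); fatal or unknown return values turn into a
 * HARDWARE ERROR sense and the DEV_DONE state.
 */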
661 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
662 {
663         int res, rc;
664         int atomic = scst_cmd_atomic(cmd);
665
666         TRACE_ENTRY();
667
668         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
669         {
670                 TRACE_DBG("ABORTED set, returning ABORTED for "
671                         "cmd %p", cmd);
672                 goto out_dev_done;
673         }
674
675         if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
676                 TRACE_DBG("%s", "rdy_to_xfer() can not be "
677                       "called in atomic context, rescheduling to the thread");
678                 res = SCST_CMD_STATE_RES_NEED_THREAD;
679                 goto out;
680         }
681
682         while (1) {
683                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
684
685                 res = SCST_CMD_STATE_RES_CONT_NEXT;
686                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
687
688                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
689 #ifdef DEBUG_RETRY
690                 if (((scst_random() % 100) == 75))
691                         rc = SCST_TGT_RES_QUEUE_FULL;
692                 else
693 #endif
694                         rc = cmd->tgtt->rdy_to_xfer(cmd);
695                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
696
697                 if (likely(rc == SCST_TGT_RES_SUCCESS))
698                         goto out;
699
700                 /* Restore the previous state */
701                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
702
703                 switch (rc) {
704                 case SCST_TGT_RES_QUEUE_FULL:
705                 {
706                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
707                                 break;
708                         else
709                                 continue;
710                 }
711
712                 case SCST_TGT_RES_NEED_THREAD_CTX:
713                 {
714                         TRACE_DBG("Target driver %s "
715                               "rdy_to_xfer() requested thread "
716                               "context, rescheduling", cmd->tgtt->name);
717                         res = SCST_CMD_STATE_RES_NEED_THREAD;
718                         break;
719                 }
720
721                 default:
722                         goto out_error_rc;
723                 }
724                 break;
725         }
726
727 out:
728         TRACE_EXIT_HRES(res);
729         return res;
730
731 out_error_rc:
732         if (rc == SCST_TGT_RES_FATAL_ERROR) {
733                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
734                      "fatal error", cmd->tgtt->name);
735         } else {
736                 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
737                             "value %d", cmd->tgtt->name, rc);
738         }
739         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
740
741 out_dev_done:
742         cmd->state = SCST_CMD_STATE_DEV_DONE;
743         res = SCST_CMD_STATE_RES_CONT_SAME;
744         goto out;
745 }
746
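/*
 * Re-queues the command for further processing in the given context:
 * directly (possibly falling back to a thread), via the per-CPU tasklet,
 * or via the SCST threads waiting on scst_list_waitQ.
 */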
747 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
748         int check_retries)
749 {
750         unsigned long flags;
751         int rc;
752
753         TRACE_ENTRY();
754
755         TRACE_DBG("Context: %d", context);
756
757         switch(context) {
758         case SCST_CONTEXT_DIRECT:
759         case SCST_CONTEXT_DIRECT_ATOMIC:
760                 if (check_retries)
761                         scst_check_retries(cmd->tgt, 0);
762                 cmd->non_atomic_only = 0;
763                 rc = __scst_process_active_cmd(cmd, context, 0);
764                 if (rc == SCST_CMD_STATE_RES_NEED_THREAD)
765                         goto out_thread;
766                 break;
767
768         default:
769                 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
770                             context);
771                 /* fall through */
772         case SCST_CONTEXT_THREAD:
773                 if (check_retries)
774                         scst_check_retries(cmd->tgt, 1);
775                 goto out_thread;
776
777         case SCST_CONTEXT_TASKLET:
778                 if (check_retries)
779                         scst_check_retries(cmd->tgt, 1);
780                 cmd->non_atomic_only = 0;
781                 spin_lock_irqsave(&scst_list_lock, flags);
782                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
783                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
784                 spin_unlock_irqrestore(&scst_list_lock, flags);
785                 scst_schedule_tasklet();
786                 break;
787         }
788 out:
789         TRACE_EXIT();
790         return;
791
792 out_thread:
793         cmd->non_atomic_only = 1;
794         spin_lock_irqsave(&scst_list_lock, flags);
795         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
796         list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
797         spin_unlock_irqrestore(&scst_list_lock, flags);
798         wake_up(&scst_list_waitQ);
799         goto out;
800 }
801
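/*
 * Called by target drivers when the WRITE data requested by rdy_to_xfer()
 * has been received (or the transfer failed), e.g. (sketch, names assumed):
 *
 *	scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_TASKLET);
 *
 * Maps the rx status to the next command state and redirects the command
 * to the preferred processing context.
 */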
802 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
803 {
804         TRACE_ENTRY();
805
806         TRACE_DBG("Preferred context: %d", pref_context);
807         TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
808         cmd->non_atomic_only = 0;
809
810         if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
811                          (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
812         {
813                 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
814                         "SCST_CONTEXT_TASKLET instead\n", pref_context,
815                         cmd->tgtt->name);
816                 pref_context = SCST_CONTEXT_TASKLET;
817         }
818
819         switch (status) {
820         case SCST_RX_STATUS_SUCCESS:
821                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
822                 break;
823
824         case SCST_RX_STATUS_ERROR_SENSE_SET:
825                 cmd->state = SCST_CMD_STATE_DEV_DONE;
826                 break;
827
828         case SCST_RX_STATUS_ERROR_FATAL:
829                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
830                 /* fall through */
831         case SCST_RX_STATUS_ERROR:
832                 scst_set_cmd_error(cmd,
833                            SCST_LOAD_SENSE(scst_sense_hardw_error));
834                 cmd->state = SCST_CMD_STATE_DEV_DONE;
835                 break;
836
837         default:
838                 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
839                         status);
840                 cmd->state = SCST_CMD_STATE_DEV_DONE;
841                 break;
842         }
843
844         scst_proccess_redirect_cmd(cmd, pref_context, 1);
845
846         TRACE_EXIT();
847         return;
848 }
849
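/*
 * Post-execution sense processing: imports sense data from the request if
 * supplied, turns a DID_RESET host status into a Reset UA, handles the
 * "double UA" case after a device reset and feeds Unit Attentions into
 * scst_process_UA().
 */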
850 /* No locks supposed to be held */
851 static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
852         int rq_sense_len, int *next_state)
853 {
854         int sense_valid;
855         struct scst_device *dev = cmd->dev;
856         int dbl_ua_possible, ua_sent = 0;
857
858         TRACE_ENTRY();
859
860         /* If we had an internal bus reset behind us, set the command error UA */
861         if ((dev->scsi_dev != NULL) &&
862             unlikely(cmd->host_status == DID_RESET) &&
863             scst_is_ua_command(cmd))
864         {
865                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
866                       dev->scsi_dev->was_reset, cmd->host_status);
867                 scst_set_cmd_error(cmd,
868                    SCST_LOAD_SENSE(scst_sense_reset_UA));
869                 /* just in case */
870                 cmd->ua_ignore = 0;
871                 /* It looks like it is safe to clear was_reset here */
872                 dev->scsi_dev->was_reset = 0;
873                 smp_mb();
874         }
875
876         if (rq_sense != NULL) {
877                 sense_valid = SCST_SENSE_VALID(rq_sense);
878                 if (sense_valid) {
879                         memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
880                         /* 
881                          * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
882                          * in init_scst()
883                          */
884                         memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
885                 }
886         } else
887                 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
888
889         dbl_ua_possible = dev->dev_double_ua_possible;
890         TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
891         if (unlikely(dbl_ua_possible)) {
892                 spin_lock_bh(&dev->dev_lock);
893                 barrier(); /* to reread dev_double_ua_possible */
894                 dbl_ua_possible = dev->dev_double_ua_possible;
895                 if (dbl_ua_possible)
896                         ua_sent = dev->dev_reset_ua_sent;
897                 else
898                         spin_unlock_bh(&dev->dev_lock);
899         }
900
901         if (sense_valid) {
902                 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
903                              sizeof(cmd->sense_buffer));
904                 /* Check Unit Attention Sense Key */
905                 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
906                         if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
907                                 if (dbl_ua_possible) 
908                                 {
909                                         if (ua_sent) {
910                                                 TRACE(TRACE_MGMT, "%s", 
911                                                         "Double UA detected");
912                                                 /* Do retry */
913                                                 TRACE(TRACE_MGMT, "Retrying cmd %p "
914                                                         "(tag %d)", cmd, cmd->tag);
915                                                 cmd->status = 0;
916                                                 cmd->masked_status = 0;
917                                                 cmd->msg_status = 0;
918                                                 cmd->host_status = DID_OK;
919                                                 cmd->driver_status = 0;
920                                                 memset(cmd->sense_buffer, 0,
921                                                         sizeof(cmd->sense_buffer));
922                                                 cmd->retry = 1;
923                                                 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
924                                                 /* 
925                                                  * Dev is still blocked by this cmd, so
926                                                  * it's OK to clear SCST_DEV_SERIALIZED
927                                                  * here.
928                                                  */
929                                                 dev->dev_double_ua_possible = 0;
930                                                 dev->dev_serialized = 0;
931                                                 dev->dev_reset_ua_sent = 0;
932                                                 goto out_unlock;
933                                         } else
934                                                 dev->dev_reset_ua_sent = 1;
935                                 }
936                         }
937                         if (cmd->ua_ignore == 0) {
938                                 if (unlikely(dbl_ua_possible)) {
939                                         __scst_process_UA(dev, cmd,
940                                                 cmd->sense_buffer,
941                                                 sizeof(cmd->sense_buffer), 0);
942                                 } else {
943                                         scst_process_UA(dev, cmd,
944                                                 cmd->sense_buffer,
945                                                 sizeof(cmd->sense_buffer), 0);
946                                 }
947                         }
948                 }
949         }
950
951         if (unlikely(dbl_ua_possible)) {
952                 if (ua_sent && scst_is_ua_command(cmd)) {
953                         TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
954                         dev->dev_double_ua_possible = 0;
955                         dev->dev_serialized = 0;
956                         dev->dev_reset_ua_sent = 0;
957                 }
958                 spin_unlock_bh(&dev->dev_lock);
959         }
960
961 out:
962         TRACE_EXIT();
963         return;
964
965 out_unlock:
966         spin_unlock_bh(&dev->dev_lock);
967         goto out;
968 }
969
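/*
 * Returns 1 if the command finished with CHECK CONDITION but carries no
 * valid sense data; also converts retryable host statuses into BUSY and
 * other host errors into a HARDWARE ERROR sense.
 */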
970 static int scst_check_auto_sense(struct scst_cmd *cmd)
971 {
972         int res = 0;
973
974         TRACE_ENTRY();
975
976         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
977             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
978              SCST_NO_SENSE(cmd->sense_buffer)))
979         {
980                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
981                       "cmd->status=%x, cmd->masked_status=%x, "
982                       "cmd->msg_status=%x, cmd->host_status=%x, "
983                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
984                       cmd->msg_status, cmd->host_status, cmd->driver_status);
985                 res = 1;
986         } else if (unlikely(cmd->host_status)) {
987                 if ((cmd->host_status == DID_REQUEUE) ||
988                     (cmd->host_status == DID_IMM_RETRY) ||
989                     (cmd->host_status == DID_SOFT_ERROR)) {
990                         scst_set_busy(cmd);
991                 } else {
992                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
993                                 "received, returning HARDWARE ERROR instead",
994                                 cmd->host_status);
995                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
996                 }
997         }
998
999         TRACE_EXIT_RES(res);
1000         return res;
1001 }
1002
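/*
 * Common completion path for commands executed via the SCSI mid-level:
 * decodes the result into the per-cmd status fields, forces the Write
 * Protect bit in MODE SENSE data for read-only LUNs and runs the sense
 * checks via scst_check_sense().
 */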
1003 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1004         const uint8_t *rq_sense, int rq_sense_len, int *next_state)
1005 {
1006         unsigned char type;
1007
1008         TRACE_ENTRY();
1009
1010         cmd->status = result & 0xff;
1011         cmd->masked_status = status_byte(result);
1012         cmd->msg_status = msg_byte(result);
1013         cmd->host_status = host_byte(result);
1014         cmd->driver_status = driver_byte(result);
1015         TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, "
1016               "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
1017               "cmd->driver_status=%x", result, cmd->status,
1018               cmd->masked_status, cmd->msg_status, cmd->host_status,
1019               cmd->driver_status);
1020
1021         cmd->completed = 1;
1022
1023         scst_dec_on_dev_cmd(cmd);
1024
1025         type = cmd->dev->handler->type;
1026         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1027             cmd->tgt_dev->acg_dev->rd_only_flag &&
1028             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1029              type == TYPE_TAPE)) {
1030                 int32_t length;
1031                 uint8_t *address;
1032
1033                 length = scst_get_buf_first(cmd, &address);
1034                 TRACE_DBG("length %d", length);
1035                 if (unlikely(length <= 0)) {
1036                         PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1037                                 __func__);
1038                         goto next;
1039                 }
1040                 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1041                         address[2] |= 0x80;   /* Write Protect*/
1042                 }
1043                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1044                         address[3] |= 0x80;   /* Write Protect*/
1045                 }
1046                 scst_put_buf(cmd, address);
1047         }
1048
1049 next:
1050         scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
1051
1052         TRACE_EXIT();
1053         return;
1054 }
1055
1056 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1057 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1058                                             struct scsi_request **req)
1059 {
1060         struct scst_cmd *cmd = NULL;
1061
1062         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1063                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1064
1065         if (cmd == NULL) {
1066                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1067                 if (*req)
1068                         scsi_release_request(*req);
1069         }
1070
1071         return cmd;
1072 }
1073
1074 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1075 {
1076         struct scsi_request *req = NULL;
1077         struct scst_cmd *cmd;
1078         int next_state;
1079
1080         TRACE_ENTRY();
1081
1082         WARN_ON(in_irq());
1083
1084         /*
1085          * We don't use scsi_cmd->resid, because:
1086          * 1. Many low level initiator drivers don't use (set) this field
1087          * 2. We determine the command's buffer size directly from CDB, 
1088          *    so scsi_cmd->resid is not relevant for us, and target drivers 
1089          *    should know the residual, if necessary, by comparing expected 
1090          *    and actual transfer sizes.
1091          */
1092
1093         cmd = scst_get_cmd(scsi_cmd, &req);
1094         if (cmd == NULL)
1095                 goto out;
1096
1097         next_state = SCST_CMD_STATE_DEV_DONE;
1098         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1099                 sizeof(req->sr_sense_buffer), &next_state);
1100
1101         /* Clear out request structure */
1102         req->sr_use_sg = 0;
1103         req->sr_sglist_len = 0;
1104         req->sr_bufflen = 0;
1105         req->sr_buffer = NULL;
1106         req->sr_underflow = 0;
1107         req->sr_request->rq_disk = NULL; /* disown request blk */
1108
1109         cmd->bufflen = req->sr_bufflen; //??
1110
1111         scst_release_request(cmd);
1112
1113         cmd->state = next_state;
1114         cmd->non_atomic_only = 0;
1115
1116         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1117
1118 out:
1119         TRACE_EXIT();
1120         return;
1121 }
1122 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1123 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1124 {
1125         struct scst_cmd *cmd;
1126         int next_state;
1127
1128         TRACE_ENTRY();
1129
1130         WARN_ON(in_irq());
1131
1132         /*
1133          * We don't use resid, because:
1134          * 1. Many low level initiator drivers don't use (set) this field
1135          * 2. We determine the command's buffer size directly from CDB,
1136          *    so resid is not relevant for us, and target drivers
1137          *    should know the residual, if necessary, by comparing expected
1138          *    and actual transfer sizes.
1139          */
1140
1141         cmd = (struct scst_cmd *)data;
1142         if (cmd == NULL)
1143                 goto out;
1144
1145         next_state = SCST_CMD_STATE_DEV_DONE;
1146         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE,
1147                 &next_state);
1148
1149         cmd->state = next_state;
1150         cmd->non_atomic_only = 0;
1151
1152         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1153
1154 out:
1155         TRACE_EXIT();
1156         return;
1157 }
1158 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1159
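/*
 * Completion callback used by the internal (scst_*_local()) command
 * handlers below: validates the requested next state, runs the sense
 * checks and redirects the command for further processing.
 */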
1160 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1161 {
1162         TRACE_ENTRY();
1163
1164         BUG_ON(in_irq());
1165
1166         scst_dec_on_dev_cmd(cmd);
1167
1168         if (next_state == SCST_CMD_STATE_DEFAULT)
1169                 next_state = SCST_CMD_STATE_DEV_DONE;
1170
1171         if (next_state == SCST_CMD_STATE_DEV_DONE) {
1172 #if defined(DEBUG) || defined(TRACING)
1173                 if (cmd->sg) {
1174                         int i;
1175                         struct scatterlist *sg = cmd->sg;
1176                         TRACE(TRACE_RECV_TOP, 
1177                               "Exec'd %d S/G(s) at %p sg[0].page at %p",
1178                               cmd->sg_cnt, sg, (void*)sg[0].page);
1179                         for(i = 0; i < cmd->sg_cnt; ++i) {
1180                                 TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
1181                                         "Exec'd sg", page_address(sg[i].page),
1182                                         sg[i].length);
1183                         }
1184                 }
1185 #endif
1186         }
1187
1188
1189 #ifdef EXTRACHECKS
1190         if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1191             (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1192             (next_state != SCST_CMD_STATE_FINISHED)) 
1193         {
1194                 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1195                             "state %d (opcode %d)", next_state, cmd->cdb[0]);
1196                 scst_set_cmd_error(cmd,
1197                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1198                 next_state = SCST_CMD_STATE_DEV_DONE;
1199         }
1200
1201         if (scst_check_auto_sense(cmd)) {
1202                 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1203                         "opcode %d", cmd->cdb[0]);
1204         }
1205 #endif
1206
1207         scst_check_sense(cmd, NULL, 0, &next_state);
1208
1209         cmd->state = next_state;
1210         cmd->non_atomic_only = 0;
1211
1212         scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
1213
1214         TRACE_EXIT();
1215         return;
1216 }
1217
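/*
 * Internal REPORT LUNS implementation: fills the first S/G entry with
 * 8-byte LUN descriptors for all tgt_devs visible to the session, sets the
 * LUN list length header and completes the command locally.
 */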
1218 static int scst_report_luns_local(struct scst_cmd *cmd)
1219 {
1220         int res = SCST_EXEC_COMPLETED;
1221         int dev_cnt = 0;
1222         int buffer_size;
1223         struct scst_tgt_dev *tgt_dev = NULL;
1224         uint8_t *buffer;
1225
1226         TRACE_ENTRY();
1227
1228         cmd->status = 0;
1229         cmd->masked_status = 0;
1230         cmd->msg_status = 0;
1231         cmd->host_status = DID_OK;
1232         cmd->driver_status = 0;
1233
1234         /* ToDo: use full SG buffer, not only the first entry */
1235         buffer_size = scst_get_buf_first(cmd, &buffer);
1236         if (unlikely(buffer_size <= 0))
1237                 goto out_err;
1238
1239         if (buffer_size < 16) {
1240                 goto out_put_err;
1241         }
1242
1243         memset(buffer, 0, buffer_size);
1244
1245         /* sess->sess_tgt_dev_list is protected by suspended activity */
1246         list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1247                             sess_tgt_dev_list_entry) 
1248         {
1249                 if (8 + 8 * dev_cnt + 2 <= buffer_size) {
1250                         buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1251                         buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
1252                 }
1253                 dev_cnt++;
1254                 /* Tmp, until ToDo above done */
1255                 if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
1256                         break;
1257         }
1258
1259         /* Set the response header */
1260         dev_cnt *= 8;
1261         buffer[0] = (dev_cnt >> 24) & 0xff;
1262         buffer[1] = (dev_cnt >> 16) & 0xff;
1263         buffer[2] = (dev_cnt >> 8) & 0xff;
1264         buffer[3] = dev_cnt & 0xff;
1265
1266         dev_cnt += 8;
1267
1268         scst_put_buf(cmd, buffer);
1269
1270         if (buffer_size > dev_cnt)
1271                 scst_set_resp_data_len(cmd, dev_cnt);
1272         
1273 out_done:
1274         cmd->completed = 1;
1275
1276         /* Report the result */
1277         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1278
1279         TRACE_EXIT_RES(res);
1280         return res;
1281         
1282 out_put_err:
1283         scst_put_buf(cmd, buffer);
1284
1285 out_err:
1286         scst_set_cmd_error(cmd,
1287                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1288         goto out_done;
1289 }
1290
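/*
 * Pre-processing for MODE SELECT / LOG SELECT: blocks the device and, if a
 * Unit Attention is pending, reports it instead of executing the command.
 */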
1291 static int scst_pre_select(struct scst_cmd *cmd)
1292 {
1293         int res = SCST_EXEC_NOT_COMPLETED;
1294
1295         TRACE_ENTRY();
1296
1297         if (scst_cmd_atomic(cmd)) {
1298                 res = SCST_EXEC_NEED_THREAD;
1299                 goto out;
1300         }
1301
1302         scst_block_dev(cmd->dev, 1);
1303         /* Device will be unblocked in scst_done_cmd_check() */
1304
1305         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1306                 int rc = scst_set_pending_UA(cmd);
1307                 if (rc == 0) {
1308                         res = SCST_EXEC_COMPLETED;
1309                         cmd->completed = 1;
1310                         /* Report the result */
1311                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1312                         goto out;
1313                 }
1314         }
1315
1316 out:
1317         TRACE_EXIT_RES(res);
1318         return res;
1319 }
1320
1321 static inline void scst_report_reserved(struct scst_cmd *cmd)
1322 {
1323         TRACE_ENTRY();
1324
1325         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1326         cmd->completed = 1;
1327         /* Report the result */
1328         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1329
1330         TRACE_EXIT();
1331         return;
1332 }
1333
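/*
 * Internal RESERVE(6)/RESERVE(10) handling: rejects third-party
 * reservations, reports a reservation conflict if the device is already
 * reserved by another session, otherwise marks all other tgt_devs of the
 * device as reserved.
 */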
1334 static int scst_reserve_local(struct scst_cmd *cmd)
1335 {
1336         int res = SCST_EXEC_NOT_COMPLETED;
1337         struct scst_device *dev;
1338         struct scst_tgt_dev *tgt_dev_tmp;
1339
1340         TRACE_ENTRY();
1341
1342         if (scst_cmd_atomic(cmd)) {
1343                 res = SCST_EXEC_NEED_THREAD;
1344                 goto out;
1345         }
1346
1347         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1348                 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1349                      "(lun=%Ld)", (uint64_t)cmd->lun);
1350                 scst_set_cmd_error(cmd,
1351                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1352                 cmd->completed = 1;
1353                 res = SCST_EXEC_COMPLETED;
1354                 goto out;
1355         }
1356
1357         dev = cmd->dev;
1358         scst_block_dev(dev, 1);
1359         /* Device will be unblocked in scst_done_cmd_check() */
1360
1361         spin_lock_bh(&dev->dev_lock);
1362
1363         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1364                 scst_report_reserved(cmd);
1365                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1366                 res = SCST_EXEC_COMPLETED;
1367                 goto out_unlock;
1368         }
1369
1370         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1371                             dev_tgt_dev_list_entry) 
1372         {
1373                 if (cmd->tgt_dev != tgt_dev_tmp)
1374                         set_bit(SCST_TGT_DEV_RESERVED, 
1375                                 &tgt_dev_tmp->tgt_dev_flags);
1376         }
1377         dev->dev_reserved = 1;
1378
1379 out_unlock:
1380         spin_unlock_bh(&dev->dev_lock);
1381         
1382 out:
1383         TRACE_EXIT_RES(res);
1384         return res;
1385 }
1386
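/*
 * Internal RELEASE handling: if the device is reserved by another session
 * the command completes with GOOD status without releasing; otherwise the
 * reservation bits are cleared for all tgt_devs of the device.
 */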
1387 static int scst_release_local(struct scst_cmd *cmd)
1388 {
1389         int res = SCST_EXEC_NOT_COMPLETED;
1390         struct scst_tgt_dev *tgt_dev_tmp;
1391         struct scst_device *dev;
1392
1393         TRACE_ENTRY();
1394
1395         dev = cmd->dev;
1396
1397         scst_block_dev(dev, 1);
1398         cmd->blocking = 1;
1399         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1400
1401         spin_lock_bh(&dev->dev_lock);
1402
1403         /* 
1404          * The device could have been RELEASED behind us, if the RESERVING
1405          * session was closed (see scst_free_tgt_dev()), but this doesn't really
1406          * matter, so take the lock and don't retest the DEV_RESERVED bits
1407          */
1408         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1409                 res = SCST_EXEC_COMPLETED;
1410                 cmd->status = 0;
1411                 cmd->masked_status = 0;
1412                 cmd->msg_status = 0;
1413                 cmd->host_status = DID_OK;
1414                 cmd->driver_status = 0;
1415         } else {
1416                 list_for_each_entry(tgt_dev_tmp,
1417                                     &dev->dev_tgt_dev_list,
1418                                     dev_tgt_dev_list_entry) 
1419                 {
1420                         clear_bit(SCST_TGT_DEV_RESERVED, 
1421                                 &tgt_dev_tmp->tgt_dev_flags);
1422                 }
1423                 dev->dev_reserved = 0;
1424         }
1425
1426         spin_unlock_bh(&dev->dev_lock);
1427
1428         if (res == SCST_EXEC_COMPLETED) {
1429                 cmd->completed = 1;
1430                 /* Report the result */
1431                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1432         }
1433
1434         TRACE_EXIT_RES(res);
1435         return res;
1436 }
1437
1438 /* 
1439  * The result of cmd execution, if any, should be reported 
1440  * via scst_cmd_done_local() 
1441  */
1442 static int scst_pre_exec(struct scst_cmd *cmd)
1443 {
1444         int res = SCST_EXEC_NOT_COMPLETED, rc;
1445         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1446
1447         TRACE_ENTRY();
1448
1449         /* Reserve check before Unit Attention */
1450         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
1451             (cmd->cdb[0] != INQUIRY) &&
1452             (cmd->cdb[0] != REPORT_LUNS) &&
1453             (cmd->cdb[0] != RELEASE) &&
1454             (cmd->cdb[0] != RELEASE_10) &&
1455             (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1456             (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1457             (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
1458         {
1459                 scst_report_reserved(cmd);
1460                 res = SCST_EXEC_COMPLETED;
1461                 goto out;
1462         }
1463
1464         /* If we had an internal bus reset, set the command error unit attention */
1465         if ((cmd->dev->scsi_dev != NULL) &&
1466             unlikely(cmd->dev->scsi_dev->was_reset) &&
1467             scst_is_ua_command(cmd)) 
1468         {
1469                 struct scst_device *dev = cmd->dev;
1470                 int done = 0;
1471                 /* Prevent more than one cmd from being triggered by was_reset */
1472                 spin_lock_bh(&dev->dev_lock);
1473                 barrier(); /* to reread was_reset */
1474                 if (dev->scsi_dev->was_reset) {
1475                         TRACE(TRACE_MGMT, "was_reset is %d", 1);
1476                         scst_set_cmd_error(cmd,
1477                                    SCST_LOAD_SENSE(scst_sense_reset_UA));
1478                         /* It looks like it is safe to clear was_reset here */
1479                         dev->scsi_dev->was_reset = 0;
1480                         smp_mb();
1481                         done = 1;
1482                 }
1483                 spin_unlock_bh(&dev->dev_lock);
1484
1485                 if (done)
1486                         goto out_done;
1487         }
1488
1489         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
1490             scst_is_ua_command(cmd)) 
1491         {
1492                 rc = scst_set_pending_UA(cmd);
1493                 if (rc == 0)
1494                         goto out_done;
1495         }
1496
1497         /* Check READ_ONLY device status */
1498         if (tgt_dev->acg_dev->rd_only_flag &&
1499             (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
1500              cmd->cdb[0] == WRITE_10 ||
1501              cmd->cdb[0] == WRITE_12 ||
1502              cmd->cdb[0] == WRITE_16 ||
1503              cmd->cdb[0] == WRITE_VERIFY ||
1504              cmd->cdb[0] == WRITE_VERIFY_12 ||
1505              cmd->cdb[0] == WRITE_VERIFY_16 ||
1506              (cmd->dev->handler->type == TYPE_TAPE &&
1507               (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1508         {
1509                 scst_set_cmd_error(cmd,
1510                            SCST_LOAD_SENSE(scst_sense_data_protect));
1511                 goto out_done;
1512         }
1513 out:
1514         TRACE_EXIT_RES(res);
1515         return res;
1516
1517 out_done:
1518         res = SCST_EXEC_COMPLETED;
1519         cmd->completed = 1;
1520         /* Report the result */
1521         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1522         goto out;
1523 }
1524
1525 /* 
1526  * The result of cmd execution, if any, should be reported 
1527  * via scst_cmd_done_local() 
1528  */
1529 static inline int scst_local_exec(struct scst_cmd *cmd)
1530 {
1531         int res = SCST_EXEC_NOT_COMPLETED;
1532
1533         TRACE_ENTRY();
1534
1535         /*
1536          * When adding new commands here, don't forget to update
1537          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1538          */
1539
1540         switch (cmd->cdb[0]) {
1541         case MODE_SELECT:
1542         case MODE_SELECT_10:
1543         case LOG_SELECT:
1544                 res = scst_pre_select(cmd);
1545                 break;
1546         case RESERVE:
1547         case RESERVE_10:
1548                 res = scst_reserve_local(cmd);
1549                 break;
1550         case RELEASE:
1551         case RELEASE_10:
1552                 res = scst_release_local(cmd);
1553                 break;
1554         case REPORT_LUNS:
1555                 res = scst_report_luns_local(cmd);
1556                 break;
1557         }
1558
1559         TRACE_EXIT_RES(res);
1560         return res;
1561 }
1562
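     /*
      * Executes the cmd: pre-exec checks, locally handled opcodes, the dev
      * handler's exec() and, finally, the SCSI mid-level. Returns
      * SCST_EXEC_COMPLETED, or SCST_EXEC_NEED_THREAD after restoring the cmd
      * state so that it can be retried in thread context.
      */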
1563 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1564 {
1565         int rc = SCST_EXEC_NOT_COMPLETED;
1566
1567         TRACE_ENTRY();
1568
1569         cmd->sent_to_midlev = 1;
1570         cmd->state = SCST_CMD_STATE_EXECUTING;
1571         cmd->scst_cmd_done = scst_cmd_done_local;
1572
1573         set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1574         smp_mb__after_set_bit();
1575
1576         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1577                 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1578                 goto out_aborted;
1579         }
1580
1581         rc = scst_pre_exec(cmd);
1582         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1583         if (rc != SCST_EXEC_NOT_COMPLETED) {
1584                 if (rc == SCST_EXEC_COMPLETED)
1585                         goto out;
1586                 else if (rc == SCST_EXEC_NEED_THREAD)
1587                         goto out_clear;
1588                 else
1589                         goto out_rc_error;
1590         }
1591
1592         rc = scst_local_exec(cmd);
1593         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1594         if (rc != SCST_EXEC_NOT_COMPLETED) {
1595                 if (rc == SCST_EXEC_COMPLETED)
1596                         goto out;
1597                 else if (rc == SCST_EXEC_NEED_THREAD)
1598                         goto out_clear;
1599                 else
1600                         goto out_rc_error;
1601         }
1602
1603         if (cmd->dev->handler->exec) {
1604                 struct scst_device *dev = cmd->dev;
1605                 TRACE_DBG("Calling dev handler %s exec(%p)",
1606                       dev->handler->name, cmd);
1607                 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1608                 cmd->scst_cmd_done = scst_cmd_done_local;
1609                 rc = dev->handler->exec(cmd);
1610                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1611                 TRACE_DBG("Dev handler %s exec() returned %d",
1612                       dev->handler->name, rc);
1613                 if (rc != SCST_EXEC_NOT_COMPLETED) {
1614                         if (rc == SCST_EXEC_COMPLETED)
1615                                 goto out;
1616                         else if (rc == SCST_EXEC_NEED_THREAD)
1617                                 goto out_clear;
1618                         else
1619                                 goto out_rc_error;
1620                 }
1621         }
1622
1623         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1624         
1625         if (unlikely(cmd->dev->scsi_dev == NULL)) {
1626                 PRINT_ERROR_PR("Command for virtual device must be "
1627                         "processed by device handler (lun %Ld)!",
1628                         (uint64_t)cmd->lun);
1629                 goto out_error;
1630         }
1631
1632 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1633         if (scst_alloc_request(cmd) != 0) {
1634                 PRINT_INFO_PR("%s", "Unable to allocate request, "
1635                         "sending BUSY status");
1636                 goto out_busy;
1637         }
1638         
1639         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1640                     (void *)cmd->scsi_req->sr_buffer,
1641                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1642                     cmd->retries);
1643 #else
1644         rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1645                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1646                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1647                         GFP_KERNEL);
1648         if (rc) {
1649                 PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1650                 goto out_error;
1651         }
1652 #endif
1653
1654         rc = SCST_EXEC_COMPLETED;
1655
1656 out:
1657         TRACE_EXIT();
1658         return rc;
1659
1660 out_clear:
1661         /* Restore the state */
1662         cmd->sent_to_midlev = 0;
1663         cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1664         goto out;
1665
1666 out_rc_error:
1667         PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1668                     "invalid code %d", cmd->dev->handler->name, rc);
1669         /* fall through */
1670
1671 out_error:
1672         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1673         cmd->completed = 1;
1674         cmd->state = SCST_CMD_STATE_DEV_DONE;
1675         rc = SCST_EXEC_COMPLETED;
1676         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1677         goto out;
1678
1679 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
1680 out_busy:
1681         scst_set_busy(cmd);
1682         cmd->completed = 1;
1683         cmd->state = SCST_CMD_STATE_DEV_DONE;
1684         rc = SCST_EXEC_COMPLETED;
1685         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1686         goto out;
1687 #endif
1688
1689 out_aborted:
1690         rc = SCST_EXEC_COMPLETED;
1691         /* Report the result. The cmd is not completed */
1692         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1693         goto out;
1694 }
1695
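     /*
      * Top level of the execution stage: checks atomic-context constraints,
      * per-device blocking and SN ordering, then calls scst_do_send_to_midlev()
      * for this cmd and for any deferred cmds that have become ready.
      * Returns an SCST_CMD_STATE_RES_* value.
      */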
1696 static int scst_send_to_midlev(struct scst_cmd *cmd)
1697 {
1698         int res, rc;
1699         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1700         struct scst_device *dev = cmd->dev;
1701         int expected_sn;
1702         int count;
1703         int atomic = scst_cmd_atomic(cmd);
1704
1705         TRACE_ENTRY();
1706
1707         res = SCST_CMD_STATE_RES_CONT_NEXT;
1708
1709         if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1710                 TRACE_DBG("Dev handler %s exec() can not be "
1711                       "called in atomic context, rescheduling to the thread",
1712                       dev->handler->name);
1713                 res = SCST_CMD_STATE_RES_NEED_THREAD;
1714                 goto out;
1715         }
1716
1717         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1718                 goto out;
1719
1720         scst_inc_cmd_count(); /* protect dev & tgt_dev */
1721
1722         if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1723                 rc = scst_do_send_to_midlev(cmd);
1724                 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1725                 if (rc == SCST_EXEC_NEED_THREAD) {
1726                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1727                               "thread context, rescheduling");
1728                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1729                         scst_dec_on_dev_cmd(cmd);
1730                         goto out_dec_cmd_count;
1731                 } else {
1732                         BUG_ON(rc != SCST_EXEC_COMPLETED);
1733                         goto out_unplug;
1734                 }
1735         }
1736
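             /*
              * Commands must be executed in SN (arrival) order. If this cmd's SN
              * isn't the expected one yet, defer it on the tgt_dev's
              * deferred_cmd_list and let the owner of the expected SN run first.
              */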
1737         expected_sn = tgt_dev->expected_sn;
1738         if (cmd->sn != expected_sn) {
1739                 spin_lock_bh(&tgt_dev->sn_lock);
1740                 tgt_dev->def_cmd_count++;
1741                 smp_mb();
1742                 barrier(); /* to reread expected_sn */
1743                 expected_sn = tgt_dev->expected_sn;
1744                 if (cmd->sn != expected_sn) {
1745                         scst_dec_on_dev_cmd(cmd);
1746                         TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1747                               "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1748                         list_add_tail(&cmd->sn_cmd_list_entry,
1749                                       &tgt_dev->deferred_cmd_list);
1750                         spin_unlock_bh(&tgt_dev->sn_lock);
1751                         /* !! At this point cmd can be already freed !! */
1752                         goto out_dec_cmd_count;
1753                 } else {
1754                         TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1755                               "expected_sn %d, continuing", expected_sn);
1756                         tgt_dev->def_cmd_count--;
1757                         spin_unlock_bh(&tgt_dev->sn_lock);
1758                 }
1759         }
1760
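             /*
              * Execute this cmd, then keep picking up deferred cmds whose SN has
              * become the expected one.
              */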
1761         count = 0;
1762         while(1) {
1763                 rc = scst_do_send_to_midlev(cmd);
1764                 if (rc == SCST_EXEC_NEED_THREAD) {
1765                         TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1766                               "thread context, rescheduling");
1767                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1768                         scst_dec_on_dev_cmd(cmd);
1769                         if (count != 0)
1770                                 goto out_unplug;
1771                         else
1772                                 goto out_dec_cmd_count;
1773                 }
1774                 BUG_ON(rc != SCST_EXEC_COMPLETED);
1775                 /* !! At this point cmd can be already freed !! */
1776                 count++;
1777                 expected_sn = __scst_inc_expected_sn(tgt_dev);
1778                 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1779                 if (cmd == NULL)
1780                         break;
1781                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1782                         break;
1783         }
1784
1785 out_unplug:
1786         if (dev->scsi_dev != NULL)
1787                 generic_unplug_device(dev->scsi_dev->request_queue);
1788
1789 out_dec_cmd_count:
1790         scst_dec_cmd_count();
1791         /* !! At this point sess, dev and tgt_dev can be already freed !! */
1792
1793 out:
1794         TRACE_EXIT_HRES(res);
1795         return res;
1796 }
1797
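     /*
      * Allocates an internal cmd (e.g. REQUEST SENSE) that inherits the session,
      * target and device of orig_cmd and is queued HEAD OF QUEUE. Returns the
      * new cmd, or NULL on allocation failure.
      */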
1798 static struct scst_cmd *scst_create_prepare_internal_cmd(
1799         struct scst_cmd *orig_cmd, int bufsize)
1800 {
1801         struct scst_cmd *res;
1802         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1803
1804         TRACE_ENTRY();
1805
1806         res = scst_alloc_cmd(gfp_mask);
1807         if (unlikely(res == NULL)) {
1808                 goto out;
1809         }
1810
1811         res->sess = orig_cmd->sess;
1812         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1813         res->atomic = scst_cmd_atomic(orig_cmd);
1814         res->internal = 1;
1815         res->tgtt = orig_cmd->tgtt;
1816         res->tgt = orig_cmd->tgt;
1817         res->dev = orig_cmd->dev;
1818         res->tgt_dev = orig_cmd->tgt_dev;
1819         res->lun = orig_cmd->lun;
1820         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1821         res->data_direction = SCST_DATA_UNKNOWN;
1822         res->orig_cmd = orig_cmd;
1823
1824         res->bufflen = bufsize;
1825         if (bufsize > 0) {
1826                 if (scst_alloc_space(res) != 0) {
1827                         PRINT_ERROR("Unable to create buffer (size %d) for internal cmd", bufsize);
1828                         goto out_free_res;
1829                 }
1830         }
1831
1832 out:
1833         TRACE_EXIT_HRES((unsigned long)res);
1834         return res;
1835
1836 out_free_res:
1837         scst_destroy_cmd(res);
1838         res = NULL;
1839         goto out;
1840 }
1841
1842 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1843 {
1844         TRACE_ENTRY();
1845
1846         if (cmd->bufflen > 0)
1847                 scst_release_space(cmd);
1848         scst_destroy_cmd(cmd);
1849
1850         TRACE_EXIT();
1851         return;
1852 }
1853
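     /*
      * Builds an internal REQUEST SENSE cmd for orig_cmd and puts it at the head
      * of the active cmd list. Returns SCST_CMD_STATE_RES_RESTART on success,
      * a negative value on failure.
      */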
1854 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1855 {
1856         int res = SCST_CMD_STATE_RES_RESTART;
1857 #define sbuf_size 252
1858         static const unsigned char request_sense[6] =
1859             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1860         struct scst_cmd *rs_cmd;
1861
1862         TRACE_ENTRY();
1863
1864         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1865         if (rs_cmd == NULL)
1866                 goto out_error;
1867
1868         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1869         rs_cmd->cdb_len = sizeof(request_sense);
1870         rs_cmd->data_direction = SCST_DATA_READ;
1871
1872         spin_lock_irq(&scst_list_lock);
1873         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1874         spin_unlock_irq(&scst_list_lock);
1875
1876 out:
1877         TRACE_EXIT_RES(res);
1878         return res;
1879
1880 out_error:
1881         res = -1;
1882         goto out;
1883 #undef sbuf_size
1884 }
1885
1886 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1887 {
1888         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1889         uint8_t *buf;
1890         int len;
1891
1892         TRACE_ENTRY();
1893
1894         BUG_ON(orig_cmd == NULL);
1895
1896         len = scst_get_buf_first(cmd, &buf);
1897
1898         if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1899             (!SCST_NO_SENSE(buf))) 
1900         {
1901                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1902                         buf, len);
1903                 memcpy(orig_cmd->sense_buffer, buf,
1904                         (sizeof(orig_cmd->sense_buffer) > len) ?
1905                                 len : sizeof(orig_cmd->sense_buffer));
1906         } else {
1907                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1908                         "REQUEST SENSE, returning HARDWARE ERROR");
1909                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1910         }
1911
1912         scst_put_buf(cmd, buf);
1913
1914         scst_free_internal_cmd(cmd);
1915
1916         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1917         return orig_cmd;
1918 }
1919
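     /*
      * Post-completion checks and fixups: issues an internal REQUEST SENSE on
      * CHECK CONDITION without sense data, sets the write-protect bit in MODE
      * SENSE data for read-only LUNs, clears NormACA in standard INQUIRY data
      * and does the reservation/UA bookkeeping for RESERVE/RELEASE and MODE/LOG
      * SELECT. Returns non-zero (with *pres set) if the cmd was redirected.
      */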
1920 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1921 {
1922         int res = 0, rc;
1923         unsigned char type;
1924
1925         TRACE_ENTRY();
1926
1927         if (cmd->cdb[0] == REQUEST_SENSE) {
1928                 if (cmd->internal)
1929                         cmd = scst_complete_request_sense(cmd);
1930         } else if (scst_check_auto_sense(cmd)) {
1931                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1932                             "without sense data (opcode 0x%x), issuing "
1933                             "REQUEST SENSE", cmd->cdb[0]);
1934                 rc = scst_prepare_request_sense(cmd);
1935                 if (rc > 0) {
1936                         *pres = rc;
1937                         res = 1;
1938                         goto out;
1939                 } else {
1940                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1941                                     "returning HARDWARE ERROR");
1942                         scst_set_cmd_error(cmd,
1943                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1944                 }
1945         }
1946
1947         type = cmd->dev->handler->type;
1948         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1949             cmd->tgt_dev->acg_dev->rd_only_flag &&
1950             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1951              type == TYPE_TAPE))
1952         {
1953                 int32_t length;
1954                 uint8_t *address;
1955
1956                 length = scst_get_buf_first(cmd, &address);
1957                 if (length <= 0)
1958                         goto out;
1959                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1960                         address[2] |= 0x80;   /* Write Protect*/
1961                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1962                         address[3] |= 0x80;   /* Write Protect*/
1963                 scst_put_buf(cmd, address);
1964         }
1965
1966         /* 
1967          * Check and clear NormACA option for the device, if necessary,
1968          * since we don't support ACA
1969          */
1970         if ((cmd->cdb[0] == INQUIRY) &&
1971             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1972             (cmd->resp_data_len > SCST_INQ_BYTE3))
1973         {
1974                 uint8_t *buffer;
1975                 int buflen;
1976
1977                 /* ToDo: all pages ?? */
1978                 buflen = scst_get_buf_first(cmd, &buffer);
1979                 if (buflen > 0) {
1980                         if (buflen > SCST_INQ_BYTE3) {
1981 #ifdef EXTRACHECKS
1982                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1983                                         PRINT_INFO_PR("NormACA set for device: "
1984                                             "lun=%Ld, type 0x%02x", 
1985                                             (uint64_t)cmd->lun, buffer[0]);
1986                                 }
1987 #endif
1988                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1989                         } else
1990                                 scst_set_cmd_error(cmd,
1991                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1992
1993                         scst_put_buf(cmd, buffer);
1994                 }
1995         }
1996
1997         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
1998                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
1999                                                 &cmd->tgt_dev->tgt_dev_flags)) {
2000                         struct scst_tgt_dev *tgt_dev_tmp;
2001                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
2002                               (uint64_t)cmd->lun, cmd->masked_status);
2003                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
2004                                      sizeof(cmd->sense_buffer));
2005                         /* Clearing the reservation */
2006                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
2007                                             dev_tgt_dev_list_entry) {
2008                                 clear_bit(SCST_TGT_DEV_RESERVED, 
2009                                         &tgt_dev_tmp->tgt_dev_flags);
2010                         }
2011                         cmd->dev->dev_reserved = 0;
2012                 }
2013                 scst_unblock_dev(cmd->dev);
2014         }
2015         
2016         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
2017                      (cmd->cdb[0] == MODE_SELECT_10) ||
2018                      (cmd->cdb[0] == LOG_SELECT)))
2019         {
2020                 if (cmd->status == 0) {
2021                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2022                                 "setting the SELECT UA (lun=%Ld)", 
2023                                 (uint64_t)cmd->lun);
2024                         spin_lock_bh(&scst_temp_UA_lock);
2025                         if (cmd->cdb[0] == LOG_SELECT) {
2026                                 scst_set_sense(scst_temp_UA,
2027                                         sizeof(scst_temp_UA),
2028                                         UNIT_ATTENTION, 0x2a, 0x02);
2029                         } else {
2030                                 scst_set_sense(scst_temp_UA,
2031                                         sizeof(scst_temp_UA),
2032                                         UNIT_ATTENTION, 0x2a, 0x01);
2033                         }
2034                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2035                                 sizeof(scst_temp_UA), 1);
2036                         spin_unlock_bh(&scst_temp_UA_lock);
2037                 }
2038                 scst_unblock_dev(cmd->dev);
2039         }
2040
2041 out:
2042         TRACE_EXIT_RES(res);
2043         return res;
2044 }
2045
2046 static int scst_dev_done(struct scst_cmd *cmd)
2047 {
2048         int res = SCST_CMD_STATE_RES_CONT_SAME;
2049         int state;
2050         int atomic = scst_cmd_atomic(cmd);
2051
2052         TRACE_ENTRY();
2053
2054         if (atomic && !cmd->dev->handler->dev_done_atomic &&
2055             cmd->dev->handler->dev_done) 
2056         {
2057                 TRACE_DBG("Dev handler %s dev_done() can not be "
2058                       "called in atomic context, rescheduling to the thread",
2059                       cmd->dev->handler->name);
2060                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2061                 goto out;
2062         }
2063
2064         if (scst_done_cmd_check(cmd, &res))
2065                 goto out;
2066
2067         state = SCST_CMD_STATE_XMIT_RESP;
2068         if (likely(!scst_is_cmd_local(cmd)) && 
2069             likely(cmd->dev->handler->dev_done != NULL))
2070         {
2071                 int rc;
2072                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2073                       cmd->dev->handler->name, cmd);
2074                 rc = cmd->dev->handler->dev_done(cmd);
2075                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2076                       cmd->dev->handler->name, rc);
2077                 if (rc != SCST_CMD_STATE_DEFAULT)
2078                         state = rc;
2079         }
2080
2081         switch (state) {
2082         case SCST_CMD_STATE_REINIT:
2083                 cmd->state = state;
2084                 res = SCST_CMD_STATE_RES_RESTART;
2085                 break;
2086
2087         case SCST_CMD_STATE_DEV_PARSE:
2088         case SCST_CMD_STATE_PREPARE_SPACE:
2089         case SCST_CMD_STATE_RDY_TO_XFER:
2090         case SCST_CMD_STATE_SEND_TO_MIDLEV:
2091         case SCST_CMD_STATE_DEV_DONE:
2092         case SCST_CMD_STATE_XMIT_RESP:
2093         case SCST_CMD_STATE_FINISHED:
2094                 cmd->state = state;
2095                 res = SCST_CMD_STATE_RES_CONT_SAME;
2096                 break;
2097
2098         case SCST_CMD_STATE_NEED_THREAD_CTX:
2099                 TRACE_DBG("Dev handler %s dev_done() requested "
2100                       "thread context, rescheduling",
2101                       cmd->dev->handler->name);
2102                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2103                 break;
2104
2105         default:
2106                 if (state >= 0) {
2107                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2108                                 "invalid cmd state %d", 
2109                                 cmd->dev->handler->name, state);
2110                 } else {
2111                         PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2112                                 "error %d", cmd->dev->handler->name, 
2113                                 state);
2114                 }
2115                 scst_set_cmd_error(cmd,
2116                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2117                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2118                 res = SCST_CMD_STATE_RES_CONT_SAME;
2119                 break;
2120         }
2121
2122 out:
2123         TRACE_EXIT_HRES(res);
2124         return res;
2125 }
2126
2127 static int scst_xmit_response(struct scst_cmd *cmd)
2128 {
2129         int res, rc;
2130         int atomic = scst_cmd_atomic(cmd);
2131
2132         TRACE_ENTRY();
2133
2134         /* 
2135          * If the cmd is completed without having been sent to the mid-level,
2136          * advance expected_sn here too, so other commands aren't delayed.
2137          */
2138         if (unlikely(cmd->sent_to_midlev == 0) &&
2139             (cmd->tgt_dev != NULL))
2140         {
2141                 TRACE(TRACE_SCSI_SERIALIZING,
2142                       "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
2143                 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
2144                 cmd->sent_to_midlev = 1;
2145         }
2146
2147         if (atomic && !cmd->tgtt->xmit_response_atomic) {
2148                 TRACE_DBG("%s", "xmit_response() can not be "
2149                       "called in atomic context, rescheduling to the thread");
2150                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2151                 goto out;
2152         }
2153
2154         set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2155         smp_mb__after_set_bit();
2156
2157         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2158                 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2159                         TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2160                                 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
2161                         scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2162                 }
2163         }
2164
2165         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2166                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
2167                         cmd, cmd->tag);
2168                 cmd->state = SCST_CMD_STATE_FINISHED;
2169                 res = SCST_CMD_STATE_RES_CONT_SAME;
2170                 goto out;
2171         }
2172
2173 #ifdef DEBUG_TM
2174         if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2175                 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2176                         TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2177                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2178                         goto out;
2179                 }
2180                 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2181                         cmd, cmd->tag);
2182                 schedule_timeout_uninterruptible(HZ);
2183         }
2184 #endif
2185
2186         while (1) {
2187                 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2188
2189                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2190                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2191
2192                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2193
2194 #if defined(DEBUG) || defined(TRACING)
2195                 if (cmd->sg) {
2196                         int i;
2197                         struct scatterlist *sg = cmd->sg;
2198                         TRACE(TRACE_SEND_BOT, 
2199                               "Xmitting %d S/G(s) at %p sg[0].page at %p",
2200                               cmd->sg_cnt, sg, (void*)sg[0].page);
2201                         for(i = 0; i < cmd->sg_cnt; ++i) {
2202                                 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2203                                     "Xmitting sg", page_address(sg[i].page),
2204                                     sg[i].length);
2205                         }
2206                 }
2207 #endif
2208
2209 #ifdef DEBUG_RETRY
2210                 if (((scst_random() % 100) == 77))
2211                         rc = SCST_TGT_RES_QUEUE_FULL;
2212                 else
2213 #endif
2214                         rc = cmd->tgtt->xmit_response(cmd);
2215                 TRACE_DBG("xmit_response() returned %d", rc);
2216
2217                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2218                         goto out;
2219
2220                 /* Restore the previous state */
2221                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2222
2223                 switch (rc) {
2224                 case SCST_TGT_RES_QUEUE_FULL:
2225                 {
2226                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2227                                 break;
2228                         else
2229                                 continue;
2230                 }
2231
2232                 case SCST_TGT_RES_NEED_THREAD_CTX:
2233                 {
2234                         TRACE_DBG("Target driver %s xmit_response() "
2235                               "requested thread context, rescheduling",
2236                               cmd->tgtt->name);
2237                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2238                         break;
2239                 }
2240
2241                 default:
2242                         goto out_error;
2243                 }
2244                 break;
2245         }
2246
2247 out:
2248         /* Caution: cmd can be already dead here */
2249         TRACE_EXIT_HRES(res);
2250         return res;
2251
2252 out_error:
2253         if (rc == SCST_TGT_RES_FATAL_ERROR) {
2254                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2255                         "fatal error", cmd->tgtt->name);
2256         } else {
2257                 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2258                         "invalid value %d", cmd->tgtt->name, rc);
2259         }
2260         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2261         cmd->state = SCST_CMD_STATE_FINISHED;
2262         res = SCST_CMD_STATE_RES_CONT_SAME;
2263         goto out;
2264 }
2265
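     /*
      * Final stage: removes the cmd from the lists, notifies a pending TM cmd,
      * updates the per-tgt_dev and per-session counters and frees the cmd.
      */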
2266 static int scst_finish_cmd(struct scst_cmd *cmd)
2267 {
2268         int res;
2269
2270         TRACE_ENTRY();
2271
2272         if (cmd->mem_checked) {
2273                 spin_lock_bh(&scst_cmd_mem_lock);
2274                 scst_cur_cmd_mem -= cmd->bufflen;
2275                 spin_unlock_bh(&scst_cmd_mem_lock);
2276         }
2277
2278         spin_lock_irq(&scst_list_lock);
2279
2280         TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2281         list_del(&cmd->cmd_list_entry);
2282
2283         if (cmd->mgmt_cmnd)
2284                 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2285
2286         if (likely(cmd->tgt_dev != NULL))
2287                 cmd->tgt_dev->cmd_count--;
2288
2289         cmd->sess->sess_cmd_count--;
2290
2291         list_del(&cmd->search_cmd_list_entry);
2292
2293         spin_unlock_irq(&scst_list_lock);
2294
2295         scst_free_cmd(cmd);
2296
2297         res = SCST_CMD_STATE_RES_CONT_NEXT;
2298
2299         TRACE_EXIT_HRES(res);
2300         return res;
2301 }
2302
2303 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2304 {
2305         TRACE_ENTRY();
2306
2307         BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2308
2309         cmd->state = SCST_CMD_STATE_FINISHED;
2310         scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
2311
2312         TRACE_EXIT();
2313         return;
2314 }
2315
2316 /*
2317  * Returns 0 on success, > 0 when we need to wait for unblock,
2318  * < 0 if there is no device (lun) or device type handler.
2319  * Called under scst_list_lock and IRQs disabled
2320  */
2321 static int scst_translate_lun(struct scst_cmd *cmd)
2322 {
2323         struct scst_tgt_dev *tgt_dev = NULL;
2324         int res = 0;
2325
2326         TRACE_ENTRY();
2327
2328         scst_inc_cmd_count();   
2329
2330         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2331                 res = -1;
2332                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2333                       (uint64_t)cmd->lun);
2334                 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2335                                     sess_tgt_dev_list_entry) 
2336                 {
2337                         if (tgt_dev->acg_dev->lun == cmd->lun) {
2338                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2339
2340                                 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2341                                         PRINT_INFO_PR("Dev handler for device "
2342                                           "%Ld is NULL, the device will not be "
2343                                           "visible remotely", (uint64_t)cmd->lun);
2344                                         break;
2345                                 }
2346                                 
2347                                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2348                                         cmd->tgt_dev_saved->cmd_count--;
2349                                         TRACE(TRACE_SCSI_SERIALIZING,
2350                                               "SCST_CMD_STATE_REINIT: "
2351                                               "incrementing expected_sn on tgt_dev_saved %p",
2352                                               cmd->tgt_dev_saved);
2353                                         scst_inc_expected_sn_unblock(
2354                                                 cmd->tgt_dev_saved, cmd, 1);
2355                                 }
2356                                 cmd->tgt_dev = tgt_dev;
2357                                 tgt_dev->cmd_count++;
2358                                 cmd->dev = tgt_dev->acg_dev->dev;
2359
2360                                 /* ToDo: cmd->queue_type */
2361
2362                                 /* scst_list_lock is enough to protect that */
2363                                 cmd->sn = tgt_dev->next_sn;
2364                                 tgt_dev->next_sn++;
2365
2366                                 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2367                                         "cmd->sn: %d", cmd->sn);
2368
2369                                 res = 0;
2370                                 break;
2371                         }
2372                 }
2373                 if (res != 0) {
2374                         TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2375                                 "nonexistent LU?", (uint64_t)cmd->lun);
2376                         scst_dec_cmd_count();
2377                 }
2378         } else {
2379                 if (!cmd->sess->waiting) {
2380                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2381                               cmd->sess);
2382                         list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2383                                       &scst_dev_wait_sess_list);
2384                         cmd->sess->waiting = 1;
2385                 }
2386                 scst_dec_cmd_count();
2387                 res = 1;
2388         }
2389
2390         TRACE_EXIT_RES(res);
2391         return res;
2392 }
2393
2394 /* Called under scst_list_lock and IRQs disabled */
2395 static int scst_process_init_cmd(struct scst_cmd *cmd)
2396 {
2397         int res = 0;
2398
2399         TRACE_ENTRY();
2400
2401         res = scst_translate_lun(cmd);
2402         if (likely(res == 0)) {
2403                 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2404                 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
2405                         TRACE(TRACE_RETRY, "Too many pending commands in "
2406                                 "session, returning BUSY to initiator \"%s\"",
2407                                 (cmd->sess->initiator_name[0] == '\0') ?
2408                                   "Anonymous" : cmd->sess->initiator_name);
2409                         scst_set_busy(cmd);
2410                         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2411                 }
2412                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2413                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2414         } else if (res < 0) {
2415                 TRACE_DBG("Finishing cmd %p", cmd);
2416                 scst_set_cmd_error(cmd,
2417                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2418                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2419                 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2420                 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2421         }
2422
2423         TRACE_EXIT_RES(res);
2424         return res;
2425 }
2426
2427 /* 
2428  * Called under scst_list_lock and IRQs disabled
2429  * We don't drop it anywhere inside, because command execution
2430  * has to be serialized, i.e. commands must be executed in the order
2431  * of their arrival, and we set this order inside scst_translate_lun().
2432  */
2433 static int scst_do_job_init(struct list_head *init_cmd_list)
2434 {
2435         int res = 1;
2436
2437         TRACE_ENTRY();
2438
2439         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2440                 while (!list_empty(init_cmd_list)) {
2441                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2442                                                           typeof(*cmd),
2443                                                           cmd_list_entry);
2444                         res = scst_process_init_cmd(cmd);
2445                         if (res > 0)
2446                                 break;
2447                 }
2448         }
2449
2450         TRACE_EXIT_RES(res);
2451         return res;
2452 }
2453
2454 /* Called with no locks held */
2455 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2456         int left_locked)
2457 {
2458         int res;
2459
2460         TRACE_ENTRY();
2461
2462         BUG_ON(in_irq());
2463
2464         cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2465                         SCST_CONTEXT_DIRECT_ATOMIC);
2466         cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2467
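             /* Run the cmd state machine until it completes, must wait or needs a thread */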
2468         do {
2469                 switch (cmd->state) {
2470                 case SCST_CMD_STATE_DEV_PARSE:
2471                         res = scst_parse_cmd(cmd);
2472                         break;
2473
2474                 case SCST_CMD_STATE_PREPARE_SPACE:
2475                         res = scst_prepare_space(cmd);
2476                         break;
2477
2478                 case SCST_CMD_STATE_RDY_TO_XFER:
2479                         res = scst_rdy_to_xfer(cmd);
2480                         break;
2481
2482                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2483                         res = scst_send_to_midlev(cmd);
2484                         /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2485                         break;
2486
2487                 case SCST_CMD_STATE_DEV_DONE:
2488                         res = scst_dev_done(cmd);
2489                         break;
2490
2491                 case SCST_CMD_STATE_XMIT_RESP:
2492                         res = scst_xmit_response(cmd);
2493                         break;
2494
2495                 case SCST_CMD_STATE_FINISHED:
2496                         res = scst_finish_cmd(cmd);
2497                         break;
2498
2499                 default:
2500                         PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2501                                cmd, cmd->state);
2502                         BUG();
2503                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2504                         break;
2505                 }
2506         } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2507
2508         if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2509                 if (left_locked)
2510                         spin_lock_irq(&scst_list_lock);
2511         } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2512                 spin_lock_irq(&scst_list_lock);
2513
2514                 switch (cmd->state) {
2515                 case SCST_CMD_STATE_DEV_PARSE:
2516                 case SCST_CMD_STATE_PREPARE_SPACE:
2517                 case SCST_CMD_STATE_RDY_TO_XFER:
2518                 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2519                 case SCST_CMD_STATE_DEV_DONE:
2520                 case SCST_CMD_STATE_XMIT_RESP:
2521                 case SCST_CMD_STATE_FINISHED:
2522                         TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2523                         list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2524                         break;
2525 #ifdef EXTRACHECKS
2526                 /* invalid states for a cmd at this point */
2527                 case SCST_CMD_STATE_DEFAULT:
2528                 case SCST_CMD_STATE_NEED_THREAD_CTX:
2529                         PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2530                                 "useful list (left on scst cmd list)", cmd, 
2531                                 cmd->state);
2532                         spin_unlock_irq(&scst_list_lock);
2533                         BUG();
2534                         spin_lock_irq(&scst_list_lock);
2535                         break;
2536 #endif
2537                 default:
2538                         break;
2539                 }
2540                 cmd->non_atomic_only = 1;
2541                 if (!left_locked)
2542                         spin_unlock_irq(&scst_list_lock);
2543                 wake_up(&scst_list_waitQ);
2544         } else if (res == SCST_CMD_STATE_RES_RESTART) {
2545                 if (cmd->state == SCST_CMD_STATE_REINIT) {
2546                         spin_lock_irq(&scst_list_lock);
2547                         TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2548                         list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2549                         if (!left_locked)
2550                                 spin_unlock_irq(&scst_list_lock);
2551                 } else
2552                         BUG();
2553         } else
2554                 BUG();
2555
2556         TRACE_EXIT_RES(res);
2557         return res;
2558 }
2559
2560 /* Called under scst_list_lock and IRQs disabled */
2561 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2562 {
2563         int res;
2564         struct scst_cmd *cmd;
2565         int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
2566                         SCST_CONTEXT_DIRECT_ATOMIC);
2567
2568         TRACE_ENTRY();
2569
2570 #ifdef EXTRACHECKS
2571         {
2572                 int c = (context & ~SCST_PROCESSIBLE_ENV);
2573                 WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) && 
2574                         (c != SCST_CONTEXT_DIRECT));
2575         }
2576 #endif
2577
2578         tm_dbg_check_released_cmds();
2579
2580 restart:
2581         list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2582                 if (atomic && cmd->non_atomic_only) {
2583                         TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2584                         continue;
2585                 }
2586                 if (tm_dbg_check_cmd(cmd) != 0)
2587                         goto restart;
2588                 res = scst_process_active_cmd(cmd, context, NULL, 1);
2589                 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2590                         goto restart;
2591                 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2592                         goto restart;
2593                 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2594                         break;
2595                 } else
2596                         BUG();
2597         }
2598
2599         TRACE_EXIT();
2600         return;
2601 }
2602
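     /* Returns non-zero when there is work (or a shutdown request) for scst_cmd_thread() */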
2603 static inline int test_cmd_lists(void)
2604 {
2605         int res = !list_empty(&scst_active_cmd_list) ||
2606             (!list_empty(&scst_init_cmd_list) &&
2607              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2608             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2609             unlikely(scst_shut_threads_count > 0) ||
2610             tm_dbg_is_release();
2611         return res;
2612 }
2613
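     /*
      * Main command processing kernel thread: sleeps on scst_list_waitQ and
      * processes the init and active cmd lists until shutdown is requested.
      */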
2614 int scst_cmd_thread(void *arg)
2615 {
2616         static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2617         int n;
2618
2619         TRACE_ENTRY();
2620
2621         spin_lock(&lock);
2622         n = scst_thread_num++;
2623         spin_unlock(&lock);
2624         daemonize("scsi_tgt%d", n);
2625         recalc_sigpending();
2626         set_user_nice(current, 10);
2627         current->flags |= PF_NOFREEZE;
2628
2629         spin_lock_irq(&scst_list_lock);
2630         while (1) {
2631                 wait_queue_t wait;
2632                 init_waitqueue_entry(&wait, current);
2633
2634                 if (!test_cmd_lists()) {
2635                         add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2636                         for (;;) {
2637                                 set_current_state(TASK_INTERRUPTIBLE);
2638                                 if (test_cmd_lists())
2639                                         break;
2640                                 spin_unlock_irq(&scst_list_lock);
2641                                 schedule();
2642                                 spin_lock_irq(&scst_list_lock);
2643                         }
2644                         set_current_state(TASK_RUNNING);
2645                         remove_wait_queue(&scst_list_waitQ, &wait);
2646                 }
2647
2648                 scst_do_job_init(&scst_init_cmd_list);
2649                 scst_do_job_active(&scst_active_cmd_list,
2650                                    SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
2651
2652                 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2653                     list_empty(&scst_cmd_list) &&
2654                     list_empty(&scst_active_cmd_list) &&
2655                     list_empty(&scst_init_cmd_list)) {
2656                         break;
2657                 }
2658                 
2659                 if (unlikely(scst_shut_threads_count > 0)) {
2660                         scst_shut_threads_count--;
2661                         break;
2662                 }
2663         }
2664         spin_unlock_irq(&scst_list_lock);
2665
2666         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2667                 smp_mb__after_atomic_dec();
2668                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2669                 up(scst_shutdown_mutex);
2670         }
2671
2672         TRACE_EXIT();
2673         return 0;
2674 }
2675
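     /* Tasklet handler: processes the init and active cmd lists in atomic (softirq) context */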
2676 void scst_cmd_tasklet(long p)
2677 {
2678         TRACE_ENTRY();
2679
2680         spin_lock_irq(&scst_list_lock);
2681
2682         scst_do_job_init(&scst_init_cmd_list);
2683         scst_do_job_active(&scst_active_cmd_list, 
2684                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2685
2686         spin_unlock_irq(&scst_list_lock);
2687
2688         TRACE_EXIT();
2689         return;
2690 }
2691
2692 /*
2693  * Returns 0 on success, < 0 if there is no device handler or
2694  * > 0 if SCST_FLAG_SUSPENDED set.
2695  */
2696 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2697 {
2698         struct scst_tgt_dev *tgt_dev = NULL;
2699         int res = -1;
2700
2701         TRACE_ENTRY();
2702
2703         TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2704               (uint64_t)mcmd->lun);
2705
2706         spin_lock_irq(&scst_list_lock);
2707         scst_inc_cmd_count();   
2708         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2709                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2710                                     sess_tgt_dev_list_entry) 
2711                 {
2712                         if (tgt_dev->acg_dev->lun == mcmd->lun) {
2713                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
2714                                 mcmd->mcmd_tgt_dev = tgt_dev;
2715                                 res = 0;
2716                                 break;
2717                         }
2718                 }
2719                 if (mcmd->mcmd_tgt_dev == NULL)
2720                         scst_dec_cmd_count();
2721         } else {
2722                 if (!mcmd->sess->waiting) {
2723                         TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2724                               mcmd->sess);
2725                         list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2726                                       &scst_dev_wait_sess_list);
2727                         mcmd->sess->waiting = 1;
2728                 }
2729                 scst_dec_cmd_count();
2730                 res = 1;
2731         }
2732         spin_unlock_irq(&scst_list_lock);
2733
2734         TRACE_EXIT_HRES(res);
2735         return res;
2736 }
2737
2738 /* Called under scst_list_lock and IRQ off */
2739 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2740         struct scst_mgmt_cmd *mcmd)
2741 {
2742         TRACE_ENTRY();
2743
2744         TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2745                 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2746                 mcmd->cmd_wait_count);
2747
2748         cmd->mgmt_cmnd = NULL;
2749
2750         if (cmd->completed)
2751                 mcmd->completed_cmd_count++;
2752
2753         mcmd->cmd_wait_count--;
2754         if (mcmd->cmd_wait_count > 0) {
2755                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2756                         mcmd->cmd_wait_count);
2757                 goto out;
2758         }
2759
2760         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2761
2762         if (mcmd->completed) {
2763                 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2764                         mcmd);
2765                 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2766                         &scst_active_mgmt_cmd_list);
2767         }
2768
2769         wake_up(&scst_mgmt_cmd_list_waitQ);
2770
2771 out:
2772         TRACE_EXIT();
2773         return;
2774 }
2775
2776 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2777         struct scst_tgt_dev *tgt_dev, int set_status)
2778 {
2779         int res = SCST_DEV_TM_NOT_COMPLETED;
2780         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2781                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2782                       tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2783                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2784                         tgt_dev);
2785                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2786                       tgt_dev->acg_dev->dev->handler->name, res);
2787                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2788                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2789                                                 SCST_MGMT_STATUS_SUCCESS :
2790                                                 SCST_MGMT_STATUS_FAILED;
2791                 }
2792         }
2793         return res;
2794 }
2795
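     /*
      * For these "strict" TM functions scst_abort_cmd() defers the TM response
      * if the cmd is already executing; for the others only if it is already
      * being xmitted.
      */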
2796 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2797 {
2798         switch(mgmt_fn) {
2799                 case SCST_ABORT_TASK:
2800                 case SCST_ABORT_TASK_SET:
2801                 case SCST_CLEAR_TASK_SET:
2802                         return 1;
2803                 default:
2804                         return 0;
2805         }
2806 }
2807
2808 /* 
2809  * Called under scst_list_lock and IRQ off (to protect cmd
2810  * from being destroyed).
2811  * If the cmd is already being executed/xmitted, completion of mcmd is deferred.
2812  */
2813 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2814         int other_ini, int call_dev_task_mgmt_fn)
2815 {
2816         TRACE_ENTRY();
2817
2818         TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2819
2820         if (other_ini) {
2821                 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2822                 smp_mb__after_set_bit();
2823         }
2824         set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2825         smp_mb__after_set_bit();
2826
2827         if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2828                  scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2829
2830         if (mcmd) {
2831                 int defer;
2832                 if (cmd->tgtt->tm_sync_reply)
2833                         defer = 1;
2834                 else {
2835                         if (scst_is_strict_mgmt_fn(mcmd->fn))
2836                                 defer = test_bit(SCST_CMD_EXECUTING,
2837                                         &cmd->cmd_flags);
2838                         else
2839                                 defer = test_bit(SCST_CMD_XMITTING,
2840                                         &cmd->cmd_flags);
2841                 }
2842
2843                 if (defer) {
2844                         /*
2845                          * Delay the response until the command's finish in
2846                          * order to guarantee that "no further responses from
2847                          * the task are sent to the SCSI initiator port" after
2848                          * response from the TM function is sent (SAM)
2849                          */
2850                         TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2851                                 "xmitted (state %d), deferring ABORT...", cmd,
2852                                 cmd->tag, cmd->state);
2853 #ifdef EXTRACHECKS
2854                         if (cmd->mgmt_cmnd) {
2855                                 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2856                                         "has non-NULL mgmt_cmnd %p!!! Current "
2857                                         "mcmd %p\n", cmd, cmd->tag, cmd->state,
2858                                         cmd->mgmt_cmnd, mcmd);
2859                         }
2860 #endif
2861                         BUG_ON(cmd->mgmt_cmnd);
2862                         mcmd->cmd_wait_count++;
2863                         cmd->mgmt_cmnd = mcmd;
2864                 }
2865         }
2866
2867         tm_dbg_release_cmd(cmd);
2868
2869         TRACE_EXIT();
2870         return;
2871 }
2872
2873 /* Called under scst_list_lock and IRQ off */
2874 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2875 {
2876         int res;
2877         if (mcmd->cmd_wait_count != 0) {
2878                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2879                         "wait", mcmd->cmd_wait_count);
2880                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2881                 res = -1;
2882         } else {
2883                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2884                 res = 0;
2885         }
2886         mcmd->completed = 1;
2887         return res;
2888 }
2889
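/*
 * Descriptive note (added): moves all aborted commands found on the devices'
 * blocked_cmd_list back to the active cmd list and wakes up the command
 * processing threads.
 */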
2890 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2891 {
2892         struct scst_device *dev;
2893         int wake = 0;
2894
2895         TRACE_ENTRY();
2896
2897         if (!scst_mutex_held)
2898                 down(&scst_mutex);
2899
2900         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2901                 struct scst_cmd *cmd, *tcmd;
2902                 spin_lock_bh(&dev->dev_lock);
2903                 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2904                                         blocked_cmd_list_entry) {
2905                         if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2906                                 list_del(&cmd->blocked_cmd_list_entry);
2907                                 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2908                                         "to active cmd list", cmd);
2909                                 spin_lock_irq(&scst_list_lock);
2910                                 list_move_tail(&cmd->cmd_list_entry,
2911                                         &scst_active_cmd_list);
2912                                 spin_unlock_irq(&scst_list_lock);
2913                                 wake = 1;
2914                         }
2915                 }
2916                 spin_unlock_bh(&dev->dev_lock);
2917         }
2918
2919         if (!scst_mutex_held)
2920                 up(&scst_mutex);
2921
2922         if (wake)
2923                 wake_up(&scst_list_waitQ);
2924
2925         TRACE_EXIT();
2926         return;
2927 }
2928
2929 /* Aborts all cmds of the given session that belong to tgt_dev (or that are not yet assigned a tgt_dev but address the same LUN) */
2930 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2931         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2932 {
2933         struct scst_cmd *cmd;
2934         struct scst_session *sess = tgt_dev->sess;
2935
2936         TRACE_ENTRY();
2937
2938         spin_lock_irq(&scst_list_lock);
2939
2940         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2941         list_for_each_entry(cmd, &sess->search_cmd_list, 
2942                         search_cmd_list_entry) {
2943                 if (cmd->tgt_dev == NULL) {
2944                         if (cmd->lun != tgt_dev->acg_dev->lun)
2945                                 continue;
2946                 } else if (cmd->tgt_dev != tgt_dev)
2947                         continue;
2948                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2949         }
2950         spin_unlock_irq(&scst_list_lock);
2951
2952         scst_unblock_aborted_cmds(scst_mutex_held);
2953
2954         TRACE_EXIT();
2955         return;
2956 }
2957
2958 /* Returns 0 if the command processing should be continued, <0 otherwise */
2959 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2960 {
2961         int res;
2962         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2963         struct scst_device *dev = tgt_dev->acg_dev->dev;
2964
2965         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2966                 tgt_dev->acg_dev->lun, mcmd);
2967
2968         spin_lock_bh(&dev->dev_lock);
2969         __scst_block_dev(dev);
2970         spin_unlock_bh(&dev->dev_lock);
2971
2972         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2973         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2974
2975         res = scst_set_mcmd_next_state(mcmd);
2976
2977         TRACE_EXIT_RES(res);
2978         return res;
2979 }
2980
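/*
 * Descriptive note (added): returns 0 and sets SCST_FLAG_TM_ACTIVE if the
 * mgmt cmd may be processed now, or -1 after moving it to the delayed mgmt
 * cmd list if another TM function is already being processed.
 */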
2981 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2982 {
2983         /*
2984          * No special protection is needed for SCST_FLAG_TM_ACTIVE, since
2985          * we can only be called from a single thread.
2986          */
2987         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2988                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2989                         mcmd);
2990                 if (!locked)
2991                         spin_lock_irq(&scst_list_lock);
2992                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
2993                         &scst_delayed_mgmt_cmd_list);
2994                 if (!locked)
2995                         spin_unlock_irq(&scst_list_lock);
2996                 return -1;
2997         } else {
2998                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
2999                 return 0;
3000         }
3001 }
3002
3003 /* Returns 0 if the command processing should be continued,
3004  * >0 if it should be requeued, <0 otherwise */
3005 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3006 {
3007         int res = 0;
3008
3009         TRACE_ENTRY();
3010
3011         res = scst_check_delay_mgmt_cmd(mcmd, 1);
3012         if (res != 0)
3013                 goto out;
3014
3015         if (mcmd->fn == SCST_ABORT_TASK) {
3016                 struct scst_session *sess = mcmd->sess;
3017                 struct scst_cmd *cmd;
3018
3019                 spin_lock_irq(&scst_list_lock);
3020                 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3021                 if (cmd == NULL) {
3022                         TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3023                                 "tag %d not found", mcmd->tag);
3024                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3025                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3026                 } else {
3027                         TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
3028                                 "aborting it", cmd, mcmd->tag, cmd->sn);
3029                         mcmd->cmd_to_abort = cmd;
3030                         scst_abort_cmd(cmd, mcmd, 0, 1);
3031                         res = scst_set_mcmd_next_state(mcmd);
3032                         mcmd->cmd_to_abort = NULL; /* just in case */
3033                 }
3034                 spin_unlock_irq(&scst_list_lock);
3035         } else {
3036                 int rc;
3037                 rc = scst_mgmt_translate_lun(mcmd);
3038                 if (rc < 0) {
3039                         PRINT_ERROR_PR("Corresponding device for lun %Ld not "
3040                                 "found", (uint64_t)mcmd->lun);
3041                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3042                         mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3043                 } else if (rc == 0)
3044                         mcmd->state = SCST_MGMT_CMD_STATE_READY;
3045                 else
3046                         res = rc;
3047         }
3048
3049 out:
3050         TRACE_EXIT_RES(res);
3051         return res;
3052 }
3053
3054 /* Returns 0 if the command processing should be continued, <0 otherwise */
3055 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3056 {
3057         int res, rc;
3058         struct scst_device *dev, *d;
3059         struct scst_tgt_dev *tgt_dev;
3060         int cont, c;
3061         LIST_HEAD(host_devs);
3062
3063         TRACE_ENTRY();
3064
3065         TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3066                 mcmd, mcmd->sess->sess_cmd_count);
3067
3068         down(&scst_mutex);
3069
3070         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3071                 int found = 0;
3072
3073                 spin_lock_bh(&dev->dev_lock);
3074                 __scst_block_dev(dev);
3075                 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3076                 spin_unlock_bh(&dev->dev_lock);
3077
3078                 cont = 0;
3079                 c = 0;
3080                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3081                         dev_tgt_dev_list_entry) 
3082                 {
3083                         cont = 1;
3084                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3085                         if (rc == SCST_DEV_TM_NOT_COMPLETED) 
3086                                 c = 1;
3087                         else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3088                                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3089                 }
3090                 if (cont && !c)
3091                         continue;
3092                 
3093                 if (dev->scsi_dev == NULL)
3094                         continue;
3095
3096                 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3097                         if (dev->scsi_dev->host->host_no ==
3098                                     d->scsi_dev->host->host_no) 
3099                         {
3100                                 found = 1;
3101                                 break;
3102                         }
3103                 }
3104                 if (!found)
3105                         list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3106         }
3107
3108         /*
3109          * We assume here that completion callbacks will be called for all
3110          * commands already on the devices on/after scsi_reset_provider().
3111          */
3112
3113         list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3114                 /* dev->scsi_dev must be non-NULL here */
3115                 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3116                       dev->scsi_dev->host->host_no);
3117                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3118                 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3119                       dev->scsi_dev->host->host_no,
3120                       (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3121                 if (rc != SUCCESS) {
3122                         /* scsi_reset_provider() escalates a failed bus reset to a host reset itself */
3123                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3124                 }
3125         }
3126
3127         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3128                 if (dev->scsi_dev != NULL)
3129                         dev->scsi_dev->was_reset = 0;
3130         }
3131
3132         up(&scst_mutex);
3133
3134         spin_lock_irq(&scst_list_lock);
3135         tm_dbg_task_mgmt("TARGET RESET");
3136         res = scst_set_mcmd_next_state(mcmd);
3137         spin_unlock_irq(&scst_list_lock);
3138
3139         TRACE_EXIT_RES(res);
3140         return res;
3141 }
3142
3143 /* Returns 0 if the command processing should be continued, <0 otherwise */
3144 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3145 {
3146         int res, rc;
3147         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3148         struct scst_device *dev = tgt_dev->acg_dev->dev;
3149
3150         TRACE_ENTRY();
3151
3152         TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3153                 mcmd);
3154
3155         spin_lock_bh(&dev->dev_lock);
3156         __scst_block_dev(dev);
3157         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3158         spin_unlock_bh(&dev->dev_lock);
3159
3160         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3161         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3162                 goto out_tm_dbg;
3163
3164         if (dev->scsi_dev != NULL) {
3165                 TRACE(TRACE_MGMT, "Resetting device (host %d)",
3166                       dev->scsi_dev->host->host_no);
3167                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3168                 if (rc != SUCCESS)
3169                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3170                 dev->scsi_dev->was_reset = 0;
3171         }
3172
3173 out_tm_dbg:
3174         spin_lock_irq(&scst_list_lock);
3175         tm_dbg_task_mgmt("LUN RESET");
3176         res = scst_set_mcmd_next_state(mcmd);
3177         spin_unlock_irq(&scst_list_lock);
3178
3179         TRACE_EXIT_RES(res);
3180         return res;
3181 }
3182
3183 /* Returns 0 if the command processing should be continued, <0 otherwise */
3184 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3185         int nexus_loss)
3186 {
3187         int res;
3188         struct scst_session *sess = mcmd->sess;
3189         struct scst_tgt_dev *tgt_dev;
3190
3191         TRACE_ENTRY();
3192
3193         if (nexus_loss) {
3194                 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3195                         mcmd);
3196         } else {
3197                 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3198                         mcmd);
3199         }
3200
3201         down(&scst_mutex);
3202         list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3203                 sess_tgt_dev_list_entry) 
3204         {
3205                 struct scst_device *dev = tgt_dev->acg_dev->dev;
3206                 int rc;
3207
3208                 spin_lock_bh(&dev->dev_lock);
3209                 __scst_block_dev(dev);
3210                 spin_unlock_bh(&dev->dev_lock);
3211
3212                 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3213                 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3214                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3215
3216                 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3217                 if (nexus_loss)
3218                         scst_reset_tgt_dev(tgt_dev, 1);
3219         }
3220         up(&scst_mutex);
3221
3222         spin_lock_irq(&scst_list_lock);
3223         res = scst_set_mcmd_next_state(mcmd);
3224         spin_unlock_irq(&scst_list_lock);
3225
3226         TRACE_EXIT_RES(res);
3227         return res;
3228 }
3229
3230 /* Returns 0 if the command processing should be continued, <0 otherwise */
3231 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3232         int nexus_loss)
3233 {
3234         int res;
3235         struct scst_tgt *tgt = mcmd->sess->tgt;
3236         struct scst_session *sess;
3237         struct scst_device *dev;
3238         struct scst_tgt_dev *tgt_dev;
3239
3240         TRACE_ENTRY();
3241
3242         if (nexus_loss) {
3243                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3244                         mcmd);
3245         } else {
3246                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3247                         mcmd);
3248         }
3249
3250         down(&scst_mutex);
3251
3252         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3253                 spin_lock_bh(&dev->dev_lock);
3254                 __scst_block_dev(dev);
3255                 spin_unlock_bh(&dev->dev_lock);
3256         }
3257
3258         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3259                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3260                         sess_tgt_dev_list_entry) 
3261                 {
3262                         int rc;
3263
3264                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3265                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3266                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3267
3268                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3269                         if (nexus_loss)
3270                                 scst_reset_tgt_dev(tgt_dev, 1);
3271                 }
3272         }
3273
3274         up(&scst_mutex);
3275
3276         spin_lock_irq(&scst_list_lock);
3277         res = scst_set_mcmd_next_state(mcmd);
3278         spin_unlock_irq(&scst_list_lock);
3279
3280         TRACE_EXIT_RES(res);
3281         return res;
3282 }
3283
3284 /* Returns 0 if the command processing should be continued, <0 otherwise */
3285 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3286 {
3287         int res = 0;
3288
3289         TRACE_ENTRY();
3290
3291         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3292
3293         switch (mcmd->fn) {
3294         case SCST_ABORT_TASK_SET:
3295         case SCST_CLEAR_TASK_SET:
3296                 res = scst_abort_task_set(mcmd);
3297                 break;
3298
3299         case SCST_LUN_RESET:
3300                 res = scst_lun_reset(mcmd);
3301                 break;
3302
3303         case SCST_TARGET_RESET:
3304                 res = scst_target_reset(mcmd);
3305                 break;
3306
3307         case SCST_ABORT_ALL_TASKS_SESS:
3308                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3309                 break;
3310
3311         case SCST_NEXUS_LOSS_SESS:
3312                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3313                 break;
3314
3315         case SCST_ABORT_ALL_TASKS:
3316                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3317                 break;
3318
3319         case SCST_NEXUS_LOSS:
3320                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3321                 break;
3322
3323         case SCST_CLEAR_ACA:
3324                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3325                 /* Nothing to do (yet) */
3326                 break;
3327
3328         default:
3329                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3330                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3331                 break;
3332         }
3333
3334         TRACE_EXIT_RES(res);
3335         return res;
3336 }
3337
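/*
 * Descriptive note (added): finishes a TM function: clears
 * SCST_FLAG_TM_ACTIVE, reactivates one delayed mgmt cmd (if any), notifies
 * the target driver via task_mgmt_fn_done() and unblocks the affected
 * device(s).
 */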
3338 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3339 {
3340         struct scst_device *dev;
3341         struct scst_tgt_dev *tgt_dev;
3342
3343         TRACE_ENTRY();
3344
3345         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3346         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3347                 struct scst_mgmt_cmd *m;
3348                 spin_lock_irq(&scst_list_lock);
3349                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3350                                 mgmt_cmd_list_entry);
3351                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3352                         "cmd list", m);
3353                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3354                 spin_unlock_irq(&scst_list_lock);
3355         }
3356
3357         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3358         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3359                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3360
3361         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3362                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3363                       mcmd->sess->tgt->tgtt->name);
3364                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3365                 TRACE_MGMT_DBG("Target %s task_mgmt_fn_done() returned",
3366                       mcmd->sess->tgt->tgtt->name);
3367         }
3368
3369         switch (mcmd->fn) {
3370         case SCST_ABORT_TASK_SET:
3371         case SCST_CLEAR_TASK_SET:
3372         case SCST_LUN_RESET:
3373                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3374                 break;
3375
3376         case SCST_TARGET_RESET:
3377         case SCST_ABORT_ALL_TASKS:
3378         case SCST_NEXUS_LOSS:
3379                 down(&scst_mutex);
3380                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3381                         scst_unblock_dev(dev);
3382                 }
3383                 up(&scst_mutex);
3384                 break;
3385
3386         case SCST_NEXUS_LOSS_SESS:
3387         case SCST_ABORT_ALL_TASKS_SESS:
3388                 down(&scst_mutex);
3389                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3390                                 sess_tgt_dev_list_entry) {
3391                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3392                 }
3393                 up(&scst_mutex);
3394                 break;
3395
3396         case SCST_CLEAR_ACA:
3397         default:
3398                 break;
3399         }
3400
3401         mcmd->tgt_priv = NULL;
3402
3403         TRACE_EXIT();
3404         return;
3405 }
3406
3407 /* Returns >0 if the mgmt cmd should be requeued */
3408 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3409 {
3410         int res = 0;
3411
3412         TRACE_ENTRY();
3413
3414         TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3415
3416         while (1) {
3417                 switch (mcmd->state) {
3418                 case SCST_MGMT_CMD_STATE_INIT:
3419                         res = scst_mgmt_cmd_init(mcmd);
3420                         if (res)
3421                                 goto out;
3422                         break;
3423
3424                 case SCST_MGMT_CMD_STATE_READY:
3425                         if (scst_mgmt_cmd_exec(mcmd))
3426                                 goto out;
3427                         break;
3428
3429                 case SCST_MGMT_CMD_STATE_DONE:
3430                         scst_mgmt_cmd_send_done(mcmd);
3431                         break;
3432
3433                 case SCST_MGMT_CMD_STATE_FINISHED:
3434                         goto out_free;
3435
3436 #ifdef EXTRACHECKS
3437                 case SCST_MGMT_CMD_STATE_EXECUTING:
3438                         BUG();
3439 #endif
3440
3441                 default:
3442                         PRINT_ERROR_PR("Unknown state %d of management command",
3443                                     mcmd->state);
3444                         res = -1;
3445                         goto out_free;
3446                 }
3447         }
3448
3449 out:
3450         TRACE_EXIT_RES(res);
3451         return res;
3452
3453 out_free:
3454         scst_free_mgmt_cmd(mcmd, 1);
3455         goto out;
3456 }
3457
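/* Wakeup condition for scst_mgmt_cmd_thread() */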
3458 static inline int test_mgmt_cmd_list(void)
3459 {
3460         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3461                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3462                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3463         return res;
3464 }
3465
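/*
 * Kernel thread processing mgmt (TM) cmds from scst_active_mgmt_cmd_list
 * until SCST_FLAG_SHUTDOWN is set and the list is empty.
 */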
3466 int scst_mgmt_cmd_thread(void *arg)
3467 {
3468         struct scst_mgmt_cmd *mcmd;
3469
3470         TRACE_ENTRY();
3471
3472         daemonize("scsi_tgt_mc");
3473         recalc_sigpending();
3474         current->flags |= PF_NOFREEZE;
3475
3476         spin_lock_irq(&scst_list_lock);
3477         while (1) {
3478                 wait_queue_t wait;
3479                 init_waitqueue_entry(&wait, current);
3480
3481                 if (!test_mgmt_cmd_list()) {
3482                         add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3483                                                  &wait);
3484                         for (;;) {
3485                                 set_current_state(TASK_INTERRUPTIBLE);
3486                                 if (test_mgmt_cmd_list())
3487                                         break;
3488                                 spin_unlock_irq(&scst_list_lock);
3489                                 schedule();
3490                                 spin_lock_irq(&scst_list_lock);
3491                         }
3492                         set_current_state(TASK_RUNNING);
3493                         remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3494                 }
3495
3496                 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3497                        !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3498                 {
3499                         int rc;
3500                         mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3501                                           typeof(*mcmd), mgmt_cmd_list_entry);
3502                         TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3503                               mcmd);
3504                         list_move_tail(&mcmd->mgmt_cmd_list_entry,
3505                                        &scst_mgmt_cmd_list);
3506                         spin_unlock_irq(&scst_list_lock);
3507                         rc = scst_process_mgmt_cmd(mcmd);
3508                         spin_lock_irq(&scst_list_lock);
3509                         if (rc > 0) {
3510                                 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3511                                         "of active mgmt cmd list", mcmd);
3512                                 list_move(&mcmd->mgmt_cmd_list_entry,
3513                                        &scst_active_mgmt_cmd_list);
3514                         }
3515                 }
3516
3517                 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3518                     list_empty(&scst_active_mgmt_cmd_list)) 
3519                 {
3520                         break;
3521                 }
3522         }
3523         spin_unlock_irq(&scst_list_lock);
3524
3525         if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3526                 smp_mb__after_atomic_dec();
3527                 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3528                 up(scst_shutdown_mutex);
3529         }
3530
3531         TRACE_EXIT();
3532         return 0;
3533 }
3534
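/*
 * Descriptive note (added): allocates and minimally initializes a mgmt cmd.
 * Returns NULL on allocation failure or if the target driver did not
 * provide task_mgmt_fn_done().
 */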
3535 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3536         *sess, int fn, int atomic, void *tgt_priv)
3537 {
3538         struct scst_mgmt_cmd *mcmd = NULL;
3539
3540         TRACE_ENTRY();
3541
3542         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3543                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3544                             "(target %s)", sess->tgt->tgtt->name);
3545                 goto out;
3546         }
3547
3548         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3549         if (mcmd == NULL)
3550                 goto out;
3551
3552         mcmd->sess = sess;
3553         mcmd->fn = fn;
3554         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3555         mcmd->tgt_priv = tgt_priv;
3556
3557 out:
3558         TRACE_EXIT();
3559         return mcmd;
3560 }
3561
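/*
 * Descriptive note (added): queues the new mgmt cmd on the active mgmt cmd
 * list (or on the session's init deferred list, if the session is still
 * initializing) and wakes up the mgmt cmd thread. Returns -1 if the session
 * initialization failed.
 */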
3562 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3563         struct scst_mgmt_cmd *mcmd)
3564 {
3565         unsigned long flags;
3566         int res = 0;
3567
3568         TRACE_ENTRY();
3569
3570         scst_sess_get(sess);
3571
3572         spin_lock_irqsave(&scst_list_lock, flags);
3573
3574         sess->sess_cmd_count++;
3575
3576 #ifdef EXTRACHECKS
3577         if (unlikely(sess->shutting_down)) {
3578                 PRINT_ERROR_PR("%s",
3579                         "New mgmt cmd while shutting down the session");
3580                 BUG();
3581         }
3582 #endif
3583
3584         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3585                 switch(sess->init_phase) {
3586                 case SCST_SESS_IPH_INITING:
3587                         TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
3588                                 mcmd);
3589                         list_add_tail(&mcmd->mgmt_cmd_list_entry, 
3590                                 &sess->init_deferred_mcmd_list);
3591                         goto out_unlock;
3592                 case SCST_SESS_IPH_SUCCESS:
3593                         break;
3594                 case SCST_SESS_IPH_FAILED:
3595                         res = -1;
3596                         goto out_unlock;
3597                 default:
3598                         BUG();
3599                 }
3600         }
3601
3602         TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3603         list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3604
3605         spin_unlock_irqrestore(&scst_list_lock, flags);
3606
3607         wake_up(&scst_mgmt_cmd_list_waitQ);
3608
3609 out:
3610         TRACE_EXIT();
3611         return res;
3612
3613 out_unlock:
3614         spin_unlock_irqrestore(&scst_list_lock, flags);
3615         goto out;
3616 }
3617
3618 /* 
3619  * Must not be called in parallel with scst_unregister_session() for the
3620  * same sess
3621  */
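/*
 * Illustrative sketch only (not from this file): a target driver might
 * issue a LUN RESET roughly as follows, where "my_sess", "lun_buf",
 * "lun_buf_len" and "my_priv" are hypothetical driver variables:
 *
 *	if (scst_rx_mgmt_fn_lun(my_sess, SCST_LUN_RESET, lun_buf,
 *				lun_buf_len, 1, my_priv) != 0)
 *		handle the submission failure;
 */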
3622 int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
3623                         const uint8_t *lun, int lun_len, int atomic,
3624                         void *tgt_priv)
3625 {
3626         int res = -EFAULT;
3627         struct scst_mgmt_cmd *mcmd = NULL;
3628
3629         TRACE_ENTRY();
3630
3631         if (unlikely(fn == SCST_ABORT_TASK)) {
3632                 PRINT_ERROR_PR("%s() for ABORT TASK called", __FUNCTION__);
3633                 res = -EINVAL;
3634                 goto out;
3635         }
3636
3637         mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_priv);
3638         if (mcmd == NULL)
3639                 goto out;
3640
3641         mcmd->lun = scst_unpack_lun(lun, lun_len);
3642