387e87290ad5308cc94d6ab12c56f01a7326d0a9
[mirror/scst/.git] / scst / src / scst_targ.c
1 /*
2  *  scst_targ.c
3  *  
4  *  Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *  
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  * 
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
28
29 #include "scst_debug.h"
30 #include "scsi_tgt.h"
31 #include "scst_priv.h"
32
33 static int scst_do_job_init(struct list_head *init_cmd_list);
34
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
36         int left_locked);
37
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39         struct scst_mgmt_cmd *mcmd);
40
/*
 * scst_list_lock assumed to be held.
 *
 * Moves cmd to the global scst_cmd_list, drops scst_list_lock and runs the
 * command's state machine via __scst_process_active_cmd().  Returns the
 * state-machine result.  On return the lock has been RELEASED (the caller
 * must not unlock it again); pass pflags when the caller took the lock with
 * spin_lock_irqsave(), or NULL when it used spin_lock_irq().
 */
static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
        unsigned long *pflags, int left_locked)
{
        int res;

        TRACE_ENTRY();

        TRACE_DBG("Moving cmd %p to cmd list", cmd);
        list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);

        /* This is an inline func., so unneeded code will be optimized out */
        if (pflags)
                spin_unlock_irqrestore(&scst_list_lock, *pflags);
        else
                spin_unlock_irq(&scst_list_lock);

        res = __scst_process_active_cmd(cmd, context, left_locked);

        TRACE_EXIT_RES(res);
        return res;
}
63
64 static inline void scst_schedule_tasklet(void)
65 {
66         struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
67
68 #if 0 /* Looks like #else is better for performance */
69         if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
70                 tasklet_schedule(t);
71         else {
72                 /* 
73                  * We suppose that other CPU(s) are rather idle, so we
74                  * ask one of them to help
75                  */
76                 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77                         "instead", smp_processor_id());
78                 wake_up(&scst_list_waitQ);
79         }
80 #else
81         tasklet_schedule(t);
82 #endif
83 }
84
85 /* 
86  * Must not been called in parallel with scst_unregister_session() for the 
87  * same sess
88  */
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90                              const uint8_t *lun, int lun_len,
91                              const uint8_t *cdb, int cdb_len, int atomic)
92 {
93         struct scst_cmd *cmd;
94
95         TRACE_ENTRY();
96
97 #ifdef EXTRACHECKS
98         if (unlikely(sess->shutting_down)) {
99                 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
100                 BUG();
101         }
102 #endif
103
104         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
105         if (cmd == NULL)
106                 goto out;
107
108         cmd->sess = sess;
109         cmd->tgt = sess->tgt;
110         cmd->tgtt = sess->tgt->tgtt;
111         cmd->state = SCST_CMD_STATE_INIT_WAIT;
112
113         /* 
114          * For both wrong lun and CDB defer the error reporting for
115          * scst_cmd_init_done()
116          */
117
118         cmd->lun = scst_unpack_lun(lun, lun_len);
119
120         if (cdb_len <= MAX_COMMAND_SIZE) {
121                 memcpy(cmd->cdb, cdb, cdb_len);
122                 cmd->cdb_len = cdb_len;
123         }
124
125         TRACE_DBG("cmd %p, sess %p", cmd, sess);
126         scst_sess_get(sess);
127
128 out:
129         TRACE_EXIT();
130         return cmd;
131 }
132
/*
 * Second half of command reception: called by the target driver once the
 * command (allocated by scst_rx_cmd()) is fully received.  Accounts the
 * command in its session, defers it while the session is still
 * initializing, reports the LUN/CDB errors deferred by scst_rx_cmd(), and
 * finally starts processing in the requested context (direct call, tasklet
 * or thread), honoring pref_context.
 */
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
{
        int res = 0;
        unsigned long flags = 0;
        struct scst_session *sess = cmd->sess;

        TRACE_ENTRY();

        TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
        TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag, 
                (uint64_t)cmd->lun, cmd->cdb_len);
        TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Recieving CDB",
                cmd->cdb, cmd->cdb_len);

        /* Direct processing is illegal from hard IRQ: downgrade to tasklet */
        if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
                         (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
        {
                PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
                        "SCST_CONTEXT_TASKLET instead\n", pref_context,
                        cmd->tgtt->name);
                pref_context = SCST_CONTEXT_TASKLET;
        }

        spin_lock_irqsave(&scst_list_lock, flags);

        /* Let's make it here, this will save us a lock or atomic */
        sess->sess_cmd_count++;

        list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);

        /* Session not fully initialized yet: defer or fail the command */
        if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
                switch(sess->init_phase) {
                case SCST_SESS_IPH_SUCCESS:
                        /* Init just succeeded: proceed normally */
                        break;
                case SCST_SESS_IPH_INITING:
                        /* Queue until the session init completes */
                        TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
                        list_add_tail(&cmd->cmd_list_entry, 
                                &sess->init_deferred_cmd_list);
                        goto out_unlock_flags;
                case SCST_SESS_IPH_FAILED:
                        /* Session init failed: answer BUSY right away */
                        scst_set_busy(cmd);
                        cmd->state = SCST_CMD_STATE_XMIT_RESP;
                        TRACE_DBG("Adding cmd %p to active cmd list", cmd);
                        list_add_tail(&cmd->cmd_list_entry, 
                                &scst_active_cmd_list);
                        goto active;
                default:
                        BUG();
                }
        }

        /* LUN parse error deferred by scst_rx_cmd() */
        if (unlikely(cmd->lun == (lun_t)-1)) {
                PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_lun_not_supported));
                cmd->state = SCST_CMD_STATE_XMIT_RESP;
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                goto active;
        }

        /* CDB length error deferred by scst_rx_cmd() */
        if (unlikely(cmd->cdb_len == 0)) {
                PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_invalid_opcode));
                cmd->state = SCST_CMD_STATE_XMIT_RESP;
                TRACE_DBG("Adding cmd %p to active cmd list", cmd);
                list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                goto active;
        }

        cmd->state = SCST_CMD_STATE_INIT;

        TRACE_DBG("Moving cmd %p to init cmd list", cmd);
        list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);

        /* Kick off init processing in the requested context */
        switch (pref_context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                /* scst_do_job_init() > 0 means it already dropped the lock */
                res = scst_do_job_init(&scst_init_cmd_list);
                if (res > 0)
                        goto out_unlock_flags;
                break;

        case SCST_CONTEXT_THREAD:
                goto out_thread_unlock_flags;

        case SCST_CONTEXT_TASKLET:
                scst_schedule_tasklet();
                goto out_unlock_flags;

        default:
                PRINT_ERROR_PR("Context %x is undefined, using thread one",
                            pref_context);
                goto out_thread_unlock_flags;
        }

active:
        /* The command is already on the active list: just dispatch it */
        switch (pref_context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                /* Drops scst_list_lock internally */
                scst_process_active_cmd(cmd, pref_context, &flags, 0);
                break;

        case SCST_CONTEXT_THREAD:
                goto out_thread_unlock_flags;

        case SCST_CONTEXT_TASKLET:
                scst_schedule_tasklet();
                goto out_unlock_flags;

        default:
                PRINT_ERROR_PR("Context %x is undefined, using thread one",
                            pref_context);
                goto out_thread_unlock_flags;
        }

out:
        TRACE_EXIT();
        return;

out_unlock_flags:
        spin_unlock_irqrestore(&scst_list_lock, flags);
        goto out;

out_thread_unlock_flags:
        /* Mark non-atomic so only a thread (never the tasklet) picks it up */
        cmd->non_atomic_only = 1;
        spin_unlock_irqrestore(&scst_list_lock, flags);
        wake_up(&scst_list_waitQ);
        goto out;
}
264
/*
 * SCST_CMD_STATE_DEV_PARSE handler: decodes the CDB (direction, transfer
 * length, CDB length) via the internal opcode table, then lets the device
 * handler's parse() callback refine or override the result, validates what
 * came back and selects the command's next state.  Returns one of the
 * SCST_CMD_STATE_RES_* codes.
 */
static int scst_parse_cmd(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_RES_CONT_SAME;
        int state;
        struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
        struct scst_device *dev = cmd->dev;
        struct scst_info_cdb cdb_info;
        int atomic = scst_cmd_atomic(cmd);
        int set_dir = 1;

        TRACE_ENTRY();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                TRACE_DBG("ABORTED set, returning ABORTED "
                        "for cmd %p", cmd);
                goto out_xmit;
        }

        /* Handlers without parse_atomic() may sleep: push to a thread */
        if (atomic && !dev->handler->parse_atomic) {
                TRACE_DBG("Dev handler %s parse() can not be "
                      "called in atomic context, rescheduling to the thread",
                      dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        /*
         * Expected transfer data supplied by the SCSI transport via the
         * target driver are untrusted, so we prefer to fetch them from CDB.
         * Additionally, not all transports support supplying the expected
         * transfer data.
         */

        if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type, 
                        &cdb_info) != 0)) 
        {
                /* Rate-limit the "unknown opcode" message to 10 occurrences */
                static int t;
                if (t < 10) {
                        t++;
                        PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
                                "Should you update scst_scsi_op_table?",
                                cmd->cdb[0], dev->handler->name);
                }
                if (scst_cmd_is_expected_set(cmd)) {
                        /* Fall back to the transport-supplied expectations */
                        TRACE(TRACE_MINOR, "Using initiator supplied values: "
                                "direction %d, transfer_len %d",
                                cmd->expected_data_direction,
                                cmd->expected_transfer_len);
                        cmd->data_direction = cmd->expected_data_direction;
                        cmd->bufflen = cmd->expected_transfer_len;
                        /* Restore (most probably) lost CDB length */
                        cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
                        if (cmd->cdb_len == -1) {
                                PRINT_ERROR_PR("Unable to get CDB length for "
                                        "opcode 0x%02x. Returning INVALID "
                                        "OPCODE", cmd->cdb[0]);
                                scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
                                goto out_xmit;
                        }
                }
                else {
                        /* Neither table nor transport knows: give up */
                        PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
                             "target %s not supplied expected values. "
                             "Returning INVALID OPCODE.", cmd->cdb[0], 
                             dev->handler->name, cmd->tgtt->name);
                        scst_set_cmd_error(cmd,
                                   SCST_LOAD_SENSE(scst_sense_invalid_opcode));
                        goto out_xmit;
                }
        } else {
                TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
                        "set %s), transfer_len=%d (expected len %d), flags=%d",
                        cdb_info.op_name, cdb_info.direction,
                        cmd->expected_data_direction,
                        scst_cmd_is_expected_set(cmd) ? "yes" : "no",
                        cdb_info.transfer_len, cmd->expected_transfer_len,
                        cdb_info.flags);

                /* Restore (most probably) lost CDB length */
                cmd->cdb_len = cdb_info.cdb_len;

                cmd->data_direction = cdb_info.direction;
                if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
                        cmd->bufflen = cdb_info.transfer_len;
                /* else cmd->bufflen remained as it was inited in 0 */
        }

        /* NACA in the CDB control byte is not implemented: reject */
        if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
                PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
                            "(opcode 0x%02x)", cmd->cdb[0]);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
                goto out_xmit;
        }

        /* Linked commands (LINK bit) are not implemented either: reject */
        if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
                PRINT_ERROR_PR("Linked commands are not supported "
                            "(opcode 0x%02x)", cmd->cdb[0]);
                scst_set_cmd_error(cmd,
                        SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
                goto out_xmit;
        }

        if (likely(!scst_is_cmd_local(cmd))) {
                /* Let the device handler adjust the parsed values/state */
                TRACE_DBG("Calling dev handler %s parse(%p)",
                      dev->handler->name, cmd);
                TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
                state = dev->handler->parse(cmd, &cdb_info);
                TRACE_DBG("Dev handler %s parse() returned %d",
                        dev->handler->name, state);

                /* -1 means "same as the buffer length" */
                if (cmd->data_len == -1)
                        cmd->data_len = cmd->bufflen;

                if (state == SCST_CMD_STATE_DEFAULT)
                        state = SCST_CMD_STATE_PREPARE_SPACE;
        }
        else
                /* Locally-processed command: skip the handler callback */
                state = SCST_CMD_STATE_PREPARE_SPACE;

#ifdef EXTRACHECKS
        /* Sanity-check the direction/bufflen/state combination from parse() */
        if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
                if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
                        (state != SCST_CMD_STATE_DEV_PARSE)) ||
                    ((cmd->bufflen != 0) && 
                        (cmd->data_direction == SCST_DATA_NONE)) ||
                    ((cmd->bufflen == 0) && 
                        (cmd->data_direction != SCST_DATA_NONE)) ||
                    ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
                        (state > SCST_CMD_STATE_PREPARE_SPACE))) 
                {
                        PRINT_ERROR_PR("Dev handler %s parse() returned "
                                       "invalid cmd data_direction %d, "
                                       "bufflen %zd or state %d (opcode 0x%x)",
                                       dev->handler->name, 
                                       cmd->data_direction, cmd->bufflen,
                                       state, cmd->cdb[0]);
                        goto out_error;
                }
        }
#endif

        switch (state) {
        /* All valid next states simply become the command's new state */
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_SEND_TO_MIDLEV:
        case SCST_CMD_STATE_DEV_DONE:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
                cmd->state = state;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                break;

        case SCST_CMD_STATE_REINIT:
                /* Remember the tgt_dev so REINIT can restore it later */
                cmd->tgt_dev_saved = tgt_dev_saved;
                cmd->state = state;
                res = SCST_CMD_STATE_RES_RESTART;
                set_dir = 0;
                break;

        case SCST_CMD_STATE_NEED_THREAD_CTX:
                TRACE_DBG("Dev handler %s parse() requested thread "
                      "context, rescheduling", dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                set_dir = 0;
                break;

        default:
                if (state >= 0) {
                        PRINT_ERROR_PR("Dev handler %s parse() returned "
                             "invalid cmd state %d (opcode %d)", 
                             dev->handler->name, state, cmd->cdb[0]);
                } else {
                        PRINT_ERROR_PR("Dev handler %s parse() returned "
                                "error %d (opcode %d)", dev->handler->name, 
                                state, cmd->cdb[0]);
                }
                goto out_error;
        }

        /* Default the response length: full buffer for READs, 0 otherwise */
        if ((cmd->resp_data_len == -1) && set_dir) {
                if (cmd->data_direction == SCST_DATA_READ)
                        cmd->resp_data_len = cmd->bufflen;
                else
                         cmd->resp_data_len = 0;
        }
        
out:
        TRACE_EXIT_HRES(res);
        return res;

out_error:
        /* dev_done() will be called as part of the regular cmd's finish */
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;

out_xmit:
        /* Skip device processing entirely and just send the response */
        cmd->state = SCST_CMD_STATE_XMIT_RESP;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
470
471 void scst_cmd_mem_work_fn(void *p)
472 {
473         TRACE_ENTRY();
474
475         spin_lock_bh(&scst_cmd_mem_lock);
476
477         scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
478         if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
479                 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
480                 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
481         } else {
482                 scst_cur_max_cmd_mem = scst_max_cmd_mem;
483                 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
484         }
485         TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
486
487         spin_unlock_bh(&scst_cmd_mem_lock);
488
489         TRACE_EXIT();
490         return;
491 }
492
493 int scst_check_mem(struct scst_cmd *cmd)
494 {
495         int res = 0;
496
497         TRACE_ENTRY();
498
499         if (cmd->mem_checked)
500                 goto out;
501
502         spin_lock_bh(&scst_cmd_mem_lock);
503
504         scst_cur_cmd_mem += cmd->bufflen;
505         cmd->mem_checked = 1;
506         if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
507                 goto out_unlock;
508
509         TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
510                 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
511                 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
512                 (cmd->sess->initiator_name[0] == '\0') ?
513                   "Anonymous" : cmd->sess->initiator_name,
514                 scst_cur_max_cmd_mem >> 10);
515
516         scst_cur_cmd_mem -= cmd->bufflen;
517         cmd->mem_checked = 0;
518         scst_set_busy(cmd);
519         cmd->state = SCST_CMD_STATE_XMIT_RESP;
520         res = 1;
521
522 out_unlock:
523         spin_unlock_bh(&scst_cmd_mem_lock);
524
525 out:
526         TRACE_EXIT_RES(res);
527         return res;
528 }
529
/*
 * Called after an allocation failure: lowers the current command-memory
 * limit to 3/4 of what is actually allocated (but not below 16 Mb) and
 * arms scst_cmd_mem_work to grow the limit back gradually.
 */
static void scst_low_cur_max_cmd_mem(void)
{
        TRACE_ENTRY();

        /* Stop a pending grow-back work item before lowering the limit */
        if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
                cancel_delayed_work(&scst_cmd_mem_work);
                flush_scheduled_work();
                clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
        }

        spin_lock_bh(&scst_cmd_mem_lock);

        /* New limit = 1/2 + 1/4 = 3/4 of the currently allocated amount */
        scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) + 
                                (scst_cur_cmd_mem >> 2);
        if (scst_cur_max_cmd_mem < 16*1024*1024)
                scst_cur_max_cmd_mem = 16*1024*1024;

        /* Re-arm the work item that slowly restores the limit */
        if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
                TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
                schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
                set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
        }

        spin_unlock_bh(&scst_cmd_mem_lock);

        TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);

        TRACE_EXIT();
        return;
}
560
561 static int scst_prepare_space(struct scst_cmd *cmd)
562 {
563         int r, res = SCST_CMD_STATE_RES_CONT_SAME;
564
565         TRACE_ENTRY();
566
567         if (cmd->data_direction == SCST_DATA_NONE) {
568                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
569                 goto out;
570         }
571
572         r = scst_check_mem(cmd);
573         if (unlikely(r != 0))
574                 goto out;
575
576         if (cmd->data_buf_tgt_alloc) {
577                 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
578                 r = cmd->tgtt->alloc_data_buf(cmd);
579                 cmd->data_buf_alloced = (r == 0);
580         } else
581                 r = scst_alloc_space(cmd);
582
583         if (r != 0) {
584                 if (scst_cmd_atomic(cmd)) {
585                         TRACE_MEM("%s", "Atomic memory allocation failed, "
586                               "rescheduling to the thread");
587                         res = SCST_CMD_STATE_RES_NEED_THREAD;
588                         goto out;
589                 } else
590                         goto out_no_space;
591         }
592
593         switch (cmd->data_direction) {
594         case SCST_DATA_WRITE:
595                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
596                 break;
597
598         default:
599                 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
600                 break;
601         }
602
603 out:
604         TRACE_EXIT_HRES(res);
605         return res;
606
607 out_no_space:
608         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
609                 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
610         scst_low_cur_max_cmd_mem();
611         scst_set_busy(cmd);
612         cmd->state = SCST_CMD_STATE_DEV_DONE;
613         res = SCST_CMD_STATE_RES_CONT_SAME;
614         goto out;
615 }
616
/*
 * No locks held on entry (takes tgt->tgt_lock and then scst_list_lock
 * itself -- that nesting order must be preserved).
 *
 * Queues cmd on the target's retry list after the target driver reported
 * QUEUE FULL.  finished_cmds is the tgt->finished_cmds value sampled by
 * the caller BEFORE the failed attempt.  Returns 0 when the command was
 * queued, or -1 when at least one command finished in the meantime and the
 * caller should simply retry immediately.
 */
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
        struct scst_tgt *tgt = cmd->sess->tgt;
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        spin_lock_irqsave(&tgt->tgt_lock, flags);
        tgt->retry_cmds++;
        smp_mb();
        TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
              tgt->retry_cmds);
        if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
                /* At least one cmd finished, so try again */
                tgt->retry_cmds--;
                TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
                      "(finished_cmds=%d, tgt->finished_cmds=%d, "
                      "retry_cmds=%d)", finished_cmds,
                      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
                res = -1;
                goto out_unlock_tgt;
        }

        TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
        /* IRQ already off */
        spin_lock(&scst_list_lock);
        list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
        spin_unlock(&scst_list_lock);

        /* Arm the timer that re-activates queued retries later */
        if (!tgt->retry_timer_active) {
                tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
                add_timer(&tgt->retry_timer);
                tgt->retry_timer_active = 1;
        }

out_unlock_tgt:
        spin_unlock_irqrestore(&tgt->tgt_lock, flags);

        TRACE_EXIT_RES(res);
        return res;
}
660
/*
 * SCST_CMD_STATE_RDY_TO_XFER handler: asks the target driver to start
 * receiving the WRITE data via its rdy_to_xfer() callback, handling
 * QUEUE FULL (queue-or-retry loop), thread-context requests and fatal
 * errors.  Returns an SCST_CMD_STATE_RES_* code.
 */
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
        int res, rc;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
        {
                TRACE_DBG("ABORTED set, returning ABORTED for "
                        "cmd %p", cmd);
                goto out_dev_done;
        }

        /* Drivers without rdy_to_xfer_atomic() may sleep: push to thread */
        if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
                TRACE_DBG("%s", "rdy_to_xfer() can not be "
                      "called in atomic context, rescheduling to the thread");
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        while (1) {
                /*
                 * Sample finished_cmds BEFORE calling the driver so that
                 * scst_queue_retry_cmd() can detect a command finishing in
                 * the window between the QUEUE FULL and the queueing.
                 */
                int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

                res = SCST_CMD_STATE_RES_CONT_NEXT;
                cmd->state = SCST_CMD_STATE_DATA_WAIT;

                TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef DEBUG_RETRY
                /* Fault injection: pretend QUEUE FULL ~1% of the time */
                if (((scst_random() % 100) == 75))
                        rc = SCST_TGT_RES_QUEUE_FULL;
                else
#endif
                        rc = cmd->tgtt->rdy_to_xfer(cmd);
                TRACE_DBG("rdy_to_xfer() returned %d", rc);

                if (likely(rc == SCST_TGT_RES_SUCCESS))
                        goto out;

                /* Restore the previous state */
                cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

                switch (rc) {
                case SCST_TGT_RES_QUEUE_FULL:
                {
                        /* 0: queued for later; non-zero: retry right away */
                        if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
                                break;
                        else
                                continue;
                }

                case SCST_TGT_RES_NEED_THREAD_CTX:
                {
                        TRACE_DBG("Target driver %s "
                              "rdy_to_xfer() requested thread "
                              "context, rescheduling", cmd->tgtt->name);
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        break;
                }

                default:
                        goto out_error_rc;
                }
                break;
        }

out:
        TRACE_EXIT_HRES(res);
        return res;

out_error_rc:
        if (rc == SCST_TGT_RES_FATAL_ERROR) {
                PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
                     "fatal error", cmd->tgtt->name);
        } else {
                PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
                            "value %d", cmd->tgtt->name, rc);
        }
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
        cmd->state = SCST_CMD_STATE_DEV_DONE;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
746
/*
 * Called by the target driver when the WRITE data transfer started by
 * rdy_to_xfer() has completed (or failed).  Maps the transfer status onto
 * the command's next state and resumes processing in the requested
 * context (direct call, tasklet or thread).
 */
void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
{
        unsigned long flags;

        TRACE_ENTRY();

        TRACE_DBG("Preferred context: %d", pref_context);
        TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
        cmd->non_atomic_only = 0;

        /* Direct processing is illegal from hard IRQ: downgrade to tasklet */
        if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
                         (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
        {
                PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
                        "SCST_CONTEXT_TASKLET instead\n", pref_context,
                        cmd->tgtt->name);
                pref_context = SCST_CONTEXT_TASKLET;
        }

        switch (status) {
        case SCST_RX_STATUS_SUCCESS:
                /* Data received OK: hand the command to the mid-level */
                cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
                break;

        case SCST_RX_STATUS_ERROR_SENSE_SET:
                /* Driver already set the sense: go straight to completion */
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;

        case SCST_RX_STATUS_ERROR_FATAL:
                /* Fatal: additionally suppress sending any response */
                set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
                /* go through */
        case SCST_RX_STATUS_ERROR:
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_hardw_error));
                cmd->state = SCST_CMD_STATE_DEV_DONE;
                break;

        default:
                PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
                            status);
                break;
        }

        switch (pref_context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                scst_check_retries(cmd->tgt, 0);
                __scst_process_active_cmd(cmd, pref_context, 0);
                break;

        /* NOTE: default deliberately placed before THREAD to fall into it */
        default:
                PRINT_ERROR_PR("Context %x is undefined, using thread one",
                            pref_context);
                /* go through */
        case SCST_CONTEXT_THREAD:
                spin_lock_irqsave(&scst_list_lock, flags);
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                /* Only a thread (never the tasklet) may pick this cmd up */
                cmd->non_atomic_only = 1;
                spin_unlock_irqrestore(&scst_list_lock, flags);
                scst_check_retries(cmd->tgt, 1);
                wake_up(&scst_list_waitQ);
                break;

        case SCST_CONTEXT_TASKLET:
                spin_lock_irqsave(&scst_list_lock, flags);
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                spin_unlock_irqrestore(&scst_list_lock, flags);
                scst_schedule_tasklet();
                scst_check_retries(cmd->tgt, 0);
                break;
        }

        TRACE_EXIT();
        return;
}
824
825 /* No locks supposed to be held */
/*
 * Post-completion sense processing: imports sense data (either from
 * rq_sense or already present in cmd->sense_buffer), handles the
 * reset Unit Attention, the "double UA" retry logic and UA delivery.
 *
 * No locks supposed to be held on entry.  NOTE: dev->dev_lock is taken
 * conditionally when dev_double_ua_possible is set and is then held
 * across the UA-processing section until the final unlock (or the
 * out_unlock path) — keep that in mind when editing control flow here.
 */
static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
	int rq_sense_len, int *next_state)
{
	int sense_valid;
	struct scst_device *dev = cmd->dev;
	int dbl_ua_possible, ua_sent = 0;

	TRACE_ENTRY();

	/* If we had a internal bus reset behind us, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd))
	{
		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
		      dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd,
		   SCST_LOAD_SENSE(scst_sense_reset_UA));
		/* just in case */
		cmd->ua_ignore = 0;
		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;
		smp_mb();
	}

	if (rq_sense != NULL) {
		/* Caller supplied sense (old scsi_request path): copy it in */
		sense_valid = SCST_SENSE_VALID(rq_sense);
		if (sense_valid) {
			memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
			/* 
			 * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
			 * in init_scst()
			 */
			memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
		}
	} else
		sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);

	dbl_ua_possible = dev->dev_double_ua_possible;
	TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
	if (unlikely(dbl_ua_possible)) {
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread dev_double_ua_possible */
		dbl_ua_possible = dev->dev_double_ua_possible;
		if (dbl_ua_possible)
			ua_sent = dev->dev_reset_ua_sent;
		else
			/* Flag cleared while we were taking the lock */
			spin_unlock_bh(&dev->dev_lock);
	}

	if (sense_valid) {
		TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
			     sizeof(cmd->sense_buffer));
		/* Check Unit Attention Sense Key */
		if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
			if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
				if (dbl_ua_possible) 
				{
					if (ua_sent) {
						/*
						 * Second reset UA in a row: clear the
						 * command status and retry it instead
						 * of reporting the UA again.
						 */
						TRACE(TRACE_MGMT, "%s", 
							"Double UA detected");
						/* Do retry */
						TRACE(TRACE_MGMT, "Retrying cmd %p "
							"(tag %d)", cmd, cmd->tag);
						cmd->status = 0;
						cmd->masked_status = 0;
						cmd->msg_status = 0;
						cmd->host_status = DID_OK;
						cmd->driver_status = 0;
						memset(cmd->sense_buffer, 0,
							sizeof(cmd->sense_buffer));
						cmd->retry = 1;
						*next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
						/* 
						 * Dev is still blocked by this cmd, so
						 * it's OK to clear SCST_DEV_SERIALIZED
						 * here.
						 */
						dev->dev_double_ua_possible = 0;
						dev->dev_serialized = 0;
						dev->dev_reset_ua_sent = 0;
						goto out_unlock;
					} else
						dev->dev_reset_ua_sent = 1;
				}
			}
			if (cmd->ua_ignore == 0) {
				/* dev_lock is held in the dbl_ua case, so use
				 * the lockless __ variant there */
				if (unlikely(dbl_ua_possible)) {
					__scst_process_UA(dev, cmd,
						cmd->sense_buffer,
						sizeof(cmd->sense_buffer), 0);
				} else {
					scst_process_UA(dev, cmd,
						cmd->sense_buffer,
						sizeof(cmd->sense_buffer), 0);
				}
			}
		}
	}

	if (unlikely(dbl_ua_possible)) {
		/* A UA-sensitive command made it through: leave dbl-UA mode */
		if (ua_sent && scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;
		}
		spin_unlock_bh(&dev->dev_lock);
	}

out:
	TRACE_EXIT();
	return;

out_unlock:
	spin_unlock_bh(&dev->dev_lock);
	goto out;
}
944
945 static int scst_check_auto_sense(struct scst_cmd *cmd)
946 {
947         int res = 0;
948
949         TRACE_ENTRY();
950
951         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
952             (!SCST_SENSE_VALID(cmd->sense_buffer) ||
953              SCST_NO_SENSE(cmd->sense_buffer)))
954         {
955                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
956                       "cmd->status=%x, cmd->masked_status=%x, "
957                       "cmd->msg_status=%x, cmd->host_status=%x, "
958                       "cmd->driver_status=%x", cmd->status, cmd->masked_status, 
959                       cmd->msg_status, cmd->host_status, cmd->driver_status);
960                 res = 1;
961         } else if (unlikely(cmd->host_status)) {
962                 if ((cmd->host_status == DID_REQUEUE) ||
963                     (cmd->host_status == DID_IMM_RETRY) ||
964                     (cmd->host_status == DID_SOFT_ERROR)) {
965                         scst_set_busy(cmd);
966                 } else {
967                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
968                                 "received, returning HARDWARE ERROR instead",
969                                 cmd->host_status);
970                         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
971                 }
972         }
973
974         TRACE_EXIT_RES(res);
975         return res;
976 }
977
/*
 * Common completion path: decodes the midlevel result word into the
 * per-field command status, patches the Write Protect bit into
 * MODE SENSE responses for read-only LUNs, then runs sense processing.
 *
 * On return *next_state holds the next command state (may be changed
 * by scst_check_sense(), e.g. for a double-UA retry).
 */
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int *next_state)
{
	unsigned char type;

	TRACE_ENTRY();

	/* Split the SCSI result word into its classic byte fields */
	cmd->status = result & 0xff;
	cmd->masked_status = status_byte(result);
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, "
	      "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
	      "cmd->driver_status=%x", result, cmd->status,
	      cmd->masked_status, cmd->msg_status, cmd->host_status,
	      cmd->driver_status);

	cmd->completed = 1;

	scst_dec_on_dev_cmd(cmd);

	/*
	 * For read-only LUNs on block/tape-like device types, force the
	 * Write Protect bit in MODE SENSE data so initiators see the LUN
	 * as write-protected.
	 */
	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
	     type == TYPE_TAPE)) {
		int32_t length;
		uint8_t *address;

		length = scst_get_buf_first(cmd, &address);
		TRACE_DBG("length %d", length);
		if (unlikely(length <= 0)) {
			PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
				__func__);
			goto next;
		}
		/* Device-specific parameter: byte 2 (6-byte) / byte 3 (10-byte) */
		if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
			address[2] |= 0x80;   /* Write Protect*/
		}
		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
			address[3] |= 0x80;   /* Write Protect*/
		}
		scst_put_buf(cmd, address);
	}

next:
	scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);

	TRACE_EXIT();
	return;
}
1030
1031 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1032 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1033                                             struct scsi_request **req)
1034 {
1035         struct scst_cmd *cmd = NULL;
1036
1037         if (scsi_cmd && (*req = scsi_cmd->sc_request))
1038                 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1039
1040         if (cmd == NULL) {
1041                 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1042                 if (*req)
1043                         scsi_release_request(*req);
1044         }
1045
1046         return cmd;
1047 }
1048
/*
 * Midlevel completion callback (pre-2.6.18 scsi_request variant).
 * Recovers the scst_cmd, runs common completion processing, tears
 * down the scsi_request and re-queues the command for further
 * processing in the current context.
 */
static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;
	int next_state;

	TRACE_ENTRY();

	WARN_ON(in_irq());

	/*
	 * We don't use scsi_cmd->resid, because:
	 * 1. Many low level initiator drivers don't use (set) this field
	 * 2. We determine the command's buffer size directly from CDB, 
	 *    so scsi_cmd->resid is not relevant for us, and target drivers 
	 *    should know the residual, if necessary, by comparing expected 
	 *    and actual transfer sizes.
	 */

	cmd = scst_get_cmd(scsi_cmd, &req);
	if (cmd == NULL)
		goto out;

	next_state = SCST_CMD_STATE_DEV_DONE;
	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), &next_state);

	/* Clear out request structure */
	req->sr_use_sg = 0;
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */

	/*
	 * NOTE(review): sr_bufflen was zeroed just above, so this always
	 * sets cmd->bufflen to 0 — the original "??" marker suggests the
	 * author was unsure too; confirm intended behavior.
	 */
	cmd->bufflen = req->sr_bufflen; //??

	scst_release_request(cmd);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	__scst_process_active_cmd(cmd, scst_get_context(), 0);

out:
	TRACE_EXIT();
	return;
}
1097 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1098 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1099 {
1100         struct scst_cmd *cmd;
1101         int next_state;
1102
1103         TRACE_ENTRY();
1104
1105         WARN_ON(in_irq());
1106
1107         /*
1108          * We don't use resid, because:
1109          * 1. Many low level initiator drivers don't use (set) this field
1110          * 2. We determine the command's buffer size directly from CDB,
1111          *    so resid is not relevant for us, and target drivers
1112          *    should know the residual, if necessary, by comparing expected
1113          *    and actual transfer sizes.
1114          */
1115
1116         cmd = (struct scst_cmd *)data;
1117         if (cmd == NULL)
1118                 goto out;
1119
1120         next_state = SCST_CMD_STATE_DEV_DONE;
1121         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE,
1122                 &next_state);
1123
1124         cmd->state = next_state;
1125         cmd->non_atomic_only = 0;
1126
1127         __scst_process_active_cmd(cmd, scst_get_context(), 0);
1128
1129 out:
1130         TRACE_EXIT();
1131         return;
1132 }
1133 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1134
/*
 * Completion callback for locally executed commands (dev handler exec()
 * or the built-in local handlers).  Normalizes next_state, performs
 * debug dumping / extra validation, runs sense processing and re-queues
 * the command for processing in the current context.
 */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
{
	TRACE_ENTRY();

	BUG_ON(in_irq());

	scst_dec_on_dev_cmd(cmd);

	/* DEFAULT means "proceed to the normal DEV_DONE state" */
	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_DEV_DONE;

	if (next_state == SCST_CMD_STATE_DEV_DONE) {
#if defined(DEBUG) || defined(TRACING)
		/* Debug builds: dump the executed S/G list */
		if (cmd->sg) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE(TRACE_RECV_TOP, 
			      "Exec'd %d S/G(s) at %p sg[0].page at %p",
			      cmd->sg_cnt, sg, (void*)sg[0].page);
			for(i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_RECV_TOP, 
					"Exec'd sg:", page_address(sg[i].page),
					sg[i].length);
			}
		}
#endif
	}


#ifdef EXTRACHECKS
	/* Guard against handlers returning a bogus next state */
	if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED)) 
	{
		PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
			    "state %d (opcode %d)", next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
				   SCST_LOAD_SENSE(scst_sense_hardw_error));
		next_state = SCST_CMD_STATE_DEV_DONE;
	}

	if (scst_check_auto_sense(cmd)) {
		PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
			"opcode %d", cmd->cdb[0]);
	}
#endif

	scst_check_sense(cmd, NULL, 0, &next_state);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	__scst_process_active_cmd(cmd, scst_get_context(), 0);

	TRACE_EXIT();
	return;
}
1192
/*
 * Built-in REPORT LUNS handler.  Builds the LUN list for the session's
 * tgt_dev list directly into the command buffer and completes the
 * command locally.  Always returns SCST_EXEC_COMPLETED.
 *
 * Limitation (see ToDo): only the first S/G entry is used, so the list
 * is capped accordingly.  Each 8-byte LUN entry has only its first two
 * bytes filled from acg_dev->lun (big-endian); the remaining six bytes
 * stay zero.
 */
static int scst_report_luns_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_COMPLETED;
	int dev_cnt = 0;
	int buffer_size;
	struct scst_tgt_dev *tgt_dev = NULL;
	uint8_t *buffer;

	TRACE_ENTRY();

	/* Start from a clean GOOD status */
	cmd->status = 0;
	cmd->masked_status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	/* ToDo: use full SG buffer, not only the first entry */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size <= 0))
		goto out_err;

	/* Allocation length below the 16-byte minimum is a CDB error */
	if (buffer_size < 16) {
		goto out_put_err;
	}

	memset(buffer, 0, buffer_size);

	/* sess->sess_tgt_dev_list is protected by suspended activity */
	list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
			    sess_tgt_dev_list_entry) 
	{
		/* Write the entry only if it fits; dev_cnt still counts it
		 * so the LUN LIST LENGTH reflects all LUNs */
		if (8 + 8 * dev_cnt + 2 <= buffer_size) {
			buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
			buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
		}
		dev_cnt++;
		/* Tmp, until ToDo above done */
		if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
			break;
	}

	/* Set the response header: LUN LIST LENGTH in bytes, big-endian */
	dev_cnt *= 8;
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	/* Total response length = 8-byte header + LUN list */
	dev_cnt += 8;

	scst_put_buf(cmd, buffer);

	if (buffer_size > dev_cnt)
		scst_set_resp_data_len(cmd, dev_cnt);
	
out_done:
	cmd->completed = 1;

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);
	return res;
	
out_put_err:
	scst_put_buf(cmd, buffer);

out_err:
	scst_set_cmd_error(cmd,
		   SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	goto out_done;
}
1265
1266 static int scst_pre_select(struct scst_cmd *cmd)
1267 {
1268         int res = SCST_EXEC_NOT_COMPLETED;
1269
1270         TRACE_ENTRY();
1271
1272         if (scst_cmd_atomic(cmd)) {
1273                 res = SCST_EXEC_NEED_THREAD;
1274                 goto out;
1275         }
1276
1277         scst_block_dev(cmd->dev, 1);
1278         /* Device will be unblocked in scst_done_cmd_check() */
1279
1280         if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1281                 int rc = scst_set_pending_UA(cmd);
1282                 if (rc == 0) {
1283                         res = SCST_EXEC_COMPLETED;
1284                         cmd->completed = 1;
1285                         /* Report the result */
1286                         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1287                         goto out;
1288                 }
1289         }
1290
1291 out:
1292         TRACE_EXIT_RES(res);
1293         return res;
1294 }
1295
1296 static inline void scst_report_reserved(struct scst_cmd *cmd)
1297 {
1298         TRACE_ENTRY();
1299
1300         scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1301         cmd->completed = 1;
1302         /* Report the result */
1303         scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1304
1305         TRACE_EXIT();
1306         return;
1307 }
1308
/*
 * Built-in RESERVE / RESERVE(10) handler.  Rejects 3rd-party
 * reservations, then, under dev->dev_lock, either reports a
 * reservation conflict or marks every other tgt_dev of the device
 * as reserved-against.
 *
 * Needs process context (blocks the device); in atomic context
 * requests rescheduling to a thread.
 */
static int scst_reserve_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	/* 3rd-party reservations are not supported */
	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
		     "(lun=%Ld)", (uint64_t)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		cmd->completed = 1;
		res = SCST_EXEC_COMPLETED;
		goto out;
	}

	dev = cmd->dev;
	scst_block_dev(dev, 1);
	/* Device will be unblocked in scst_done_cmd_check() */

	spin_lock_bh(&dev->dev_lock);

	/* Reserved by another session: conflict */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		scst_report_reserved(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		res = SCST_EXEC_COMPLETED;
		goto out_unlock;
	}

	/* Mark every other session's tgt_dev as reserved-against */
	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			    dev_tgt_dev_list_entry) 
	{
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED, 
				&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 1;

out_unlock:
	spin_unlock_bh(&dev->dev_lock);
	
out:
	TRACE_EXIT_RES(res);
	return res;
}
1361
1362 static int scst_release_local(struct scst_cmd *cmd)
1363 {
1364         int res = SCST_EXEC_NOT_COMPLETED;
1365         struct scst_tgt_dev *tgt_dev_tmp;
1366         struct scst_device *dev;
1367
1368         TRACE_ENTRY();
1369
1370         dev = cmd->dev;
1371
1372         scst_block_dev(dev, 1);
1373         cmd->blocking = 1;
1374         TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1375
1376         spin_lock_bh(&dev->dev_lock);
1377
1378         /* 
1379          * The device could be RELEASED behind us, if RESERVING session 
1380          * is closed (see scst_free_tgt_dev()), but this actually doesn't 
1381          * matter, so use lock and no retest for DEV_RESERVED bits again
1382          */
1383         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1384                 res = SCST_EXEC_COMPLETED;
1385                 cmd->status = 0;
1386                 cmd->masked_status = 0;
1387                 cmd->msg_status = 0;
1388                 cmd->host_status = DID_OK;
1389                 cmd->driver_status = 0;
1390         } else {
1391                 list_for_each_entry(tgt_dev_tmp,
1392                                     &dev->dev_tgt_dev_list,
1393                                     dev_tgt_dev_list_entry) 
1394                 {
1395                         clear_bit(SCST_TGT_DEV_RESERVED, 
1396                                 &tgt_dev_tmp->tgt_dev_flags);
1397                 }
1398                 dev->dev_reserved = 0;
1399         }
1400
1401         spin_unlock_bh(&dev->dev_lock);
1402
1403         if (res == SCST_EXEC_COMPLETED) {
1404                 cmd->completed = 1;
1405                 /* Report the result */
1406                 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1407         }
1408
1409         TRACE_EXIT_RES(res);
1410         return res;
1411 }
1412
1413 /* 
1414  * The result of cmd execution, if any, should be reported 
1415  * via scst_cmd_done_local() 
1416  */
/* 
 * Pre-execution checks, run in order: reservation conflict, internal
 * bus reset UA, pending UA delivery, read-only protection.  The result
 * of cmd execution, if any, is reported via scst_cmd_done_local(),
 * after which cmd, sess & tgt_dev may already be freed.
 */
static int scst_pre_exec(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	TRACE_ENTRY();

	/* Reserve check before Unit Attention */
	/* These opcodes are allowed through a reservation held by others */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
	    (cmd->cdb[0] != INQUIRY) &&
	    (cmd->cdb[0] != REPORT_LUNS) &&
	    (cmd->cdb[0] != RELEASE) &&
	    (cmd->cdb[0] != RELEASE_10) &&
	    (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
	    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
	    (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) 
	{
		scst_report_reserved(cmd);
		res = SCST_EXEC_COMPLETED;
		goto out;
	}

	/* If we had a internal bus reset, set the command error unit attention */
	if ((cmd->dev->scsi_dev != NULL) &&
	    unlikely(cmd->dev->scsi_dev->was_reset) &&
	    scst_is_ua_command(cmd)) 
	{
		struct scst_device *dev = cmd->dev;
		int done = 0;
		/* Prevent more than 1 cmd to be triggered by was_reset */
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread was_reset */
		if (dev->scsi_dev->was_reset) {
			TRACE(TRACE_MGMT, "was_reset is %d", 1);
			scst_set_cmd_error(cmd,
				   SCST_LOAD_SENSE(scst_sense_reset_UA));
			/* It looks like it is safe to clear was_reset here */
			dev->scsi_dev->was_reset = 0;
			smp_mb();
			done = 1;
		}
		spin_unlock_bh(&dev->dev_lock);

		if (done)
			goto out_done;
	}

	/* Deliver a queued Unit Attention, if any, instead of the command */
	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
	    scst_is_ua_command(cmd)) 
	{
		rc = scst_set_pending_UA(cmd);
		if (rc == 0)
			goto out_done;
	}

	/* Check READ_ONLY device status */
	if (tgt_dev->acg_dev->rd_only_flag &&
	    (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modify cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
	{
		scst_set_cmd_error(cmd,
			   SCST_LOAD_SENSE(scst_sense_data_protect));
		goto out_done;
	}
out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	res = SCST_EXEC_COMPLETED;
	cmd->completed = 1;
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	goto out;
}
1499
1500 /* 
1501  * The result of cmd execution, if any, should be reported 
1502  * via scst_cmd_done_local() 
1503  */
1504 static inline int scst_local_exec(struct scst_cmd *cmd)
1505 {
1506         int res = SCST_EXEC_NOT_COMPLETED;
1507
1508         TRACE_ENTRY();
1509
1510         /*
1511          * Adding new commands here don't forget to update
1512          * scst_is_cmd_local() in scsi_tgt.h, if necessary
1513          */
1514
1515         switch (cmd->cdb[0]) {
1516         case MODE_SELECT:
1517         case MODE_SELECT_10:
1518         case LOG_SELECT:
1519                 res = scst_pre_select(cmd);
1520                 break;
1521         case RESERVE:
1522         case RESERVE_10:
1523                 res = scst_reserve_local(cmd);
1524                 break;
1525         case RELEASE:
1526         case RELEASE_10:
1527                 res = scst_release_local(cmd);
1528                 break;
1529         case REPORT_LUNS:
1530                 res = scst_report_luns_local(cmd);
1531                 break;
1532         }
1533
1534         TRACE_EXIT_RES(res);
1535         return res;
1536 }
1537
/*
 * Execute one command: runs pre-exec checks, local handlers, the dev
 * handler's exec() hook, and finally submits to the SCSI midlevel for
 * real scsi_dev devices.  Returns SCST_EXEC_COMPLETED,
 * SCST_EXEC_NOT_COMPLETED or SCST_EXEC_NEED_THREAD.
 *
 * After any of the exec hooks returns a "completed" code, cmd, sess
 * and tgt_dev may already be freed — no dereferences past that point.
 */
static int scst_do_send_to_midlev(struct scst_cmd *cmd)
{
	int rc = SCST_EXEC_NOT_COMPLETED;

	TRACE_ENTRY();

	cmd->sent_to_midlev = 1;
	cmd->state = SCST_CMD_STATE_EXECUTING;
	cmd->scst_cmd_done = scst_cmd_done_local;

	set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	/* Abort may have arrived while the cmd was queued */
	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_aborted;
	}

	rc = scst_pre_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
			goto out;
		else if (rc == SCST_EXEC_NEED_THREAD)
			goto out_clear;
		else
			goto out_rc_error;
	}

	rc = scst_local_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
			goto out;
		else if (rc == SCST_EXEC_NEED_THREAD)
			goto out_clear;
		else
			goto out_rc_error;
	}

	/* Give the device handler a chance to execute the cmd itself */
	if (cmd->dev->handler->exec) {
		struct scst_device *dev = cmd->dev;
		TRACE_DBG("Calling dev handler %s exec(%p)",
		      dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
		cmd->scst_cmd_done = scst_cmd_done_local;
		rc = dev->handler->exec(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		TRACE_DBG("Dev handler %s exec() returned %d",
		      dev->handler->name, rc);
		if (rc != SCST_EXEC_NOT_COMPLETED) {
			if (rc == SCST_EXEC_COMPLETED)
				goto out;
			else if (rc == SCST_EXEC_NEED_THREAD)
				goto out_clear;
			else
				goto out_rc_error;
		}
	}

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
	
	/* Virtual devices have no scsi_dev; their handler must exec() */
	if (unlikely(cmd->dev->scsi_dev == NULL)) {
		PRINT_ERROR_PR("Command for virtual device must be "
			"processed by device handler (lun %Ld)!",
			(uint64_t)cmd->lun);
		goto out_error;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
	/* Old kernels: submit via scsi_request / scst_do_req() */
	if (scst_alloc_request(cmd) != 0) {
		PRINT_INFO_PR("%s", "Unable to allocate request, "
			"sending BUSY status");
		goto out_busy;
	}
	
	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		    (void *)cmd->scsi_req->sr_buffer,
		    cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
		    cmd->retries);
#else
	/* New kernels: asynchronous submission via scst_exec_req() */
	rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
			cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
			cmd->timeout, cmd->retries, cmd, scst_cmd_done,
			GFP_KERNEL);
	if (rc) {
		PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
		goto out_error;
	}
#endif

	rc = SCST_EXEC_COMPLETED;

out:
	TRACE_EXIT();
	return rc;

out_clear:
	/* Restore the state */
	cmd->sent_to_midlev = 0;
	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	goto out;

out_rc_error:
	PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
		    "invalid code %d", cmd->dev->handler->name, rc);
	/* go through */

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->completed = 1;
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	goto out;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) 
out_busy:
	scst_set_busy(cmd);
	cmd->completed = 1;
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	goto out;
#endif

out_aborted:
	rc = SCST_EXEC_COMPLETED;
	/* Report the result. The cmd is not completed */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	goto out;
}
1670
/*
 * Pass cmd to the mid-level (or the dev handler) for execution while
 * enforcing per-tgt_dev serial number (sn) ordering.  Returns one of
 * the SCST_CMD_STATE_RES_* codes.
 *
 * NOTE(review): after scst_do_send_to_midlev() the cmd (and possibly
 * sess/tgt_dev) may already be freed by the completion path, hence the
 * "!!" comments below — do not touch cmd past those points.
 */
static int scst_send_to_midlev(struct scst_cmd *cmd)
{
        int res, rc;
        struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
        struct scst_device *dev = cmd->dev;
        int expected_sn;
        int count;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        res = SCST_CMD_STATE_RES_CONT_NEXT;

        /* A dev handler exec() that can't run atomically needs a thread */
        if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
                TRACE_DBG("Dev handler %s exec() can not be "
                      "called in atomic context, rescheduling to the thread",
                      dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        /* Non-zero means the device is blocked; the cmd was re-queued */
        if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
                goto out;

        scst_inc_cmd_count(); /* protect dev & tgt_dev */

        /* Internal and retried cmds bypass the sn-ordering machinery */
        if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
                rc = scst_do_send_to_midlev(cmd);
                /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                if (rc == SCST_EXEC_NEED_THREAD) {
                        TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                              "thread context, rescheduling");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        scst_dec_on_dev_cmd(cmd);
                        goto out_dec_cmd_count;
                } else {
                        BUG_ON(rc != SCST_EXEC_COMPLETED);
                        goto out_unplug;
                }
        }

        /*
         * Out-of-order cmd: defer it until its sn becomes the expected
         * one.  The re-check under sn_lock closes the race with a
         * concurrent increment of expected_sn.
         */
        expected_sn = tgt_dev->expected_sn;
        if (cmd->sn != expected_sn) {
                spin_lock_bh(&tgt_dev->sn_lock);
                tgt_dev->def_cmd_count++;
                smp_mb();
                barrier(); /* to reread expected_sn */
                expected_sn = tgt_dev->expected_sn;
                if (cmd->sn != expected_sn) {
                        scst_dec_on_dev_cmd(cmd);
                        TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
                              "expected_sn=%d)", cmd, cmd->sn, expected_sn);
                        list_add_tail(&cmd->sn_cmd_list_entry,
                                      &tgt_dev->deferred_cmd_list);
                        spin_unlock_bh(&tgt_dev->sn_lock);
                        /* !! At this point cmd can be already freed !! */
                        goto out_dec_cmd_count;
                } else {
                        TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
                              "expected_sn %d, continuing", expected_sn);
                        tgt_dev->def_cmd_count--;
                        spin_unlock_bh(&tgt_dev->sn_lock);
                }
        }

        /*
         * Execute this cmd, then keep draining deferred cmds that become
         * in-order as expected_sn advances.
         */
        count = 0;
        while(1) {
                rc = scst_do_send_to_midlev(cmd);
                if (rc == SCST_EXEC_NEED_THREAD) {
                        TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
                              "thread context, rescheduling");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        scst_dec_on_dev_cmd(cmd);
                        /* Unplug only if at least one cmd was submitted */
                        if (count != 0)
                                goto out_unplug;
                        else
                                goto out_dec_cmd_count;
                }
                BUG_ON(rc != SCST_EXEC_COMPLETED);
                /* !! At this point cmd can be already freed !! */
                count++;
                expected_sn = __scst_inc_expected_sn(tgt_dev);
                /* Pick up a deferred cmd that is now in order, if any */
                cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
                if (cmd == NULL)
                        break;
                if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
                        break;
        }

out_unplug:
        /* Kick the SCSI mid-level queue to start real execution */
        if (dev->scsi_dev != NULL)
                generic_unplug_device(dev->scsi_dev->request_queue);

out_dec_cmd_count:
        scst_dec_cmd_count();
        /* !! At this point sess, dev and tgt_dev can be already freed !! */

out:
        TRACE_EXIT_HRES(res);
        return res;
}
1772
1773 static struct scst_cmd *scst_create_prepare_internal_cmd(
1774         struct scst_cmd *orig_cmd, int bufsize)
1775 {
1776         struct scst_cmd *res;
1777         int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1778
1779         TRACE_ENTRY();
1780
1781         res = scst_alloc_cmd(gfp_mask);
1782         if (unlikely(res == NULL)) {
1783                 goto out;
1784         }
1785
1786         res->sess = orig_cmd->sess;
1787         res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1788         res->atomic = scst_cmd_atomic(orig_cmd);
1789         res->internal = 1;
1790         res->tgtt = orig_cmd->tgtt;
1791         res->tgt = orig_cmd->tgt;
1792         res->dev = orig_cmd->dev;
1793         res->tgt_dev = orig_cmd->tgt_dev;
1794         res->lun = orig_cmd->lun;
1795         res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1796         res->data_direction = SCST_DATA_UNKNOWN;
1797         res->orig_cmd = orig_cmd;
1798
1799         res->bufflen = bufsize;
1800         if (bufsize > 0) {
1801                 if (scst_alloc_space(res) != 0)
1802                         PRINT_ERROR("Unable to create buffer (size %d) for "
1803                                 "internal cmd", bufsize);
1804                         goto out_free_res;
1805         }
1806
1807 out:
1808         TRACE_EXIT_HRES((unsigned long)res);
1809         return res;
1810
1811 out_free_res:
1812         scst_destroy_cmd(res);
1813         res = NULL;
1814         goto out;
1815 }
1816
1817 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1818 {
1819         TRACE_ENTRY();
1820
1821         if (cmd->bufflen > 0)
1822                 scst_release_space(cmd);
1823         scst_destroy_cmd(cmd);
1824
1825         TRACE_EXIT();
1826         return;
1827 }
1828
1829 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1830 {
1831         int res = SCST_CMD_STATE_RES_RESTART;
1832 #define sbuf_size 252
1833         static const unsigned char request_sense[6] =
1834             { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1835         struct scst_cmd *rs_cmd;
1836
1837         TRACE_ENTRY();
1838
1839         rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1840         if (rs_cmd != 0)
1841                 goto out_error;
1842
1843         memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1844         rs_cmd->cdb_len = sizeof(request_sense);
1845         rs_cmd->data_direction = SCST_DATA_READ;
1846
1847         spin_lock_irq(&scst_list_lock);
1848         list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1849         spin_unlock_irq(&scst_list_lock);
1850
1851 out:
1852         TRACE_EXIT_RES(res);
1853         return res;
1854
1855 out_error:
1856         res = -1;
1857         goto out;
1858 #undef sbuf_size
1859 }
1860
1861 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1862 {
1863         struct scst_cmd *orig_cmd = cmd->orig_cmd;
1864         uint8_t *buf;
1865         int len;
1866
1867         TRACE_ENTRY();
1868
1869         BUG_ON(orig_cmd);
1870
1871         len = scst_get_buf_first(cmd, &buf);
1872
1873         if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1874             (!SCST_NO_SENSE(buf))) 
1875         {
1876                 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned", 
1877                         buf, len);
1878                 memcpy(orig_cmd->sense_buffer, buf,
1879                         (sizeof(orig_cmd->sense_buffer) > len) ?
1880                                 len : sizeof(orig_cmd->sense_buffer));
1881         } else {
1882                 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1883                         "REQUEST SENSE, returning HARDWARE ERROR");
1884                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1885         }
1886
1887         scst_put_buf(cmd, buf);
1888
1889         scst_free_internal_cmd(cmd);
1890
1891         TRACE_EXIT_HRES((unsigned long)orig_cmd);
1892         return orig_cmd;
1893 }
1894
1895 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1896 {
1897         int res = 0, rc;
1898         unsigned char type;
1899
1900         TRACE_ENTRY();
1901
1902         if (cmd->cdb[0] == REQUEST_SENSE) {
1903                 if (cmd->internal)
1904                         cmd = scst_complete_request_sense(cmd);
1905         } else if (scst_check_auto_sense(cmd)) {
1906                 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1907                             "without sense data (opcode 0x%x), issuing "
1908                             "REQUEST SENSE", cmd->cdb[0]);
1909                 rc = scst_prepare_request_sense(cmd);
1910                 if (res > 0) {
1911                         *pres = rc;
1912                         res = 1;
1913                         goto out;
1914                 } else {
1915                         PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1916                                     "returning HARDWARE ERROR");
1917                         scst_set_cmd_error(cmd,
1918                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
1919                 }
1920         }
1921
1922         type = cmd->dev->handler->type;
1923         if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1924             cmd->tgt_dev->acg_dev->rd_only_flag &&
1925             (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1926              type == TYPE_TAPE))
1927         {
1928                 int32_t length;
1929                 uint8_t *address;
1930
1931                 length = scst_get_buf_first(cmd, &address);
1932                 if (length <= 0)
1933                         goto out;
1934                 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1935                         address[2] |= 0x80;   /* Write Protect*/
1936                 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1937                         address[3] |= 0x80;   /* Write Protect*/
1938                 scst_put_buf(cmd, address);
1939         }
1940
1941         /* 
1942          * Check and clear NormACA option for the device, if necessary,
1943          * since we don't support ACA
1944          */
1945         if ((cmd->cdb[0] == INQUIRY) &&
1946             !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1947             (cmd->resp_data_len > SCST_INQ_BYTE3))
1948         {
1949                 uint8_t *buffer;
1950                 int buflen;
1951
1952                 /* ToDo: all pages ?? */
1953                 buflen = scst_get_buf_first(cmd, &buffer);
1954                 if (buflen > 0) {
1955                         if (buflen > SCST_INQ_BYTE3) {
1956 #ifdef EXTRACHECKS
1957                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1958                                         PRINT_INFO_PR("NormACA set for device: "
1959                                             "lun=%Ld, type 0x%02x", 
1960                                             (uint64_t)cmd->lun, buffer[0]);
1961                                 }
1962 #endif
1963                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1964                         } else
1965                                 scst_set_cmd_error(cmd,
1966                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1967
1968                         scst_put_buf(cmd, buffer);
1969                 }
1970         }
1971
1972         if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
1973                 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
1974                                                 &cmd->tgt_dev->tgt_dev_flags)) {
1975                         struct scst_tgt_dev *tgt_dev_tmp;
1976                         TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
1977                               (uint64_t)cmd->lun, cmd->masked_status);
1978                         TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1979                                      sizeof(cmd->sense_buffer));
1980                         /* Clearing the reservation */
1981                         list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
1982                                             dev_tgt_dev_list_entry) {
1983                                 clear_bit(SCST_TGT_DEV_RESERVED, 
1984                                         &tgt_dev_tmp->tgt_dev_flags);
1985                         }
1986                         cmd->dev->dev_reserved = 0;
1987                 }
1988                 scst_unblock_dev(cmd->dev);
1989         }
1990         
1991         if (unlikely((cmd->cdb[0] == MODE_SELECT) || 
1992                      (cmd->cdb[0] == MODE_SELECT_10) ||
1993                      (cmd->cdb[0] == LOG_SELECT)))
1994         {
1995                 if (cmd->status == 0) {
1996                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
1997                                 "setting the SELECT UA (lun=%Ld)", 
1998                                 (uint64_t)cmd->lun);
1999                         spin_lock_bh(&scst_temp_UA_lock);
2000                         if (cmd->cdb[0] == LOG_SELECT) {
2001                                 scst_set_sense(scst_temp_UA,
2002                                         sizeof(scst_temp_UA),
2003                                         UNIT_ATTENTION, 0x2a, 0x02);
2004                         } else {
2005                                 scst_set_sense(scst_temp_UA,
2006                                         sizeof(scst_temp_UA),
2007                                         UNIT_ATTENTION, 0x2a, 0x01);
2008                         }
2009                         scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2010                                 sizeof(scst_temp_UA), 1);
2011                         spin_unlock_bh(&scst_temp_UA_lock);
2012                 }
2013                 scst_unblock_dev(cmd->dev);
2014         }
2015
2016 out:
2017         TRACE_EXIT_RES(res);
2018         return res;
2019 }
2020
/*
 * DEV_DONE state handler: run post-execution checks, give the dev
 * handler's dev_done() callback a chance to adjust the next state, then
 * dispatch on the resulting state.  Returns an SCST_CMD_STATE_RES_*
 * code.
 */
static int scst_dev_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_RES_CONT_SAME;
        int state;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        /* dev_done() not marked atomic-safe must run in a thread */
        if (atomic && !cmd->dev->handler->dev_done_atomic &&
            cmd->dev->handler->dev_done) 
        {
                TRACE_DBG("Dev handler %s dev_done() can not be "
                      "called in atomic context, rescheduling to the thread",
                      cmd->dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        /* Non-zero means a REQUEST SENSE was issued; res already set */
        if (scst_done_cmd_check(cmd, &res))
                goto out;

        state = SCST_CMD_STATE_XMIT_RESP;
        if (likely(!scst_is_cmd_local(cmd)) && 
            likely(cmd->dev->handler->dev_done != NULL))
        {
                int rc;
                TRACE_DBG("Calling dev handler %s dev_done(%p)",
                      cmd->dev->handler->name, cmd);
                rc = cmd->dev->handler->dev_done(cmd);
                TRACE_DBG("Dev handler %s dev_done() returned %d",
                      cmd->dev->handler->name, rc);
                /* The handler may override the next state */
                if (rc != SCST_CMD_STATE_DEFAULT)
                        state = rc;
        }

        switch (state) {
        /* REINIT restarts the cmd from LUN translation */
        case SCST_CMD_STATE_REINIT:
                cmd->state = state;
                res = SCST_CMD_STATE_RES_RESTART;
                break;

        /* Any other valid state: continue processing from there */
        case SCST_CMD_STATE_DEV_PARSE:
        case SCST_CMD_STATE_PREPARE_SPACE:
        case SCST_CMD_STATE_RDY_TO_XFER:
        case SCST_CMD_STATE_SEND_TO_MIDLEV:
        case SCST_CMD_STATE_DEV_DONE:
        case SCST_CMD_STATE_XMIT_RESP:
        case SCST_CMD_STATE_FINISHED:
                cmd->state = state;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                break;

        case SCST_CMD_STATE_NEED_THREAD_CTX:
                TRACE_DBG("Dev handler %s dev_done() requested "
                      "thread context, rescheduling",
                      cmd->dev->handler->name);
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                break;

        /* Invalid state or error from the handler: fail the cmd */
        default:
                if (state >= 0) {
                        PRINT_ERROR_PR("Dev handler %s dev_done() returned "
                                "invalid cmd state %d", 
                                cmd->dev->handler->name, state);
                } else {
                        PRINT_ERROR_PR("Dev handler %s dev_done() returned "
                                "error %d", cmd->dev->handler->name, 
                                state);
                }
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_hardw_error));
                cmd->state = SCST_CMD_STATE_XMIT_RESP;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                break;
        }

out:
        TRACE_EXIT_HRES(res);
        return res;
}
2101
/*
 * XMIT_RESP state handler: hand the response to the target driver via
 * xmit_response(), retrying on QUEUE_FULL.  Returns an
 * SCST_CMD_STATE_RES_* code.  Caution: on SCST_TGT_RES_SUCCESS the cmd
 * may be completed (and freed) before this function returns.
 */
static int scst_xmit_response(struct scst_cmd *cmd)
{
        int res, rc;
        int atomic = scst_cmd_atomic(cmd);

        TRACE_ENTRY();

        /* 
         * Check here also in order to avoid unnecessary delays of other
         * commands.
         */
        if (unlikely(cmd->sent_to_midlev == 0) &&
            (cmd->tgt_dev != NULL))
        {
                /* Cmd is finishing without execution: unblock its sn slot */
                TRACE(TRACE_SCSI_SERIALIZING,
                      "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
                scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
                cmd->sent_to_midlev = 1;
        }

        if (atomic && !cmd->tgtt->xmit_response_atomic) {
                TRACE_DBG("%s", "xmit_response() can not be "
                      "called in atomic context, rescheduling to the thread");
                res = SCST_CMD_STATE_RES_NEED_THREAD;
                goto out;
        }

        set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
        smp_mb__after_set_bit();

        /* Aborted on behalf of another initiator: report TASK ABORTED */
        if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
                if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
                        TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
                                "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
                        scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
                }
        }

        /* No response wanted (e.g. aborted by this initiator): just finish */
        if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
                TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
                        cmd, cmd->tag);
                cmd->state = SCST_CMD_STATE_FINISHED;
                res = SCST_CMD_STATE_RES_CONT_SAME;
                goto out;
        }

#ifdef DEBUG_TM
        /* Task-management debug: artificially delay selected cmds */
        if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
                if (atomic && !cmd->tgtt->xmit_response_atomic) {
                        TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        goto out;
                }
                TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
                        cmd, cmd->tag);
                schedule_timeout_uninterruptible(HZ);
        }
#endif

        /* Retry loop: repeat xmit_response() while the target queue is full */
        while (1) {
                /* Snapshot used by scst_queue_retry_cmd() to detect progress */
                int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

                res = SCST_CMD_STATE_RES_CONT_NEXT;
                cmd->state = SCST_CMD_STATE_XMIT_WAIT;

                TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(DEBUG) || defined(TRACING)
                if (cmd->sg) {
                        int i;
                        struct scatterlist *sg = cmd->sg;
                        TRACE(TRACE_SEND_BOT, 
                              "Xmitting %d S/G(s) at %p sg[0].page at %p",
                              cmd->sg_cnt, sg, (void*)sg[0].page);
                        for(i = 0; i < cmd->sg_cnt; ++i) {
                                TRACE_BUFF_FLAG(TRACE_SEND_BOT,
                                    "Xmitting sg:", page_address(sg[i].page),
                                    sg[i].length);
                        }
                }
#endif

#ifdef DEBUG_RETRY
                /* Fault injection: randomly simulate QUEUE_FULL */
                if (((scst_random() % 100) == 77))
                        rc = SCST_TGT_RES_QUEUE_FULL;
                else
#endif
                        rc = cmd->tgtt->xmit_response(cmd);
                TRACE_DBG("xmit_response() returned %d", rc);

                if (likely(rc == SCST_TGT_RES_SUCCESS))
                        goto out;

                /* Restore the previous state */
                cmd->state = SCST_CMD_STATE_XMIT_RESP;

                switch (rc) {
                case SCST_TGT_RES_QUEUE_FULL:
                {
                        /* 0: cmd queued for later retry; else retry now */
                        if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
                                break;
                        else
                                continue;
                }

                case SCST_TGT_RES_NEED_THREAD_CTX:
                {
                        TRACE_DBG("Target driver %s xmit_response() "
                              "requested thread context, rescheduling",
                              cmd->tgtt->name);
                        res = SCST_CMD_STATE_RES_NEED_THREAD;
                        break;
                }

                default:
                        goto out_error;
                }
                break;
        }

out:
        /* Caution: cmd can be already dead here */
        TRACE_EXIT_HRES(res);
        return res;

out_error:
        if (rc == SCST_TGT_RES_FATAL_ERROR) {
                PRINT_ERROR_PR("Target driver %s xmit_response() returned "
                        "fatal error", cmd->tgtt->name);
        } else {
                PRINT_ERROR_PR("Target driver %s xmit_response() returned "
                        "invalid value %d", cmd->tgtt->name, rc);
        }
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->state = SCST_CMD_STATE_FINISHED;
        res = SCST_CMD_STATE_RES_CONT_SAME;
        goto out;
}
2240
/*
 * FINISHED state handler: final accounting and deallocation of a cmd —
 * drop it from the global lists, update the per-session and per-tgt_dev
 * counters and free it.  Returns SCST_CMD_STATE_RES_CONT_NEXT.
 */
static int scst_finish_cmd(struct scst_cmd *cmd)
{
        int res;

        TRACE_ENTRY();

        /* Return this cmd's buffer size to the global memory accounting */
        if (cmd->mem_checked) {
                spin_lock_bh(&scst_cmd_mem_lock);
                scst_cur_cmd_mem -= cmd->bufflen;
                spin_unlock_bh(&scst_cmd_mem_lock);
        }

        spin_lock_irq(&scst_list_lock);

        TRACE_DBG("Deleting cmd %p from cmd list", cmd);
        list_del(&cmd->cmd_list_entry);

        /* Wake up a task management cmd waiting on this cmd, if any */
        if (cmd->mgmt_cmnd)
                scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);

        /* tgt_dev can be NULL, e.g. when LUN translation failed */
        if (likely(cmd->tgt_dev != NULL))
                cmd->tgt_dev->cmd_count--;

        cmd->sess->sess_cmd_count--;

        list_del(&cmd->search_cmd_list_entry);

        spin_unlock_irq(&scst_list_lock);

        scst_free_cmd(cmd);

        res = SCST_CMD_STATE_RES_CONT_NEXT;

        TRACE_EXIT_HRES(res);
        return res;
}
2277
/*
 * Called by the target driver after the response for cmd has been sent.
 * Finishes the cmd either directly in the caller's context or defers it
 * to the tasklet, depending on the execution context.
 */
void scst_tgt_cmd_done(struct scst_cmd *cmd)
{
        int res = 0;
        unsigned long flags;
        int context;

        TRACE_ENTRY();

        BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

        /* In hard-IRQ context only the tasklet may process the cmd */
        if (in_irq())
                context = SCST_CONTEXT_TASKLET;
        else
                context = scst_get_context();

        TRACE_DBG("Context: %d", context);
        cmd->non_atomic_only = 0;
        cmd->state = SCST_CMD_STATE_FINISHED;

        switch (context) {
        case SCST_CONTEXT_DIRECT:
        case SCST_CONTEXT_DIRECT_ATOMIC:
                flags = 0;
                scst_check_retries(cmd->tgt, 0);
                res = __scst_process_active_cmd(cmd, context, 0);
                /* Direct processing of FINISHED must never need a thread */
                BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
                break;

        case SCST_CONTEXT_TASKLET:
        {
                /* Queue the cmd for the tasklet, then kick it */
                spin_lock_irqsave(&scst_list_lock, flags);
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
                spin_unlock_irqrestore(&scst_list_lock, flags);
                scst_schedule_tasklet();
                scst_check_retries(cmd->tgt, 0);
                break;
        }

        default:
                BUG();
                break;
        }

        TRACE_EXIT();
        return;
}
2325
/*
 * Resolve cmd->lun to a tgt_dev within the cmd's session and assign the
 * cmd its serial number (sn) for per-device ordering.
 *
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 * Called under scst_list_lock and IRQs disabled
 */
static int scst_translate_lun(struct scst_cmd *cmd)
{
        struct scst_tgt_dev *tgt_dev = NULL;
        int res = 0;

        TRACE_ENTRY();

        scst_inc_cmd_count();   

        if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
                res = -1;
                TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
                      (uint64_t)cmd->lun);
                list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
                                    sess_tgt_dev_list_entry) 
                {
                        if (tgt_dev->acg_dev->lun == cmd->lun) {
                                TRACE_DBG("tgt_dev %p found", tgt_dev);

                                /* No handler: LUN stays invisible remotely */
                                if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
                                        PRINT_INFO_PR("Dev handler for device "
                                          "%Ld is NULL, the device will not be "
                                          "visible remotely", (uint64_t)cmd->lun);
                                        break;
                                }
                                
                                /*
                                 * Re-translation after REINIT: release the
                                 * accounting held on the previous tgt_dev.
                                 */
                                if (cmd->state == SCST_CMD_STATE_REINIT) {
                                        cmd->tgt_dev_saved->cmd_count--;
                                        TRACE(TRACE_SCSI_SERIALIZING,
                                              "SCST_CMD_STATE_REINIT: "
                                              "incrementing expected_sn on tgt_dev_saved %p",
                                              cmd->tgt_dev_saved);
                                        scst_inc_expected_sn_unblock(
                                                cmd->tgt_dev_saved, cmd, 1);
                                }
                                cmd->tgt_dev = tgt_dev;
                                tgt_dev->cmd_count++;
                                cmd->dev = tgt_dev->acg_dev->dev;

                                /* ToDo: cmd->queue_type */

                                /* scst_list_lock is enough to protect that */
                                cmd->sn = tgt_dev->next_sn;
                                tgt_dev->next_sn++;

                                TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
                                        "cmd->sn: %d", cmd->sn);

                                res = 0;
                                break;
                        }
                }
                if (res != 0) {
                        TRACE_DBG("tgt_dev for lun %Ld not found, command to "
                                "unexisting LU?", (uint64_t)cmd->lun);
                        scst_dec_cmd_count();
                }
        } else {
                /* Activity suspended: park the session until resume */
                if ( !cmd->sess->waiting) {
                        TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
                              cmd->sess);
                        list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
                                      &scst_dev_wait_sess_list);
                        cmd->sess->waiting = 1;
                }
                scst_dec_cmd_count();
                res = 1;
        }

        TRACE_EXIT_RES(res);
        return res;
}
2403
/*
 * First processing step for a newly arrived command: translate its LUN
 * and queue it for parsing.  Called under scst_list_lock and IRQs disabled.
 *
 * Returns the scst_translate_lun() result: 0 on success, < 0 if the LUN
 * is unknown (the command is failed with the proper sense), > 0 if SCST
 * is suspended (the command is left on the init list for later).
 */
static int scst_process_init_cmd(struct scst_cmd *cmd)
{
        int res = 0;

        TRACE_ENTRY();

        res = scst_translate_lun(cmd);
        if (likely(res == 0)) {
                cmd->state = SCST_CMD_STATE_DEV_PARSE;
                /* Per-device queue limit exceeded: answer BUSY instead */
                if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
                        TRACE(TRACE_RETRY, "Too many pending commands in "
                                "session, returning BUSY to initiator \"%s\"",
                                (cmd->sess->initiator_name[0] == '\0') ?
                                  "Anonymous" : cmd->sess->initiator_name);
                        scst_set_busy(cmd);
                        cmd->state = SCST_CMD_STATE_XMIT_RESP;
                }
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
        } else if (res < 0) {
                /* Unknown LUN: fail the command with LUN NOT SUPPORTED sense */
                TRACE_DBG("Finishing cmd %p", cmd);
                scst_set_cmd_error(cmd,
                           SCST_LOAD_SENSE(scst_sense_lun_not_supported));
                cmd->state = SCST_CMD_STATE_XMIT_RESP;
                TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
        }
        /* res > 0: suspended, cmd intentionally stays on the init list */

        TRACE_EXIT_RES(res);
        return res;
}
2436
2437 /* 
2438  * Called under scst_list_lock and IRQs disabled
2439  * We don't drop it anywhere inside, because command execution
2440  * have to be serialized, i.e. commands must be executed in order
2441  * of their arrival, and we set this order inside scst_translate_lun().
2442  */
2443 static int scst_do_job_init(struct list_head *init_cmd_list)
2444 {
2445         int res = 1;
2446
2447         TRACE_ENTRY();
2448
2449         if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2450                 while (!list_empty(init_cmd_list)) {
2451                         struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2452                                                           typeof(*cmd),
2453                                                           cmd_list_entry);
2454                         res = scst_process_init_cmd(cmd);
2455                         if (res > 0)
2456                                 break;
2457                 }
2458         }
2459
2460         TRACE_EXIT_RES(res);
2461         return res;
2462 }
2463
/*
 * Drives a command's state machine as far as possible in the given
 * context.  Called with no locks held.
 *
 * context carries the execution context constant plus, optionally,
 * the SCST_PROCESSIBLE_ENV flag; both are cached on the command.
 * left_locked != 0 means the caller expects scst_list_lock to be held
 * (IRQs off) on return.
 *
 * Returns the SCST_CMD_STATE_RES_* code of the last state handler.
 */
static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
        int left_locked)
{
        int res;

        TRACE_ENTRY();

        /* State handlers may sleep; never valid in hard IRQ context */
        BUG_ON(in_irq());

        cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
                        SCST_CONTEXT_DIRECT_ATOMIC);
        cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;

        /*
         * Run state handlers back-to-back while they ask to continue
         * with the same command (RES_CONT_SAME).
         */
        do {
                switch (cmd->state) {
                case SCST_CMD_STATE_DEV_PARSE:
                        res = scst_parse_cmd(cmd);
                        break;

                case SCST_CMD_STATE_PREPARE_SPACE:
                        res = scst_prepare_space(cmd);
                        break;

                case SCST_CMD_STATE_RDY_TO_XFER:
                        res = scst_rdy_to_xfer(cmd);
                        break;

                case SCST_CMD_STATE_SEND_TO_MIDLEV:
                        res = scst_send_to_midlev(cmd);
                        /* !! At this point cmd, sess & tgt_dev can be already freed !! */
                        break;

                case SCST_CMD_STATE_DEV_DONE:
                        res = scst_dev_done(cmd);
                        break;

                case SCST_CMD_STATE_XMIT_RESP:
                        res = scst_xmit_response(cmd);
                        break;

                case SCST_CMD_STATE_FINISHED:
                        res = scst_finish_cmd(cmd);
                        break;

                default:
                        PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
                               cmd, cmd->state);
                        BUG();
                        res = SCST_CMD_STATE_RES_CONT_NEXT;
                        break;
                }
        } while(res == SCST_CMD_STATE_RES_CONT_SAME);

        if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
                /* Done with this command; re-take the lock if promised */
                if (left_locked)
                        spin_lock_irq(&scst_list_lock);
        } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
                /*
                 * The current state cannot be processed in this (atomic)
                 * context: re-queue the command for scst_cmd_thread() and
                 * wake one thread up.
                 */
                spin_lock_irq(&scst_list_lock);

                switch (cmd->state) {
                case SCST_CMD_STATE_DEV_PARSE:
                case SCST_CMD_STATE_PREPARE_SPACE:
                case SCST_CMD_STATE_RDY_TO_XFER:
                case SCST_CMD_STATE_SEND_TO_MIDLEV:
                case SCST_CMD_STATE_DEV_DONE:
                case SCST_CMD_STATE_XMIT_RESP:
                case SCST_CMD_STATE_FINISHED:
                        TRACE_DBG("Moving cmd %p to active cmd list", cmd);
                        list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
                        break;
#ifdef EXTRACHECKS
                /* not very valid commands */
                case SCST_CMD_STATE_DEFAULT:
                case SCST_CMD_STATE_NEED_THREAD_CTX:
                        PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
                                "useful list (left on scst cmd list)", cmd, 
                                cmd->state);
                        spin_unlock_irq(&scst_list_lock);
                        BUG();
                        spin_lock_irq(&scst_list_lock);
                        break;
#endif
                default:
                        break;
                }
                /* Prevent atomic contexts from picking this cmd up again */
                cmd->non_atomic_only = 1;
                if (!left_locked)
                        spin_unlock_irq(&scst_list_lock);
                wake_up(&scst_list_waitQ);
        } else if (res == SCST_CMD_STATE_RES_RESTART) {
                /* Command must go back through LUN translation */
                if (cmd->state == SCST_CMD_STATE_REINIT) {
                        spin_lock_irq(&scst_list_lock);
                        TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
                        list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
                        if (!left_locked)
                                spin_unlock_irq(&scst_list_lock);
                } else
                        BUG();
        } else
                BUG();

        TRACE_EXIT_RES(res);
        return res;
}
2569
/*
 * Processes every runnable command on the active list in the given
 * context.  Called under scst_list_lock and IRQs disabled; the lock is
 * dropped and re-taken inside scst_process_active_cmd(), hence the
 * restart-from-head iteration below.
 */
static void scst_do_job_active(struct list_head *active_cmd_list, int context)
{
        int res;
        struct scst_cmd *cmd;
        /* Nonzero when we may not sleep while processing */
        int atomic = ((context & ~SCST_PROCESSIBLE_ENV) == 
                        SCST_CONTEXT_DIRECT_ATOMIC);

        TRACE_ENTRY();

        tm_dbg_check_released_cmds();

restart:
        list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
                /* Commands flagged non_atomic_only must wait for a thread */
                if (atomic && cmd->non_atomic_only) {
                        TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
                        continue;
                }
                if (tm_dbg_check_cmd(cmd) != 0)
                        goto restart;
                /*
                 * The list may have changed while the lock was dropped
                 * inside this call, so restart from the head afterwards.
                 */
                res = scst_process_active_cmd(cmd, context, NULL, 1);
                if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
                        goto restart;
                } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
                        goto restart;
                } else if (res == SCST_CMD_STATE_RES_RESTART) {
                        break;
                } else
                        BUG();
        }

        TRACE_EXIT();
        return;
}
2604
2605 static inline int test_cmd_lists(void)
2606 {
2607         int res = !list_empty(&scst_active_cmd_list) ||
2608             (!list_empty(&scst_init_cmd_list) &&
2609              !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2610             test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2611             unlikely(scst_shut_threads_count > 0) ||
2612             tm_dbg_is_release();
2613         return res;
2614 }
2615
/*
 * Main loop of an SCST processing kernel thread: sleeps on
 * scst_list_waitQ until there is work, then drains the init and active
 * command lists, exiting on global shutdown or when asked to shrink
 * the thread pool.
 */
int scst_cmd_thread(void *arg)
{
        /* Serializes scst_thread_num increments among starting threads */
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;
        int n;

        TRACE_ENTRY();

        spin_lock(&lock);
        n = scst_thread_num++;
        spin_unlock(&lock);
        daemonize("scsi_tgt%d", n);
        recalc_sigpending();
        set_user_nice(current, 10);
        current->flags |= PF_NOFREEZE;

        spin_lock_irq(&scst_list_lock);
        while (1) {
                wait_queue_t wait;
                init_waitqueue_entry(&wait, current);

                /*
                 * Classic open-coded wait: the queue is exclusive, so a
                 * wake_up() rouses only one idle thread.  The lock is
                 * dropped only around schedule().
                 */
                if (!test_cmd_lists()) {
                        add_wait_queue_exclusive(&scst_list_waitQ, &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cmd_lists())
                                        break;
                                spin_unlock_irq(&scst_list_lock);
                                schedule();
                                spin_lock_irq(&scst_list_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&scst_list_waitQ, &wait);
                }

                scst_do_job_init(&scst_init_cmd_list);
                scst_do_job_active(&scst_active_cmd_list,
                                   SCST_CONTEXT_THREAD|SCST_PROCESSIBLE_ENV);

                /* Global shutdown: exit once all command lists are drained */
                if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
                    list_empty(&scst_cmd_list) &&
                    list_empty(&scst_active_cmd_list) &&
                    list_empty(&scst_init_cmd_list)) {
                        break;
                }
                
                /* Request to shrink the thread pool: this thread exits */
                if (unlikely(scst_shut_threads_count > 0)) {
                        scst_shut_threads_count--;
                        break;
                }
        }
        spin_unlock_irq(&scst_list_lock);

        /* The last thread out signals the waiter in the shutdown path */
        if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
                smp_mb__after_atomic_dec();
                TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
                up(scst_shutdown_mutex);
        }

        TRACE_EXIT();
        return 0;
}
2677
2678 void scst_cmd_tasklet(long p)
2679 {
2680         TRACE_ENTRY();
2681
2682         spin_lock_irq(&scst_list_lock);
2683
2684         scst_do_job_init(&scst_init_cmd_list);
2685         scst_do_job_active(&scst_active_cmd_list, 
2686                 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2687
2688         spin_unlock_irq(&scst_list_lock);
2689
2690         TRACE_EXIT();
2691         return;
2692 }
2693
/*
 * Finds the tgt_dev a management command is addressed to, by LUN, and
 * stores it in mcmd->mcmd_tgt_dev.
 *
 * Returns 0 on success, < 0 if there is no device handler or
 * > 0 if SCST_FLAG_SUSPENDED set.
 */
static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
{
        struct scst_tgt_dev *tgt_dev = NULL;
        int res = -1;

        TRACE_ENTRY();

        TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
              (uint64_t)mcmd->lun);

        spin_lock_irq(&scst_list_lock);
        scst_inc_cmd_count();   
        if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
                /* Linear search of the session's tgt_dev list by LUN */
                list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
                                    sess_tgt_dev_list_entry) 
                {
                        if (tgt_dev->acg_dev->lun == mcmd->lun) {
                                TRACE_DBG("tgt_dev %p found", tgt_dev);
                                mcmd->mcmd_tgt_dev = tgt_dev;
                                res = 0;
                                break;
                        }
                }
                /* No match: undo the count taken above */
                if (mcmd->mcmd_tgt_dev == NULL)
                        scst_dec_cmd_count();
        } else {
                /* Suspended: park the session until devices resume */
                if ( !mcmd->sess->waiting) {
                        TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
                              mcmd->sess);
                        list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
                                      &scst_dev_wait_sess_list);
                        mcmd->sess->waiting = 1;
                }
                scst_dec_cmd_count();
                res = 1;
        }
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_HRES(res);
        return res;
}
2739
/*
 * Accounts the completion of an aborted command against the management
 * command waiting on it.  When the last awaited command finishes and
 * the mcmd itself has completed its work, the mcmd is re-queued to the
 * active mgmt list and the mgmt thread is woken.
 *
 * Called under scst_list_lock and IRQ off.
 */
static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
        struct scst_mgmt_cmd *mcmd)
{
        TRACE_ENTRY();

        TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
                "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
                mcmd->cmd_wait_count);

        /* Break the back-link set up by scst_abort_cmd() */
        cmd->mgmt_cmnd = NULL;

        if (cmd->completed)
                mcmd->completed_cmd_count++;

        mcmd->cmd_wait_count--;
        if (mcmd->cmd_wait_count > 0) {
                /* More aborted commands still outstanding */
                TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
                        mcmd->cmd_wait_count);
                goto out;
        }

        mcmd->state = SCST_MGMT_CMD_STATE_DONE;

        /* mcmd->completed is set by scst_set_mcmd_next_state() */
        if (mcmd->completed) {
                TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
                        mcmd);
                list_move_tail(&mcmd->mgmt_cmd_list_entry,
                        &scst_active_mgmt_cmd_list);
        }

        wake_up(&scst_mgmt_cmd_list_waitQ);

out:
        TRACE_EXIT();
        return;
}
2777
2778 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2779         struct scst_tgt_dev *tgt_dev, int set_status)
2780 {
2781         int res = SCST_DEV_TM_NOT_COMPLETED;
2782         if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2783                 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2784                       tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2785                 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd, 
2786                         tgt_dev);
2787                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2788                       tgt_dev->acg_dev->dev->handler->name, res);
2789                 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2790                         mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ? 
2791                                                 SCST_MGMT_STATUS_SUCCESS :
2792                                                 SCST_MGMT_STATUS_FAILED;
2793                 }
2794         }
2795         return res;
2796 }
2797
2798 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2799 {
2800         switch(mgmt_fn) {
2801                 case SCST_ABORT_TASK:
2802                 case SCST_ABORT_TASK_SET:
2803                 case SCST_CLEAR_TASK_SET:
2804                         return 1;
2805                 default:
2806                         return 0;
2807         }
2808 }
2809
/* 
 * Called under scst_list_lock and IRQ off (to protect cmd
 * from being destroyed).
 * If mcmd != NULL and the command cannot be aborted immediately,
 * the TM response is deferred via mcmd->cmd_wait_count.
 */
void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
        int other_ini, int call_dev_task_mgmt_fn)
{
        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);

        /* Record whether the abort came from another initiator */
        if (other_ini) {
                set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
                smp_mb__after_set_bit();
        }
        set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
        smp_mb__after_set_bit();

        /* Give the dev handler a chance to react, if already translated */
        if (call_dev_task_mgmt_fn && cmd->tgt_dev)
                 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);

        if (mcmd) {
                int defer;
                /*
                 * Decide whether the TM response must wait for this
                 * command: always, if the target driver requires a
                 * synchronous TM reply; otherwise only if the command
                 * is already past the relevant point (EXECUTING for
                 * strict TM functions, XMITTING for the rest).
                 */
                if (cmd->tgtt->tm_sync_reply)
                        defer = 1;
                else {
                        if (scst_is_strict_mgmt_fn(mcmd->fn))
                                defer = test_bit(SCST_CMD_EXECUTING,
                                        &cmd->cmd_flags);
                        else
                                defer = test_bit(SCST_CMD_XMITTING,
                                        &cmd->cmd_flags);
                }

                if (defer) {
                        /*
                         * Delay the response until the command's finish in
                         * order to guarantee that "no further responses from
                         * the task are sent to the SCSI initiator port" after
                         * response from the TM function is sent (SAM)
                         */
                        TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
                                "xmitted (state %d), deferring ABORT...", cmd,
                                cmd->tag, cmd->state);
#ifdef EXTRACHECKS
                        if (cmd->mgmt_cmnd) {
                                printk(KERN_ALERT "cmd %p (tag %d, state %d) "
                                        "has non-NULL mgmt_cmnd %p!!! Current "
                                        "mcmd %p\n", cmd, cmd->tag, cmd->state,
                                        cmd->mgmt_cmnd, mcmd);
                        }
#endif
                        BUG_ON(cmd->mgmt_cmnd);
                        /* Paired with scst_complete_cmd_mgmt() on finish */
                        mcmd->cmd_wait_count++;
                        cmd->mgmt_cmnd = mcmd;
                }
        }

        tm_dbg_release_cmd(cmd);

        TRACE_EXIT();
        return;
}
2874
2875 /* Called under scst_list_lock and IRQ off */
2876 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2877 {
2878         int res;
2879         if (mcmd->cmd_wait_count != 0) {
2880                 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2881                         "wait", mcmd->cmd_wait_count);
2882                 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2883                 res = -1;
2884         } else {
2885                 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2886                 res = 0;
2887         }
2888         mcmd->completed = 1;
2889         return res;
2890 }
2891
/*
 * Scans every device's blocked command list and moves each aborted
 * command to the active list so it can complete, waking the
 * processing threads if anything was released.
 *
 * Lock nesting as coded here: scst_mutex -> dev->dev_lock (bh) ->
 * scst_list_lock (irq).
 */
static void scst_unblock_aborted_cmds(int scst_mutex_held)
{
        struct scst_device *dev;
        int wake = 0;

        TRACE_ENTRY();

        if (!scst_mutex_held)
                down(&scst_mutex);

        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                struct scst_cmd *cmd, *tcmd;
                spin_lock_bh(&dev->dev_lock);
                /* _safe: entries are removed while iterating */
                list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
                                        blocked_cmd_list_entry) {
                        if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
                                list_del(&cmd->blocked_cmd_list_entry);
                                TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
                                        "to active cmd list", cmd);
                                spin_lock_irq(&scst_list_lock);
                                list_move_tail(&cmd->cmd_list_entry,
                                        &scst_active_cmd_list);
                                spin_unlock_irq(&scst_list_lock);
                                wake = 1;
                        }
                }
                spin_unlock_bh(&dev->dev_lock);
        }

        if (!scst_mutex_held)
                up(&scst_mutex);

        if (wake)
                wake_up(&scst_list_waitQ);

        TRACE_EXIT();
        return;
}
2930
2931 /* Returns 0 if the command processing should be continued, <0 otherwise */
2932 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2933         struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2934 {
2935         struct scst_cmd *cmd;
2936         struct scst_session *sess = tgt_dev->sess;
2937
2938         TRACE_ENTRY();
2939
2940         spin_lock_irq(&scst_list_lock);
2941
2942         TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2943         list_for_each_entry(cmd, &sess->search_cmd_list, 
2944                         search_cmd_list_entry) {
2945                 if ((cmd->tgt_dev == NULL) && 
2946                     (cmd->lun == tgt_dev->acg_dev->lun))
2947                         continue;
2948                 if (cmd->tgt_dev != tgt_dev)
2949                         continue;
2950                 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2951         }
2952         spin_unlock_irq(&scst_list_lock);
2953
2954         scst_unblock_aborted_cmds(scst_mutex_held);
2955
2956         TRACE_EXIT();
2957         return;
2958 }
2959
2960 /* Returns 0 if the command processing should be continued, <0 otherwise */
2961 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2962 {
2963         int res;
2964         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2965         struct scst_device *dev = tgt_dev->acg_dev->dev;
2966
2967         TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2968                 tgt_dev->acg_dev->lun, mcmd);
2969
2970         spin_lock_bh(&dev->dev_lock);
2971         __scst_block_dev(dev);
2972         spin_unlock_bh(&dev->dev_lock);
2973
2974         __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2975         scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2976
2977         res = scst_set_mcmd_next_state(mcmd);
2978
2979         TRACE_EXIT_RES(res);
2980         return res;
2981 }
2982
2983 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2984 {
2985         /*
2986          * No need for special protection for SCST_FLAG_TM_ACTIVE, since
2987          * we could be called from the only thread.
2988          */
2989         if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2990                 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2991                         mcmd);
2992                 if (!locked)
2993                         spin_lock_irq(&scst_list_lock);
2994                 list_move_tail(&mcmd->mgmt_cmd_list_entry, 
2995                         &scst_delayed_mgmt_cmd_list);
2996                 if (!locked)
2997                         spin_unlock_irq(&scst_list_lock);
2998                 return -1;
2999         } else {
3000                 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3001                 return 0;
3002         }
3003 }
3004
/*
 * First processing step of a management command: serializes against
 * other TM activity, then either resolves the command to abort (for
 * ABORT TASK) or translates the target LUN.
 *
 * Returns 0 if the command processing should be continued,
 * >0, if it should be requeued, <0 otherwise.
 */
static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
{
        int res = 0;

        TRACE_ENTRY();

        /* Only one TM command may be active; others get delayed */
        res = scst_check_delay_mgmt_cmd(mcmd, 1);
        if (res != 0)
                goto out;

        if (mcmd->fn == SCST_ABORT_TASK) {
                struct scst_session *sess = mcmd->sess;
                struct scst_cmd *cmd;

                /* ABORT TASK addresses a single command by tag, not a LUN */
                spin_lock_irq(&scst_list_lock);
                cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
                if (cmd == NULL) {
                        TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
                                "tag %d not found", mcmd->tag);
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                        mcmd->state = SCST_MGMT_CMD_STATE_DONE;
                } else {
                        TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
                                "aborting it", cmd, mcmd->tag, cmd->sn);
                        mcmd->cmd_to_abort = cmd;
                        scst_abort_cmd(cmd, mcmd, 0, 1);
                        res = scst_set_mcmd_next_state(mcmd);
                        mcmd->cmd_to_abort = NULL; /* just in case */
                }
                spin_unlock_irq(&scst_list_lock);
        } else {
                int rc;
                rc = scst_mgmt_translate_lun(mcmd);
                if (rc < 0) {
                        PRINT_ERROR_PR("Corresponding device for lun %Ld not "
                                "found", (uint64_t)mcmd->lun);
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                        mcmd->state = SCST_MGMT_CMD_STATE_DONE;
                } else if (rc == 0)
                        mcmd->state = SCST_MGMT_CMD_STATE_READY;
                else
                        /* rc > 0: suspended, requeue the mcmd */
                        res = rc;
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
3055
/*
 * Implements the TARGET RESET TM function: blocks every device, lets
 * each device handler process the reset, then issues a SCSI bus reset
 * once per distinct host for devices the handlers did not fully cover.
 *
 * Returns 0 if the command processing should be continued, <0 otherwise.
 */
static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
{
        int res, rc;
        struct scst_device *dev, *d;
        struct scst_tgt_dev *tgt_dev;
        int cont, c;
        LIST_HEAD(host_devs);

        TRACE_ENTRY();

        TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
                mcmd, mcmd->sess->sess_cmd_count);

        down(&scst_mutex);

        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                int found = 0;

                spin_lock_bh(&dev->dev_lock);
                __scst_block_dev(dev);
                scst_process_reset(dev, mcmd->sess, NULL, mcmd);
                spin_unlock_bh(&dev->dev_lock);

                /*
                 * cont: the device has at least one tgt_dev;
                 * c: at least one handler did NOT complete the reset
                 * itself, so a SCSI-level reset is still needed.
                 */
                cont = 0;
                c = 0;
                list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
                        dev_tgt_dev_list_entry) 
                {
                        cont = 1;
                        rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
                        if (rc == SCST_DEV_TM_NOT_COMPLETED) 
                                c = 1;
                        else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
                                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                }
                /* All handlers completed the reset: no SCSI reset needed */
                if (cont && !c)
                        continue;
                
                if (dev->scsi_dev == NULL)
                        continue;

                /* Collect each SCSI host only once for the bus reset below */
                list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
                        if (dev->scsi_dev->host->host_no ==
                                    d->scsi_dev->host->host_no) 
                        {
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        list_add_tail(&dev->reset_dev_list_entry, &host_devs);
        }

        /*
         * We suppose here that for all commands that already on devices
         * on/after scsi_reset_provider() completion callbacks will be called.
         */

        list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
                /* dev->scsi_dev must be non-NULL here */
                TRACE(TRACE_MGMT, "Resetting host %d bus ",
                      dev->scsi_dev->host->host_no);
                rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
                TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
                      dev->scsi_dev->host->host_no,
                      (rc == SUCCESS) ? "SUCCESS" : "FAILED");
                if (rc != SUCCESS) {
                        /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
                        mcmd->status = SCST_MGMT_STATUS_FAILED;
                }
        }

        /* We handled the reset ourselves; suppress mid-layer was_reset */
        list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
                if (dev->scsi_dev != NULL)
                        dev->scsi_dev->was_reset = 0;
        }

        up(&scst_mutex);

        spin_lock_irq(&scst_list_lock);
        tm_dbg_task_mgmt("TARGET RESET");
        res = scst_set_mcmd_next_state(mcmd);
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_RES(res);
        return res;
}
3144
3145 /* Returns 0 if the command processing should be continued, <0 otherwise */
3146 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3147 {
3148         int res, rc;
3149         struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3150         struct scst_device *dev = tgt_dev->acg_dev->dev;
3151
3152         TRACE_ENTRY();
3153
3154         TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3155                 mcmd);
3156
3157         spin_lock_bh(&dev->dev_lock);
3158         __scst_block_dev(dev);
3159         scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3160         spin_unlock_bh(&dev->dev_lock);
3161
3162         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3163         if (rc != SCST_DEV_TM_NOT_COMPLETED)
3164                 goto out_tm_dbg;
3165
3166         if (dev->scsi_dev != NULL) {
3167                 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3168                       dev->scsi_dev->host->host_no);
3169                 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3170                 if (rc != SUCCESS)
3171                         mcmd->status = SCST_MGMT_STATUS_FAILED;
3172                 dev->scsi_dev->was_reset = 0;
3173         }
3174
3175 out_tm_dbg:
3176         spin_lock_irq(&scst_list_lock);
3177         tm_dbg_task_mgmt("LUN RESET");
3178         res = scst_set_mcmd_next_state(mcmd);
3179         spin_unlock_irq(&scst_list_lock);
3180
3181         TRACE_EXIT_RES(res);
3182         return res;
3183 }
3184
/*
 * Aborts everything outstanding for a single session, optionally with
 * I_T nexus loss semantics (nexus_loss != 0 additionally resets each
 * tgt_dev's state via scst_reset_tgt_dev()).
 *
 * Returns 0 if the command processing should be continued, <0 otherwise.
 */
static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
        int nexus_loss)
{
        int res;
        struct scst_session *sess = mcmd->sess;
        struct scst_tgt_dev *tgt_dev;

        TRACE_ENTRY();

        if (nexus_loss) {
                TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
                        mcmd);
        } else {
                TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
                        mcmd);
        }

        down(&scst_mutex);
        list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
                sess_tgt_dev_list_entry) 
        {
                struct scst_device *dev = tgt_dev->acg_dev->dev;
                int rc;

                /* Block the device so no new commands start meanwhile */
                spin_lock_bh(&dev->dev_lock);
                __scst_block_dev(dev);
                spin_unlock_bh(&dev->dev_lock);

                rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
                if (rc == SCST_DEV_TM_COMPLETED_FAILED)
                        mcmd->status = SCST_MGMT_STATUS_FAILED;

                /* other_ini = !nexus_loss: nexus loss aborts own cmds too */
                __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
                if (nexus_loss)
                        scst_reset_tgt_dev(tgt_dev, 1);
        }
        up(&scst_mutex);

        spin_lock_irq(&scst_list_lock);
        res = scst_set_mcmd_next_state(mcmd);
        spin_unlock_irq(&scst_list_lock);

        TRACE_EXIT_RES(res);
        return res;
}
3231
3232 /* Returns 0 if the command processing should be continued, <0 otherwise */
3233 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3234         int nexus_loss)
3235 {
3236         int res;
3237         struct scst_tgt *tgt = mcmd->sess->tgt;
3238         struct scst_session *sess;
3239         struct scst_device *dev;
3240         struct scst_tgt_dev *tgt_dev;
3241
3242         TRACE_ENTRY();
3243
3244         if (nexus_loss) {
3245                 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3246                         mcmd);
3247         } else {
3248                 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3249                         mcmd);
3250         }
3251
3252         down(&scst_mutex);
3253
3254         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3255                 spin_lock_bh(&dev->dev_lock);
3256                 __scst_block_dev(dev);
3257                 spin_unlock_bh(&dev->dev_lock);
3258         }
3259
3260         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3261                 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3262                         sess_tgt_dev_list_entry) 
3263                 {
3264                         int rc;
3265
3266                         rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3267                         if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3268                                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3269
3270                         __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3271                         if (nexus_loss)
3272                                 scst_reset_tgt_dev(tgt_dev, 1);
3273                 }
3274         }
3275
3276         up(&scst_mutex);
3277
3278         spin_lock_irq(&scst_list_lock);
3279         res = scst_set_mcmd_next_state(mcmd);
3280         spin_unlock_irq(&scst_list_lock);
3281
3282         TRACE_EXIT_RES(res);
3283         return res;
3284 }
3285
3286 /* Returns 0 if the command processing should be continued, <0 otherwise */
3287 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3288 {
3289         int res = 0;
3290
3291         TRACE_ENTRY();
3292
3293         mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3294
3295         switch (mcmd->fn) {
3296         case SCST_ABORT_TASK_SET:
3297         case SCST_CLEAR_TASK_SET:
3298                 res = scst_abort_task_set(mcmd);
3299                 break;
3300
3301         case SCST_LUN_RESET:
3302                 res = scst_lun_reset(mcmd);
3303                 break;
3304
3305         case SCST_TARGET_RESET:
3306                 res = scst_target_reset(mcmd);
3307                 break;
3308
3309         case SCST_ABORT_ALL_TASKS_SESS:
3310                 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3311                 break;
3312
3313         case SCST_NEXUS_LOSS_SESS:
3314                 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3315                 break;
3316
3317         case SCST_ABORT_ALL_TASKS:
3318                 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3319                 break;
3320
3321         case SCST_NEXUS_LOSS:
3322                 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3323                 break;
3324
3325         case SCST_CLEAR_ACA:
3326                 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3327                 /* Nothing to do (yet) */
3328                 break;
3329
3330         default:
3331                 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3332                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3333                 break;
3334         }
3335
3336         TRACE_EXIT_RES(res);
3337         return res;
3338 }
3339
3340 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3341 {
3342         struct scst_device *dev;
3343         struct scst_tgt_dev *tgt_dev;
3344
3345         TRACE_ENTRY();
3346
3347         clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3348         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3349                 struct scst_mgmt_cmd *m;
3350                 spin_lock_irq(&scst_list_lock);
3351                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3352                                 mgmt_cmd_list_entry);
3353                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3354                         "cmd list", m);
3355                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3356                 spin_unlock_irq(&scst_list_lock);
3357         }
3358
3359         mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3360         if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3361                 mcmd->status = SCST_MGMT_STATUS_FAILED;
3362
3363         if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3364                 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3365                       mcmd->sess->tgt->tgtt->name);
3366                 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3367                 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn_done() returned",
3368                       mcmd->sess->tgt->tgtt->name);
3369         }
3370
3371         switch (mcmd->fn) {
3372         case SCST_ABORT_TASK_SET:
3373         case SCST_CLEAR_TASK_SET:
3374         case SCST_LUN_RESET:
3375                 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3376                 break;
3377
3378         case SCST_TARGET_RESET:
3379         case SCST_ABORT_ALL_TASKS:
3380         case SCST_NEXUS_LOSS:
3381                 down(&scst_mutex);
3382                 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3383                         scst_unblock_dev(dev);
3384                 }
3385                 up(&scst_mutex);
3386                 break;
3387
3388         case SCST_NEXUS_LOSS_SESS:
3389         case SCST_ABORT_ALL_TASKS_SESS:
3390                 down(&scst_mutex);
3391                 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3392                                 sess_tgt_dev_list_entry) {
3393                         scst_unblock_dev(tgt_dev->acg_dev->dev);
3394                 }
3395                 up(&scst_mutex);
3396                 break;
3397
3398         case SCST_CLEAR_ACA:
3399         default:
3400                 break;
3401         }
3402
3403         mcmd->tgt_priv = NULL;
3404
3405         TRACE_EXIT();
3406         return;
3407 }
3408
/* Returns >0, if cmd should be requeued */
static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);

        /*
         * Drive the mgmt cmd through its state machine until a handler
         * asks to stop (non-zero return) or the FINISHED state frees it.
         */
        while (1) {
                switch (mcmd->state) {
                case SCST_MGMT_CMD_STATE_INIT:
                        /* Non-zero stops processing; >0 means the caller
                         * (scst_mgmt_cmd_thread) should requeue the cmd */
                        res = scst_mgmt_cmd_init(mcmd);
                        if (res)
                                goto out;
                        break;

                case SCST_MGMT_CMD_STATE_READY:
                        if (scst_mgmt_cmd_exec(mcmd))
                                goto out;
                        break;

                case SCST_MGMT_CMD_STATE_DONE:
                        scst_mgmt_cmd_send_done(mcmd);
                        break;

                case SCST_MGMT_CMD_STATE_FINISHED:
                        /* Terminal state: free the cmd and return */
                        goto out_free;

#ifdef EXTRACHECKS
                case SCST_MGMT_CMD_STATE_EXECUTING:
                        /* EXECUTING cmds must never reach this loop */
                        BUG();
#endif

                default:
                        PRINT_ERROR_PR("Unknown state %d of management command",
                                    mcmd->state);
                        res = -1;
                        goto out_free;
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        scst_free_mgmt_cmd(mcmd, 1);
        goto out;
}
3459
3460 static inline int test_mgmt_cmd_list(void)
3461 {
3462         int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3463                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3464                   test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3465         return res;
3466 }
3467
/*
 * Main loop of the task management cmd thread: sleeps on
 * scst_mgmt_cmd_list_waitQ until test_mgmt_cmd_list() reports work (or
 * shutdown), drains the active mgmt cmd list, and exits on
 * SCST_FLAG_SHUTDOWN once the active list is empty.
 */
int scst_mgmt_cmd_thread(void *arg)
{
        struct scst_mgmt_cmd *mcmd;

        TRACE_ENTRY();

        /* 2.6-era kernel thread setup */
        daemonize("scsi_tgt_mc");
        recalc_sigpending();
        current->flags |= PF_NOFREEZE;

        spin_lock_irq(&scst_list_lock);
        while (1) {
                wait_queue_t wait;
                init_waitqueue_entry(&wait, current);

                /* Sleep until there is work; the condition is rechecked
                 * under scst_list_lock, which is dropped only around
                 * schedule() */
                if (!test_mgmt_cmd_list()) {
                        add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
                                                 &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_mgmt_cmd_list())
                                        break;
                                spin_unlock_irq(&scst_list_lock);
                                schedule();
                                spin_lock_irq(&scst_list_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
                }

                while (!list_empty(&scst_active_mgmt_cmd_list) &&
                       !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
                {
                        int rc;
                        mcmd = list_entry(scst_active_mgmt_cmd_list.next,
                                          typeof(*mcmd), mgmt_cmd_list_entry);
                        TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
                              mcmd);
                        list_move_tail(&mcmd->mgmt_cmd_list_entry,
                                       &scst_mgmt_cmd_list);
                        /* Process outside the lock; the cmd is parked on
                         * scst_mgmt_cmd_list meanwhile */
                        spin_unlock_irq(&scst_list_lock);
                        rc = scst_process_mgmt_cmd(mcmd);
                        spin_lock_irq(&scst_list_lock);
                        /* rc > 0: cmd asked to be requeued; put it at the
                         * head so it is retried first */
                        if (rc > 0) {
                                TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
                                        "of active mgmt cmd list", mcmd);
                                list_move(&mcmd->mgmt_cmd_list_entry,
                                       &scst_active_mgmt_cmd_list);
                        }
                }

                /* Exit only when shutdown is requested AND nothing is left */
                if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
                    list_empty(&scst_active_mgmt_cmd_list)) 
                {
                        break;
                }
        }
        spin_unlock_irq(&scst_list_lock);

        /* Last thread out signals the shutdown waiter, if any */
        if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
                smp_mb__after_atomic_dec();
                TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
                up(scst_shutdown_mutex);
        }

        TRACE_EXIT();
        return 0;
}
3536
3537 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3538         *sess, int fn, int atomic, void *tgt_priv)
3539 {
3540         struct scst_mgmt_cmd *mcmd = NULL;
3541
3542         TRACE_ENTRY();
3543
3544         if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3545                 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3546                             "(target %s)", sess->tgt->tgtt->name);
3547                 goto out;
3548         }
3549
3550         mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3551         if (mcmd == NULL)
3552                 goto out;
3553
3554         mcmd->sess = sess;
3555         mcmd->fn = fn;
3556         mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3557         mcmd->tgt_priv = tgt_priv;
3558
3559 out:
3560         TRACE_EXIT();
3561         return mcmd;
3562 }
3563
/*
 * Queue a received mgmt cmd for processing by the mgmt cmd thread.
 * Returns 0 on success (or deferral), -1 if session init failed.
 */
static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
        struct scst_mgmt_cmd *mcmd)
{
        unsigned long flags;
        int res = 0;

        TRACE_ENTRY();

        /* Hold a session reference for the lifetime of the mgmt cmd */
        scst_sess_get(sess);

        spin_lock_irqsave(&scst_list_lock, flags);

        sess->sess_cmd_count++;

#ifdef EXTRACHECKS
        if (unlikely(sess->shutting_down)) {
                PRINT_ERROR_PR("%s",
                        "New mgmt cmd while shutting down the session");
                BUG();
        }
#endif

        /*
         * While the session is still initializing, defer the cmd; if init
         * failed, reject it.
         */
        if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
                switch(sess->init_phase) {
                case SCST_SESS_IPH_INITING:
                        TRACE_DBG("Adding mcmd %p to init deferred mcmd list", 
                                mcmd);
                        list_add_tail(&mcmd->mgmt_cmd_list_entry, 
                                &sess->init_deferred_mcmd_list);
                        goto out_unlock;
                case SCST_SESS_IPH_SUCCESS:
                        break;
                case SCST_SESS_IPH_FAILED:
                        /* NOTE(review): the sess ref and sess_cmd_count
                         * taken above are presumably released by the
                         * caller's failure path - confirm */
                        res = -1;
                        goto out_unlock;
                default:
                        BUG();
                }
        }

        TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
        list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);

        spin_unlock_irqrestore(&scst_list_lock, flags);

        /* Wake the mgmt cmd thread to pick up the new cmd */
        wake_up(&scst_mgmt_cmd_list_waitQ);

out:
        TRACE_EXIT();
        return res;

out_unlock:
        spin_unlock_irqrestore(&scst_list_lock, flags);
        goto out;
}
3619
3620 /* 
3621  * Must not been called in parallel with scst_unregister_session() for the 
3622  * same sess
3623  */
3624 int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
3625                         const uint8_t *lun, int lun_len, int atomic,
3626                         void *tgt_priv)
3627 {
3628         int res = -EFAULT;
3629         struct scst_mgmt_cmd *mcmd = NULL;
3630