1 /*
2  *  scst_targ.c
3  *
4  *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2009 ID7 Ltd.
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/smp_lock.h>
27 #include <linux/unistd.h>
28 #include <linux/string.h>
29 #include <linux/kthread.h>
30 #include <linux/delay.h>
31 #include <linux/ktime.h>
32
33 #include "scst.h"
34 #include "scst_priv.h"
35
36 #if 0 /* Temporary, left for future performance investigations */
37 /* When deleting it, don't forget to delete write_cmd_count */
38 #define CONFIG_SCST_ORDERED_READS
39 #endif
40
41 static void scst_cmd_set_sn(struct scst_cmd *cmd);
42 static int __scst_init_cmd(struct scst_cmd *cmd);
43 static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
44 static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
45         uint64_t tag);
46 static void scst_process_redirect_cmd(struct scst_cmd *cmd,
47         enum scst_exec_context context, int check_retries);
48
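/*
 * Queues the command on the current CPU's tasklet command list and
 * schedules that tasklet to process it.
 */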
49 static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
50 {
51         struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
52         unsigned long flags;
53
54         spin_lock_irqsave(&t->tasklet_lock, flags);
55         TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
56                 smp_processor_id());
57         list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
58         spin_unlock_irqrestore(&t->tasklet_lock, flags);
59
60         tasklet_schedule(&t->tasklet);
61 }
62
63 /*
64  * Must not be called in parallel with scst_unregister_session() for the
65  * same sess
66  */
67 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
68                              const uint8_t *lun, int lun_len,
69                              const uint8_t *cdb, int cdb_len, int atomic)
70 {
71         struct scst_cmd *cmd;
72
73         TRACE_ENTRY();
74
75 #ifdef CONFIG_SCST_EXTRACHECKS
76         if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
77                 PRINT_CRIT_ERROR("%s",
78                         "New cmd while shutting down the session");
79                 sBUG();
80         }
81 #endif
82
83         cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
84         if (cmd == NULL)
85                 goto out;
86
87         cmd->sess = sess;
88         cmd->tgt = sess->tgt;
89         cmd->tgtt = sess->tgt->tgtt;
90
91         /*
92          * For both a wrong LUN and a wrong CDB, defer the error reporting
93          * to scst_cmd_init_done()
94          */
95
96         cmd->lun = scst_unpack_lun(lun, lun_len);
97
98         if (cdb_len <= SCST_MAX_CDB_SIZE) {
99                 memcpy(cmd->cdb, cdb, cdb_len);
100                 cmd->cdb_len = cdb_len;
101         }
102
103         TRACE_DBG("cmd %p, sess %p", cmd, sess);
104         scst_sess_get(sess);
105
106 out:
107         TRACE_EXIT();
108         return cmd;
109 }
110 EXPORT_SYMBOL(scst_rx_cmd);
111
112 /*
113  * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
114  * this command should be stopped.
115  */
116 static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
117 {
118         int rc, res = 0;
119
120         TRACE_ENTRY();
121
122         /* See the comment in scst_do_job_init() */
123         if (unlikely(!list_empty(&scst_init_cmd_list))) {
124                 TRACE_MGMT_DBG("%s", "init cmd list busy");
125                 goto out_redirect;
126         }
127         /*
128          * A memory barrier isn't necessary here, because the CPU appears to
129          * be self-consistent and we don't care about the race described in
130          * the comment in scst_do_job_init().
131          */
132
133         rc = __scst_init_cmd(cmd);
134         if (unlikely(rc > 0))
135                 goto out_redirect;
136         else if (unlikely(rc != 0)) {
137                 res = 1;
138                 goto out;
139         }
140
141         EXTRACHECKS_BUG_ON(*context == SCST_CONTEXT_SAME);
142
143         /* Small context optimization */
144         if (((*context == SCST_CONTEXT_TASKLET) ||
145              (*context == SCST_CONTEXT_DIRECT_ATOMIC)) &&
146               scst_cmd_is_expected_set(cmd)) {
147                 if (cmd->expected_data_direction & SCST_DATA_WRITE) {
148                         if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
149                                         &cmd->tgt_dev->tgt_dev_flags))
150                                 *context = SCST_CONTEXT_THREAD;
151                 } else {
152                         if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
153                                         &cmd->tgt_dev->tgt_dev_flags))
154                                 *context = SCST_CONTEXT_THREAD;
155                 }
156         }
157
158 out:
159         TRACE_EXIT_RES(res);
160         return res;
161
162 out_redirect:
163         if (cmd->preprocessing_only) {
164                 /*
165                  * Poor man's solution for single-threaded targets, where
166                  * blocking the receiver at least sometimes means blocking
167                  * everything. For instance, the iSCSI target won't be able
168                  * to receive Data-Out PDUs.
169                  */
170                 sBUG_ON(*context != SCST_CONTEXT_DIRECT);
171                 scst_set_busy(cmd);
172                 scst_set_cmd_abnormal_done_state(cmd);
173                 res = 1;
174                 /* Keep the initiator from being flooded with BUSY commands */
175                 msleep(50);
176         } else {
177                 unsigned long flags;
178                 spin_lock_irqsave(&scst_init_lock, flags);
179                 TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
180                         "%d)", cmd, atomic_read(&scst_cmd_count));
181                 list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
182                 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
183                         scst_init_poll_cnt++;
184                 spin_unlock_irqrestore(&scst_init_lock, flags);
185                 wake_up(&scst_init_cmd_list_waitQ);
186                 res = -1;
187         }
188         goto out;
189 }
190
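/*
 * scst_cmd_init_done() - the target driver has finished its part of the
 * command's initialization.
 *
 * Performs basic checks of the command (LUN, CDB length, queue type),
 * links it into the session's command list and continues processing in
 * the preferred context. Called by the target driver after the command
 * returned by scst_rx_cmd() has been filled in.
 */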
191 void scst_cmd_init_done(struct scst_cmd *cmd,
192         enum scst_exec_context pref_context)
193 {
194         unsigned long flags;
195         struct scst_session *sess = cmd->sess;
196         int rc;
197
198         TRACE_ENTRY();
199
200         scst_set_start_time(cmd);
201
202         TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
203         TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
204                 "(cmd %p)", (long long unsigned int)cmd->tag,
205                 (long long unsigned int)cmd->lun, cmd->cdb_len,
206                 cmd->queue_type, cmd);
207         PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
208                 cmd->cdb, cmd->cdb_len);
209
210 #ifdef CONFIG_SCST_EXTRACHECKS
211         if (unlikely((in_irq() || irqs_disabled())) &&
212             ((pref_context == SCST_CONTEXT_DIRECT) ||
213              (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
214                 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
215                         "SCST_CONTEXT_THREAD instead", pref_context,
216                         cmd->tgtt->name);
217                 pref_context = SCST_CONTEXT_THREAD;
218         }
219 #endif
220
221         atomic_inc(&sess->sess_cmd_count);
222
223         spin_lock_irqsave(&sess->sess_list_lock, flags);
224
225         if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
226                 /*
227                  * We must always keep commands in the sess list from the
228                  * very beginning, because otherwise they can be missed during
229                  * TM processing. This check is needed because there might be
230                  * old, i.e. deferred, commands as well as new, just arriving, ones.
231                  */
232                 if (cmd->sess_cmd_list_entry.next == NULL)
233                         list_add_tail(&cmd->sess_cmd_list_entry,
234                                 &sess->sess_cmd_list);
235                 switch (sess->init_phase) {
236                 case SCST_SESS_IPH_SUCCESS:
237                         break;
238                 case SCST_SESS_IPH_INITING:
239                         TRACE_DBG("Adding cmd %p to init deferred cmd list",
240                                   cmd);
241                         list_add_tail(&cmd->cmd_list_entry,
242                                 &sess->init_deferred_cmd_list);
243                         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
244                         goto out;
245                 case SCST_SESS_IPH_FAILED:
246                         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
247                         scst_set_busy(cmd);
248                         scst_set_cmd_abnormal_done_state(cmd);
249                         goto active;
250                 default:
251                         sBUG();
252                 }
253         } else
254                 list_add_tail(&cmd->sess_cmd_list_entry,
255                               &sess->sess_cmd_list);
256
257         spin_unlock_irqrestore(&sess->sess_list_lock, flags);
258
259         if (unlikely(cmd->lun == NO_SUCH_LUN)) {
260                 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
261                 scst_set_cmd_error(cmd,
262                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
263                 scst_set_cmd_abnormal_done_state(cmd);
264                 goto active;
265         }
266
267         if (unlikely(cmd->cdb_len == 0)) {
268                 PRINT_ERROR("%s", "Wrong CDB len, finishing cmd");
269                 scst_set_cmd_error(cmd,
270                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
271                 scst_set_cmd_abnormal_done_state(cmd);
272                 goto active;
273         }
274
275         if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
276                 PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
277                 scst_set_cmd_error(cmd,
278                         SCST_LOAD_SENSE(scst_sense_invalid_message));
279                 scst_set_cmd_abnormal_done_state(cmd);
280                 goto active;
281         }
282
283         /*
284          * The cmd must be inited here to preserve the order. Even if the cmd
285          * has already been preliminarily completed by the target driver, we
286          * need to init it anyway to find out in which format to return sense.
287          */
288         cmd->state = SCST_CMD_STATE_INIT;
289         rc = scst_init_cmd(cmd, &pref_context);
290         if (unlikely(rc < 0))
291                 goto out;
292
293 active:
294         /* Here cmd must not be in any cmd list, no locks */
295         switch (pref_context) {
296         case SCST_CONTEXT_TASKLET:
297                 scst_schedule_tasklet(cmd);
298                 break;
299
300         case SCST_CONTEXT_DIRECT:
301                 scst_process_active_cmd(cmd, false);
302                 break;
303
304         case SCST_CONTEXT_DIRECT_ATOMIC:
305                 scst_process_active_cmd(cmd, true);
306                 break;
307
308         default:
309                 PRINT_ERROR("Context %x is undefined, using the thread one",
310                         pref_context);
311                 /* fall through */
312         case SCST_CONTEXT_THREAD:
313                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
314                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
315                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
316                         list_add(&cmd->cmd_list_entry,
317                                 &cmd->cmd_lists->active_cmd_list);
318                 else
319                         list_add_tail(&cmd->cmd_list_entry,
320                                 &cmd->cmd_lists->active_cmd_list);
321                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
322                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
323                 break;
324         }
325
326 out:
327         TRACE_EXIT();
328         return;
329 }
330 EXPORT_SYMBOL(scst_cmd_init_done);
331
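/*
 * First parsing stage: decodes the CDB via scst_get_cdb_info(), derives the
 * data direction and transfer length (falling back to the initiator supplied
 * expected values where that is allowed), and rejects commands with the NACA
 * or LINK control byte bits set. On success switches the command to the
 * SCST_CMD_STATE_DEV_PARSE state.
 */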
332 static int scst_pre_parse(struct scst_cmd *cmd)
333 {
334         int res = SCST_CMD_STATE_RES_CONT_SAME;
335         struct scst_device *dev = cmd->dev;
336         int rc;
337
338         TRACE_ENTRY();
339
340         cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
341              (!dev->has_own_order_mgmt &&
342               (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
343                cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
344
345         /*
346          * The expected transfer data supplied by the SCSI transport via the
347          * target driver are untrusted, so we prefer to derive them from the
348          * CDB. Additionally, not all transports support supplying the
349          * expected transfer data.
350          */
351
352         rc = scst_get_cdb_info(cmd);
353         if (unlikely(rc != 0)) {
354                 if (rc > 0) {
355                         PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
356                         goto out_xmit;
357                 }
358                 PRINT_ERROR("Unknown opcode 0x%02x for %s. "
359                         "Should you update scst_scsi_op_table?",
360                         cmd->cdb[0], dev->handler->name);
361                 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
362 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
363                 if (scst_cmd_is_expected_set(cmd)) {
364                         TRACE(TRACE_SCSI, "Using initiator supplied values: "
365                                 "direction %d, transfer_len %d",
366                                 cmd->expected_data_direction,
367                                 cmd->expected_transfer_len);
368                         cmd->data_direction = cmd->expected_data_direction;
369
370                         cmd->bufflen = cmd->expected_transfer_len;
371                         /* Restore (possibly) lost CDB length */
372                         cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
373                         if (cmd->cdb_len == -1) {
374                                 PRINT_ERROR("Unable to get CDB length for "
375                                         "opcode 0x%02x. Returning INVALID "
376                                         "OPCODE", cmd->cdb[0]);
377                                 scst_set_cmd_error(cmd,
378                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
379                                 goto out_xmit;
380                         }
381                 } else {
382                         PRINT_ERROR("Unknown opcode 0x%02x for %s, and "
383                              "target %s did not supply expected values",
384                              cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
385                         scst_set_cmd_error(cmd,
386                                    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
387                         goto out_xmit;
388                 }
389 #else
390                 scst_set_cmd_error(cmd,
391                            SCST_LOAD_SENSE(scst_sense_invalid_opcode));
392                 goto out_xmit;
393 #endif
394         } else {
395                 TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
396                         "(expected %d, set %s), transfer_len=%d (expected "
397                         "len %d), flags=%d", cmd->op_name, cmd,
398                         cmd->data_direction, cmd->expected_data_direction,
399                         scst_cmd_is_expected_set(cmd) ? "yes" : "no",
400                         cmd->bufflen, cmd->expected_transfer_len,
401                         cmd->op_flags);
402
403                 if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
404                         if (scst_cmd_is_expected_set(cmd)) {
405                                 /*
406                                  * The command's data length can't be easily
407                                  * determined from the CDB. ToDo: processing of
408                                  * all such commands should be fixed. Until that
409                                  * is done, take the length from the supplied
410                                  * expected value, but limit it to a reasonable
411                                  * value (15MB).
412                                  */
413                                 cmd->bufflen = min(cmd->expected_transfer_len,
414                                                         15*1024*1024);
415                                 cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
416                         } else
417                                 cmd->bufflen = 0;
418                 }
419         }
420
421         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
422                 PRINT_ERROR("NACA bit in control byte CDB is not supported "
423                             "(opcode 0x%02x)", cmd->cdb[0]);
424                 scst_set_cmd_error(cmd,
425                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
426                 goto out_xmit;
427         }
428
429         if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
430                 PRINT_ERROR("Linked commands are not supported "
431                             "(opcode 0x%02x)", cmd->cdb[0]);
432                 scst_set_cmd_error(cmd,
433                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
434                 goto out_xmit;
435         }
436
437         cmd->state = SCST_CMD_STATE_DEV_PARSE;
438
439 out:
440         TRACE_EXIT_RES(res);
441         return res;
442
443 out_xmit:
444         scst_set_cmd_abnormal_done_state(cmd);
445         res = SCST_CMD_STATE_RES_CONT_SAME;
446         goto out;
447 }
448
449 #ifndef CONFIG_SCST_USE_EXPECTED_VALUES
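/*
 * Returns true for commands whose decoded data direction or length may
 * legitimately differ from the initiator supplied expected values.
 */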
450 static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
451 {
452         bool res = false;
453
454         switch (cmd->cdb[0]) {
455         case TEST_UNIT_READY:
456                 /* VMware initiators sometimes issue TUR with a READ direction */
457                 res = true;
458                 break;
459         case VERIFY:
460         case VERIFY_6:
461         case VERIFY_12:
462         case VERIFY_16:
463                 /* VERIFY commands with BYTCHK unset shouldn't fail here */
464                 if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
465                     (cmd->cdb[1] & BYTCHK) == 0)
466                         res = true;
467                 break;
468         }
469
470         return res;
471 }
472 #endif
473
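/*
 * Calls the dev handler's parse() callback and validates the resulting
 * command state, data direction and buffer length, optionally checking them
 * against the expected values supplied by the target driver. Sets cmd->state
 * to the next processing stage.
 */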
474 static int scst_parse_cmd(struct scst_cmd *cmd)
475 {
476         int res = SCST_CMD_STATE_RES_CONT_SAME;
477         int state;
478         struct scst_device *dev = cmd->dev;
479         int orig_bufflen = cmd->bufflen;
480
481         TRACE_ENTRY();
482
483         if (likely(!scst_is_cmd_fully_local(cmd))) {
484                 if (unlikely(!dev->handler->parse_atomic &&
485                              scst_cmd_atomic(cmd))) {
486                         /*
487                          * This shouldn't happen because of the
488                          * SCST_TGT_DEV_AFTER_* optimization.
489                          */
490                         TRACE_DBG("Dev handler %s parse() needs thread "
491                                 "context, rescheduling", dev->handler->name);
492                         res = SCST_CMD_STATE_RES_NEED_THREAD;
493                         goto out;
494                 }
495
496                 TRACE_DBG("Calling dev handler %s parse(%p)",
497                       dev->handler->name, cmd);
498                 TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
499                                 cmd->cdb, cmd->cdb_len);
500                 scst_set_cur_start(cmd);
501                 state = dev->handler->parse(cmd);
502                 /* Caution: cmd can be already dead here */
503                 TRACE_DBG("Dev handler %s parse() returned %d",
504                         dev->handler->name, state);
505
506                 switch (state) {
507                 case SCST_CMD_STATE_NEED_THREAD_CTX:
508                         scst_set_parse_time(cmd);
509                         TRACE_DBG("Dev handler %s parse() requested thread "
510                               "context, rescheduling", dev->handler->name);
511                         res = SCST_CMD_STATE_RES_NEED_THREAD;
512                         goto out;
513
514                 case SCST_CMD_STATE_STOP:
515                         TRACE_DBG("Dev handler %s parse() requested stop "
516                                 "processing", dev->handler->name);
517                         res = SCST_CMD_STATE_RES_CONT_NEXT;
518                         goto out;
519                 }
520
521                 scst_set_parse_time(cmd);
522
523                 if (state == SCST_CMD_STATE_DEFAULT)
524                         state = SCST_CMD_STATE_PREPARE_SPACE;
525         } else
526                 state = SCST_CMD_STATE_PREPARE_SPACE;
527
528         if (cmd->data_len == -1)
529                 cmd->data_len = cmd->bufflen;
530
531         if (cmd->bufflen == 0) {
532                 /*
533                  * According to SPC, a bufflen of 0 for data transfer commands
534                  * isn't an error, so we need to fix the transfer direction.
535                  */
536                 cmd->data_direction = SCST_DATA_NONE;
537         }
538
539         if (cmd->dh_data_buf_alloced &&
540             unlikely((orig_bufflen > cmd->bufflen))) {
541                 PRINT_ERROR("Dev handler supplied data buffer (size %d), "
542                         "is less, than required (size %d)", cmd->bufflen,
543                         orig_bufflen);
544                 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
545                 goto out_error;
546         }
547
548         if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
549                 goto set_res;
550
551         if (unlikely((cmd->bufflen == 0) &&
552                      (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
553                 PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
554                         "(handler %s, target %s)", cmd->cdb[0],
555                         dev->handler->name, cmd->tgtt->name);
556                 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
557                 goto out_error;
558         }
559
560 #ifdef CONFIG_SCST_EXTRACHECKS
561         if ((cmd->bufflen != 0) &&
562             ((cmd->data_direction == SCST_DATA_NONE) ||
563              ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
564                 PRINT_ERROR("Dev handler %s parse() returned "
565                         "invalid cmd data_direction %d, bufflen %d, state %d "
566                         "or sg %p (opcode 0x%x)", dev->handler->name,
567                         cmd->data_direction, cmd->bufflen, state, cmd->sg,
568                         cmd->cdb[0]);
569                 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
570                 goto out_error;
571         }
572 #endif
573
574         if (scst_cmd_is_expected_set(cmd)) {
575 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
576 #       ifdef CONFIG_SCST_EXTRACHECKS
577                 if ((cmd->data_direction != cmd->expected_data_direction) ||
578                     (cmd->bufflen != cmd->expected_transfer_len)) {
579                         PRINT_WARNING("Expected values don't match decoded "
580                                 "ones: data_direction %d, "
581                                 "expected_data_direction %d, "
582                                 "bufflen %d, expected_transfer_len %d",
583                                 cmd->data_direction,
584                                 cmd->expected_data_direction,
585                                 cmd->bufflen, cmd->expected_transfer_len);
586                         PRINT_BUFFER("Suspicious CDB", cmd->cdb, cmd->cdb_len);
587                 }
588 #       endif
589                 cmd->data_direction = cmd->expected_data_direction;
590                 cmd->bufflen = cmd->expected_transfer_len;
591 #else
592                 if (unlikely(cmd->data_direction !=
593                                 cmd->expected_data_direction)) {
594                         if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
595                              (cmd->bufflen != 0)) &&
596                             !scst_is_allowed_to_mismatch_cmd(cmd)) {
597                                 PRINT_ERROR("Expected data direction %d for "
598                                         "opcode 0x%02x (handler %s, target %s) "
599                                         "doesn't match "
600                                         "decoded value %d",
601                                         cmd->expected_data_direction,
602                                         cmd->cdb[0], dev->handler->name,
603                                         cmd->tgtt->name, cmd->data_direction);
604                                 PRINT_BUFFER("Failed CDB",
605                                         cmd->cdb, cmd->cdb_len);
606                                 scst_set_cmd_error(cmd,
607                                    SCST_LOAD_SENSE(scst_sense_invalid_message));
608                                 goto out_dev_done;
609                         }
610                 }
611                 if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
612                         TRACE(TRACE_MGMT_MINOR, "Warning: expected "
613                                 "transfer length %d for opcode 0x%02x "
614                                 "(handler %s, target %s) doesn't match "
615                                 "decoded value %d. Faulty initiator "
616                                 "(e.g. VMware is known to be one), or perhaps "
617                                 "scst_scsi_op_table should be updated?",
618                                 cmd->expected_transfer_len, cmd->cdb[0],
619                                 dev->handler->name, cmd->tgtt->name,
620                                 cmd->bufflen);
621                         PRINT_BUFF_FLAG(TRACE_MGMT_MINOR, "Suspicious CDB",
622                                 cmd->cdb, cmd->cdb_len);
623                 }
624 #endif
625         }
626
627         if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
628                 PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
629                         "target %s", cmd->cdb[0], dev->handler->name,
630                         cmd->tgtt->name);
631                 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
632                 goto out_error;
633         }
634
635 set_res:
636 #ifdef CONFIG_SCST_EXTRACHECKS
637         switch (state) {
638         case SCST_CMD_STATE_PREPARE_SPACE:
639         case SCST_CMD_STATE_PRE_PARSE:
640         case SCST_CMD_STATE_DEV_PARSE:
641         case SCST_CMD_STATE_RDY_TO_XFER:
642         case SCST_CMD_STATE_TGT_PRE_EXEC:
643         case SCST_CMD_STATE_SEND_FOR_EXEC:
644         case SCST_CMD_STATE_LOCAL_EXEC:
645         case SCST_CMD_STATE_REAL_EXEC:
646         case SCST_CMD_STATE_PRE_DEV_DONE:
647         case SCST_CMD_STATE_DEV_DONE:
648         case SCST_CMD_STATE_PRE_XMIT_RESP:
649         case SCST_CMD_STATE_XMIT_RESP:
650         case SCST_CMD_STATE_FINISHED:
651         case SCST_CMD_STATE_FINISHED_INTERNAL:
652 #endif
653                 cmd->state = state;
654                 res = SCST_CMD_STATE_RES_CONT_SAME;
655 #ifdef CONFIG_SCST_EXTRACHECKS
656                 break;
657
658         default:
659                 if (state >= 0) {
660                         PRINT_ERROR("Dev handler %s parse() returned "
661                              "invalid cmd state %d (opcode %d)",
662                              dev->handler->name, state, cmd->cdb[0]);
663                 } else {
664                         PRINT_ERROR("Dev handler %s parse() returned "
665                                 "error %d (opcode %d)", dev->handler->name,
666                                 state, cmd->cdb[0]);
667                 }
668                 goto out_error;
669         }
670 #endif
671
672         if (cmd->resp_data_len == -1) {
673                 if (cmd->data_direction & SCST_DATA_READ)
674                         cmd->resp_data_len = cmd->bufflen;
675                 else
676                          cmd->resp_data_len = 0;
677         }
678
679 out:
680         TRACE_EXIT_HRES(res);
681         return res;
682
683 out_error:
684         /* dev_done() will be called as part of the regular cmd's finish */
685         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
686
687 #ifndef CONFIG_SCST_USE_EXPECTED_VALUES
688 out_dev_done:
689 #endif
690         cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
691         res = SCST_CMD_STATE_RES_CONT_SAME;
692         goto out;
693 }
694
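/*
 * Prepares the command's data buffer: lets the target driver allocate it via
 * alloc_data_buf() if requested, reuses a dev handler allocated buffer, or
 * allocates the space in SCST itself. On an atomic-context allocation failure
 * the command is rescheduled to a thread; otherwise BUSY/QUEUE FULL is
 * returned to the initiator.
 */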
695 static int scst_prepare_space(struct scst_cmd *cmd)
696 {
697         int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
698
699         TRACE_ENTRY();
700
701         if (cmd->data_direction == SCST_DATA_NONE)
702                 goto done;
703
704         if (cmd->tgt_need_alloc_data_buf) {
705                 int orig_bufflen = cmd->bufflen;
706
707                 TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
708                         cmd);
709
710                 scst_set_cur_start(cmd);
711                 r = cmd->tgtt->alloc_data_buf(cmd);
712                 scst_set_alloc_buf_time(cmd);
713
714                 if (r > 0)
715                         goto alloc;
716                 else if (r == 0) {
717                         if (unlikely(cmd->bufflen == 0)) {
718                                 /* See comment in scst_alloc_space() */
719                                 if (cmd->sg == NULL)
720                                         goto alloc;
721                         }
722
723                         cmd->tgt_data_buf_alloced = 1;
724
725                         if (unlikely(orig_bufflen < cmd->bufflen)) {
726                                 PRINT_ERROR("Target driver allocated data "
727                                         "buffer (size %d) is smaller than "
728                                         "required (size %d)", orig_bufflen,
729                                         cmd->bufflen);
730                                 goto out_error;
731                         }
732                         TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
733                 } else
734                         goto check;
735         }
736
737 alloc:
738         if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
739                 r = scst_alloc_space(cmd);
740         } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
741                 TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
742                 r = 0;
743         } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
744                 TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
745                 cmd->sg = cmd->tgt_sg;
746                 cmd->sg_cnt = cmd->tgt_sg_cnt;
747                 cmd->in_sg = cmd->tgt_in_sg;
748                 cmd->in_sg_cnt = cmd->tgt_in_sg_cnt;
749                 r = 0;
750         } else {
751                 TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
752                         "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
753                         cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
754                 r = 0;
755         }
756
757 check:
758         if (r != 0) {
759                 if (scst_cmd_atomic(cmd)) {
760                         TRACE_MEM("%s", "Atomic memory allocation failed, "
761                               "rescheduling to the thread");
762                         res = SCST_CMD_STATE_RES_NEED_THREAD;
763                         goto out;
764                 } else
765                         goto out_no_space;
766         }
767
768 done:
769         if (cmd->preprocessing_only)
770                 cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE;
771         else if (cmd->data_direction & SCST_DATA_WRITE)
772                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
773         else
774                 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
775
776 out:
777         TRACE_EXIT_HRES(res);
778         return res;
779
780 out_no_space:
781         TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
782                 "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
783         scst_set_busy(cmd);
784         scst_set_cmd_abnormal_done_state(cmd);
785         res = SCST_CMD_STATE_RES_CONT_SAME;
786         goto out;
787
788 out_error:
789         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
790         scst_set_cmd_abnormal_done_state(cmd);
791         res = SCST_CMD_STATE_RES_CONT_SAME;
792         goto out;
793 }
794
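/*
 * Notifies the target driver that preprocessing of the command has finished
 * by calling its preprocessing_done() callback. Processing resumes when the
 * driver calls scst_restart_cmd().
 */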
795 static int scst_preprocessing_done(struct scst_cmd *cmd)
796 {
797         int res;
798
799         TRACE_ENTRY();
800
801         EXTRACHECKS_BUG_ON(!cmd->preprocessing_only);
802
803         cmd->preprocessing_only = 0;
804
805         res = SCST_CMD_STATE_RES_CONT_NEXT;
806         cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE_CALLED;
807
808         TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
809         scst_set_cur_start(cmd);
810         cmd->tgtt->preprocessing_done(cmd);
811         TRACE_DBG("%s", "preprocessing_done() returned");
812
813         TRACE_EXIT_HRES(res);
814         return res;
815 }
816
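/*
 * scst_restart_cmd() - restart command processing after the target driver
 * has finished the work triggered by preprocessing_done().
 *
 * Depending on the SCST_PREPROCESS_STATUS_* value in @status, the command
 * either continues to data transfer/execution or is finished with an error,
 * in the given preferred context.
 */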
817 void scst_restart_cmd(struct scst_cmd *cmd, int status,
818         enum scst_exec_context pref_context)
819 {
820         TRACE_ENTRY();
821
822         scst_set_restart_waiting_time(cmd);
823
824         TRACE_DBG("Preferred context: %d", pref_context);
825         TRACE_DBG("tag=%llu, status=%#x",
826                   (long long unsigned int)scst_cmd_get_tag(cmd),
827                   status);
828
829 #ifdef CONFIG_SCST_EXTRACHECKS
830         if ((in_irq() || irqs_disabled()) &&
831             ((pref_context == SCST_CONTEXT_DIRECT) ||
832              (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
833                 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
834                         "SCST_CONTEXT_THREAD instead", pref_context,
835                         cmd->tgtt->name);
836                 pref_context = SCST_CONTEXT_THREAD;
837         }
838 #endif
839
840         switch (status) {
841         case SCST_PREPROCESS_STATUS_SUCCESS:
842                 if (cmd->data_direction & SCST_DATA_WRITE)
843                         cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
844                 else
845                         cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
846                 if (cmd->set_sn_on_restart_cmd)
847                         scst_cmd_set_sn(cmd);
848                 /* Small context optimization */
849                 if ((pref_context == SCST_CONTEXT_TASKLET) ||
850                     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
851                     ((pref_context == SCST_CONTEXT_SAME) &&
852                      scst_cmd_atomic(cmd))) {
853                         if (cmd->data_direction & SCST_DATA_WRITE) {
854                                 if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
855                                                 &cmd->tgt_dev->tgt_dev_flags))
856                                         pref_context = SCST_CONTEXT_THREAD;
857                         } else {
858                                 if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
859                                                 &cmd->tgt_dev->tgt_dev_flags))
860                                         pref_context = SCST_CONTEXT_THREAD;
861                         }
862                 }
863                 break;
864
865         case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
866                 scst_set_cmd_abnormal_done_state(cmd);
867                 break;
868
869         case SCST_PREPROCESS_STATUS_ERROR_FATAL:
870                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
871                 /* fall through */
872         case SCST_PREPROCESS_STATUS_ERROR:
873                 if (cmd->sense != NULL)
874                         scst_set_cmd_error(cmd,
875                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
876                 scst_set_cmd_abnormal_done_state(cmd);
877                 break;
878
879         default:
880                 PRINT_ERROR("%s() received unknown status %x", __func__,
881                         status);
882                 scst_set_cmd_abnormal_done_state(cmd);
883                 break;
884         }
885
886         scst_process_redirect_cmd(cmd, pref_context, 1);
887
888         TRACE_EXIT();
889         return;
890 }
891 EXPORT_SYMBOL(scst_restart_cmd);
892
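/*
 * Asks the target driver to start receiving write data by calling its
 * rdy_to_xfer() callback. Handles QUEUE FULL retries, schedules the HW
 * pending work if the driver implements on_hw_pending_cmd_timeout(), and
 * skips the callback for internal commands or drivers without rdy_to_xfer().
 */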
893 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
894 {
895         int res, rc;
896         struct scst_tgt_template *tgtt = cmd->tgtt;
897
898         TRACE_ENTRY();
899
900         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
901                 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
902                 goto out_dev_done;
903         }
904
905         if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
906                 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
907                 res = SCST_CMD_STATE_RES_CONT_SAME;
908                 goto out;
909         }
910
911         if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
912                 /*
913                  * This shouldn't happen because of the
914                  * SCST_TGT_DEV_AFTER_* optimization.
915                  */
916                 TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
917                               "context, rescheduling", tgtt->name);
918                 res = SCST_CMD_STATE_RES_NEED_THREAD;
919                 goto out;
920         }
921
922         while (1) {
923                 int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
924
925                 res = SCST_CMD_STATE_RES_CONT_NEXT;
926                 cmd->state = SCST_CMD_STATE_DATA_WAIT;
927
928                 if (tgtt->on_hw_pending_cmd_timeout != NULL) {
929                         struct scst_session *sess = cmd->sess;
930                         cmd->hw_pending_start = jiffies;
931                         cmd->cmd_hw_pending = 1;
932                         if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
933                                 TRACE_DBG("Sched HW pending work for sess %p "
934                                         "(max time %d)", sess,
935                                         tgtt->max_hw_pending_time);
936                                 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
937                                         &sess->sess_aflags);
938                                 schedule_delayed_work(&sess->hw_pending_work,
939                                         tgtt->max_hw_pending_time * HZ);
940                         }
941                 }
942
943                 scst_set_cur_start(cmd);
944
945                 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
946 #ifdef CONFIG_SCST_DEBUG_RETRY
947                 if (((scst_random() % 100) == 75))
948                         rc = SCST_TGT_RES_QUEUE_FULL;
949                 else
950 #endif
951                         rc = tgtt->rdy_to_xfer(cmd);
952                 TRACE_DBG("rdy_to_xfer() returned %d", rc);
953
954                 if (likely(rc == SCST_TGT_RES_SUCCESS))
955                         goto out;
956
957                 scst_set_rdy_to_xfer_time(cmd);
958
959                 cmd->cmd_hw_pending = 0;
960
961                 /* Restore the previous state */
962                 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
963
964                 switch (rc) {
965                 case SCST_TGT_RES_QUEUE_FULL:
966                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
967                                 break;
968                         else
969                                 continue;
970
971                 case SCST_TGT_RES_NEED_THREAD_CTX:
972                         TRACE_DBG("Target driver %s "
973                               "rdy_to_xfer() requested thread "
974                               "context, rescheduling", tgtt->name);
975                         res = SCST_CMD_STATE_RES_NEED_THREAD;
976                         break;
977
978                 default:
979                         goto out_error_rc;
980                 }
981                 break;
982         }
983
984 out:
985         TRACE_EXIT_HRES(res);
986         return res;
987
988 out_error_rc:
989         if (rc == SCST_TGT_RES_FATAL_ERROR) {
990                 PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
991                      "fatal error", tgtt->name);
992         } else {
993                 PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
994                             "value %d", tgtt->name, rc);
995         }
996         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
997
998 out_dev_done:
999         scst_set_cmd_abnormal_done_state(cmd);
1000         res = SCST_CMD_STATE_RES_CONT_SAME;
1001         goto out;
1002 }
1003
1004 /* No locks, but might be in IRQ */
1005 static void scst_process_redirect_cmd(struct scst_cmd *cmd,
1006         enum scst_exec_context context, int check_retries)
1007 {
1008         struct scst_tgt *tgt = cmd->tgt;
1009         unsigned long flags;
1010
1011         TRACE_ENTRY();
1012
1013         TRACE_DBG("Context: %x", context);
1014
1015         if (context == SCST_CONTEXT_SAME)
1016                 context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
1017                                                  SCST_CONTEXT_DIRECT;
1018
1019         switch (context) {
1020         case SCST_CONTEXT_DIRECT_ATOMIC:
1021                 scst_process_active_cmd(cmd, true);
1022                 break;
1023
1024         case SCST_CONTEXT_DIRECT:
1025                 if (check_retries)
1026                         scst_check_retries(tgt);
1027                 scst_process_active_cmd(cmd, false);
1028                 break;
1029
1030         default:
1031                 PRINT_ERROR("Context %x is unknown, using the thread one",
1032                             context);
1033                 /* fall through */
1034         case SCST_CONTEXT_THREAD:
1035                 if (check_retries)
1036                         scst_check_retries(tgt);
1037                 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
1038                 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
1039                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
1040                         list_add(&cmd->cmd_list_entry,
1041                                 &cmd->cmd_lists->active_cmd_list);
1042                 else
1043                         list_add_tail(&cmd->cmd_list_entry,
1044                                 &cmd->cmd_lists->active_cmd_list);
1045                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
1046                 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
1047                 break;
1048
1049         case SCST_CONTEXT_TASKLET:
1050                 if (check_retries)
1051                         scst_check_retries(tgt);
1052                 scst_schedule_tasklet(cmd);
1053                 break;
1054         }
1055
1056         TRACE_EXIT();
1057         return;
1058 }
1059
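/*
 * scst_rx_data() - the target driver has finished receiving the command's
 * data.
 *
 * Depending on @status the command proceeds to the target pre-exec stage or
 * is finished with an error, in the given preferred context.
 */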
1060 void scst_rx_data(struct scst_cmd *cmd, int status,
1061         enum scst_exec_context pref_context)
1062 {
1063         TRACE_ENTRY();
1064
1065         scst_set_rdy_to_xfer_time(cmd);
1066
1067         TRACE_DBG("Preferred context: %d", pref_context);
1068         TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
1069
1070         cmd->cmd_hw_pending = 0;
1071
1072 #ifdef CONFIG_SCST_EXTRACHECKS
1073         if ((in_irq() || irqs_disabled()) &&
1074             ((pref_context == SCST_CONTEXT_DIRECT) ||
1075              (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
1076                 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
1077                         "SCST_CONTEXT_THREAD instead", pref_context,
1078                         cmd->tgtt->name);
1079                 pref_context = SCST_CONTEXT_THREAD;
1080         }
1081 #endif
1082
1083         switch (status) {
1084         case SCST_RX_STATUS_SUCCESS:
1085 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1086                 if (trace_flag & TRACE_RCV_BOT) {
1087                         int i;
1088                         struct scatterlist *sg;
1089                         if (cmd->in_sg != NULL)
1090                                 sg = cmd->in_sg;
1091                         else if (cmd->tgt_in_sg != NULL)
1092                                 sg = cmd->tgt_in_sg;
1093                         else if (cmd->tgt_sg != NULL)
1094                                 sg = cmd->tgt_sg;
1095                         else
1096                                 sg = cmd->sg;
1097                         if (sg != NULL) {
1098                                 TRACE_RECV_BOT("RX data for cmd %p "
1099                                         "(sg_cnt %d, sg %p, sg[0].page %p)",
1100                                         cmd, cmd->tgt_sg_cnt, sg,
1101                                         (void *)sg_page(&sg[0]));
1102                                 for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
1103                                         PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
1104                                                 sg_virt(&sg[i]), sg[i].length);
1105                                 }
1106                         }
1107                 }
1108 #endif
1109                 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
1110
1111                 /* Small context optimization */
1112                 if ((pref_context == SCST_CONTEXT_TASKLET) ||
1113                     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
1114                     ((pref_context == SCST_CONTEXT_SAME) &&
1115                      scst_cmd_atomic(cmd))) {
1116                         if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1117                                         &cmd->tgt_dev->tgt_dev_flags))
1118                                 pref_context = SCST_CONTEXT_THREAD;
1119                 }
1120                 break;
1121
1122         case SCST_RX_STATUS_ERROR_SENSE_SET:
1123                 scst_set_cmd_abnormal_done_state(cmd);
1124                 break;
1125
1126         case SCST_RX_STATUS_ERROR_FATAL:
1127                 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
1128                 /* fall through */
1129         case SCST_RX_STATUS_ERROR:
1130                 scst_set_cmd_error(cmd,
1131                            SCST_LOAD_SENSE(scst_sense_hardw_error));
1132                 scst_set_cmd_abnormal_done_state(cmd);
1133                 break;
1134
1135         default:
1136                 PRINT_ERROR("scst_rx_data() received unknown status %x",
1137                         status);
1138                 scst_set_cmd_abnormal_done_state(cmd);
1139                 break;
1140         }
1141
1142         scst_process_redirect_cmd(cmd, pref_context, 1);
1143
1144         TRACE_EXIT();
1145         return;
1146 }
1147 EXPORT_SYMBOL(scst_rx_data);
1148
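/*
 * Gives the target driver a last chance to intervene before execution by
 * calling its optional pre_exec() callback and handling the returned
 * SCST_PREPROCESS_STATUS_* value.
 */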
1149 static int scst_tgt_pre_exec(struct scst_cmd *cmd)
1150 {
1151         int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
1152
1153         TRACE_ENTRY();
1154
1155         cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
1156
1157         if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
1158                 goto out;
1159
1160         TRACE_DBG("Calling pre_exec(%p)", cmd);
1161         scst_set_cur_start(cmd);
1162         rc = cmd->tgtt->pre_exec(cmd);
1163         scst_set_pre_exec_time(cmd);
1164         TRACE_DBG("pre_exec() returned %d", rc);
1165
1166         if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
1167                 switch (rc) {
1168                 case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
1169                         scst_set_cmd_abnormal_done_state(cmd);
1170                         break;
1171                 case SCST_PREPROCESS_STATUS_ERROR_FATAL:
1172                         set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
1173                         /* fall through */
1174                 case SCST_PREPROCESS_STATUS_ERROR:
1175                         scst_set_cmd_error(cmd,
1176                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1177                         scst_set_cmd_abnormal_done_state(cmd);
1178                         break;
1179                 case SCST_PREPROCESS_STATUS_NEED_THREAD:
1180                         TRACE_DBG("Target driver's %s pre_exec() requested "
1181                                 "thread context, rescheduling",
1182                                 cmd->tgtt->name);
1183                         res = SCST_CMD_STATE_RES_NEED_THREAD;
1184                         cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
1185                         break;
1186                 default:
1187                         sBUG();
1188                         break;
1189                 }
1190         }
1191
1192 out:
1193         TRACE_EXIT_RES(res);
1194         return res;
1195 }
1196
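/*
 * Common completion handler for commands executed by the SCSI mid-level:
 * decodes the result into status/msg/host/driver bytes, adjusts the response
 * data length for residuals and saves the sense data on CHECK CONDITION.
 */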
1197 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1198         const uint8_t *rq_sense, int rq_sense_len, int resid)
1199 {
1200         TRACE_ENTRY();
1201
1202         scst_set_exec_time(cmd);
1203
1204         cmd->status = result & 0xff;
1205         cmd->msg_status = msg_byte(result);
1206         cmd->host_status = host_byte(result);
1207         cmd->driver_status = driver_byte(result);
1208         if (unlikely(resid != 0)) {
1209 #ifdef CONFIG_SCST_EXTRACHECKS
1210                 if ((resid < 0) || (resid > cmd->resp_data_len)) {
1211                         PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
1212                                 "op %x)", resid, cmd->resp_data_len,
1213                                 cmd->cdb[0]);
1214                 } else
1215 #endif
1216                         scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
1217         }
1218
1219         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
1220                 /* We might have a double reset UA here */
1221                 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
1222                 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
1223
1224                 scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
1225         }
1226
1227         TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
1228               "cmd->msg_status=%x, cmd->host_status=%x, "
1229               "cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status, resid,
1230               cmd->msg_status, cmd->host_status, cmd->driver_status, cmd);
1231
1232         cmd->completed = 1;
1233
1234         TRACE_EXIT();
1235         return;
1236 }
1237
1238 /* For small context optimization */
1239 static inline enum scst_exec_context scst_optimize_post_exec_context(
1240         struct scst_cmd *cmd, enum scst_exec_context context)
1241 {
1242         if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
1243             (context == SCST_CONTEXT_TASKLET) ||
1244             (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
1245                 if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1246                                 &cmd->tgt_dev->tgt_dev_flags))
1247                         context = SCST_CONTEXT_THREAD;
1248         }
1249         return context;
1250 }
1251
1252 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
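/*
 * On pre-2.6.18 kernels completions arrive via struct scsi_request, so the
 * scst_cmd must be recovered from the request's upper_private_data.
 */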
1253 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1254                                             struct scsi_request **req)
1255 {
1256         struct scst_cmd *cmd = NULL;
1257
1258         if (scsi_cmd) {
1259                 *req = scsi_cmd->sc_request;
1260                 if (*req)
1261                         cmd = (struct scst_cmd *)(*req)->upper_private_data;
1262         }
1263
1264         if (cmd == NULL) {
1265                 PRINT_ERROR("%s", "Request with NULL cmd");
1266                 if (*req)
1267                         scsi_release_request(*req);
1268         }
1269
1270         return cmd;
1271 }
1272
1273 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1274 {
1275         struct scsi_request *req = NULL;
1276         struct scst_cmd *cmd;
1277
1278         TRACE_ENTRY();
1279
1280         cmd = scst_get_cmd(scsi_cmd, &req);
1281         if (cmd == NULL)
1282                 goto out;
1283
1284         scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1285                 sizeof(req->sr_sense_buffer), scsi_cmd->resid);
1286
1287         /* Clear out request structure */
1288         req->sr_use_sg = 0;
1289         req->sr_sglist_len = 0;
1290         req->sr_bufflen = 0;
1291         req->sr_buffer = NULL;
1292         req->sr_underflow = 0;
1293         req->sr_request->rq_disk = NULL; /* disown request blk */
1294
1295         scst_release_request(cmd);
1296
1297         cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1298
1299         scst_process_redirect_cmd(cmd,
1300                 scst_optimize_post_exec_context(cmd, scst_estimate_context()),
1301                                                 0);
1302
1303 out:
1304         TRACE_EXIT();
1305         return;
1306 }
1307 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1308 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1309 {
1310         struct scst_cmd *cmd;
1311
1312         TRACE_ENTRY();
1313
1314         cmd = (struct scst_cmd *)data;
1315         if (cmd == NULL)
1316                 goto out;
1317
1318         scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
1319
1320         cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1321
1322         scst_process_redirect_cmd(cmd,
1323             scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);
1324
1325 out:
1326         TRACE_EXIT();
1327         return;
1328 }
1329 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1330
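/*
 * Completion handler for commands executed locally (by SCST itself or by a
 * dev handler): sets the next command state and redirects processing to the
 * optimized post-exec context.
 */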
1331 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
1332         enum scst_exec_context pref_context)
1333 {
1334         TRACE_ENTRY();
1335
1336         scst_set_exec_time(cmd);
1337
1338         if (next_state == SCST_CMD_STATE_DEFAULT)
1339                 next_state = SCST_CMD_STATE_PRE_DEV_DONE;
1340
1341 #if defined(CONFIG_SCST_DEBUG)
1342         if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
1343                 if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
1344                         int i;
1345                         struct scatterlist *sg = cmd->sg;
1346                         TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
1347                                 "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
1348                         for (i = 0; i < cmd->sg_cnt; ++i) {
1349                                 TRACE_BUFF_FLAG(TRACE_RCV_TOP,
1350                                         "Exec'd sg", sg_virt(&sg[i]),
1351                                         sg[i].length);
1352                         }
1353                 }
1354         }
1355 #endif
1356
1357         cmd->state = next_state;
1358
1359 #ifdef CONFIG_SCST_EXTRACHECKS
1360         if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
1361             (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
1362             (next_state != SCST_CMD_STATE_FINISHED) &&
1363             (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
1364                 PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
1365                         __func__, next_state, cmd->cdb[0]);
1366                 scst_set_cmd_error(cmd,
1367                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1368                 scst_set_cmd_abnormal_done_state(cmd);
1369         }
1370 #endif
1371         pref_context = scst_optimize_post_exec_context(cmd, pref_context);
1372         scst_process_redirect_cmd(cmd, pref_context, 0);
1373
1374         TRACE_EXIT();
1375         return;
1376 }
1377
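/*
 * Local handler for REPORT LUNS: builds the LUN list from the session's
 * tgt_dev hash, fills in the 8-byte header, trims the response length,
 * clears any pending REPORTED LUNS DATA CHANGED UAs and completes the
 * command itself, so it returns SCST_EXEC_COMPLETED (or
 * SCST_EXEC_NEED_THREAD when called in atomic context).
 */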
1378 static int scst_report_luns_local(struct scst_cmd *cmd)
1379 {
1380         int res = SCST_EXEC_COMPLETED, rc;
1381         int dev_cnt = 0;
1382         int buffer_size;
1383         int i;
1384         struct scst_tgt_dev *tgt_dev = NULL;
1385         uint8_t *buffer;
1386         int offs, overflow = 0;
1387
1388         TRACE_ENTRY();
1389
1390         if (scst_cmd_atomic(cmd)) {
1391                 res = SCST_EXEC_NEED_THREAD;
1392                 goto out;
1393         }
1394
1395         rc = scst_check_local_events(cmd);
1396         if (unlikely(rc != 0))
1397                 goto out_done;
1398
1399         cmd->status = 0;
1400         cmd->msg_status = 0;
1401         cmd->host_status = DID_OK;
1402         cmd->driver_status = 0;
1403
1404         if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
1405                 PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
1406                         "LUNS command", cmd->cdb[2]);
1407                 goto out_err;
1408         }
1409
1410         buffer_size = scst_get_buf_first(cmd, &buffer);
1411         if (unlikely(buffer_size == 0))
1412                 goto out_compl;
1413         else if (unlikely(buffer_size < 0))
1414                 goto out_hw_err;
1415
1416         if (buffer_size < 16)
1417                 goto out_put_err;
1418
1419         memset(buffer, 0, buffer_size);
1420         offs = 8;
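        /*
         * REPORT LUNS parameter data layout: bytes 0-3 hold the LUN LIST
         * LENGTH (number of LUNs * 8), bytes 4-7 are reserved and the
         * 8-byte LUN entries start at byte 8, hence offs starts at 8. The
         * header itself is filled in after the list has been built.
         */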
1421
1422         /*
1423          * This cmd prevents activities from being suspended, so we can access
1424          * sess->sess_tgt_dev_list_hash without any additional protection.
1425          */
1426         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1427                 struct list_head *sess_tgt_dev_list_head =
1428                         &cmd->sess->sess_tgt_dev_list_hash[i];
1429                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
1430                                 sess_tgt_dev_list_entry) {
1431                         if (!overflow) {
1432                                 if (offs >= buffer_size) {
1433                                         scst_put_buf(cmd, buffer);
1434                                         buffer_size = scst_get_buf_next(cmd,
1435                                                                        &buffer);
1436                                         if (buffer_size > 0) {
1437                                                 memset(buffer, 0, buffer_size);
1438                                                 offs = 0;
1439                                         } else {
1440                                                 overflow = 1;
1441                                                 goto inc_dev_cnt;
1442                                         }
1443                                 }
1444                                 if ((buffer_size - offs) < 8) {
1445                                         PRINT_ERROR("Buffer allocated for "
1446                                                 "REPORT LUNS command is too "
1447                                                 "small to fit an 8 byte "
1448                                                 "entry (buffer_size=%d)",
1449                                                 buffer_size);
1450                                         goto out_put_hw_err;
1451                                 }
1452                                 buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
1453                                 buffer[offs+1] = tgt_dev->lun & 0xff;
1454                                 offs += 8;
1455                         }
1456 inc_dev_cnt:
1457                         dev_cnt++;
1458                 }
1459         }
1460         if (!overflow)
1461                 scst_put_buf(cmd, buffer);
1462
1463         /* Set the response header */
1464         buffer_size = scst_get_buf_first(cmd, &buffer);
1465         if (unlikely(buffer_size == 0))
1466                 goto out_compl;
1467         else if (unlikely(buffer_size < 0))
1468                 goto out_hw_err;
1469
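        /*
         * dev_cnt * 8 is the LUN LIST LENGTH in bytes; the extra 8 added
         * below accounts for the 8-byte header when trimming resp_data_len.
         */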
1470         dev_cnt *= 8;
1471         buffer[0] = (dev_cnt >> 24) & 0xff;
1472         buffer[1] = (dev_cnt >> 16) & 0xff;
1473         buffer[2] = (dev_cnt >> 8) & 0xff;
1474         buffer[3] = dev_cnt & 0xff;
1475
1476         scst_put_buf(cmd, buffer);
1477
1478         dev_cnt += 8;
1479         if (dev_cnt < cmd->resp_data_len)
1480                 scst_set_resp_data_len(cmd, dev_cnt);
1481
1482 out_compl:
1483         cmd->completed = 1;
1484
1485         /* Clear a leftover REPORTED LUNS DATA CHANGED UA, if any. */
1486
1487         /*
1488          * This cmd prevents activities from being suspended, so we can access
1489          * sess->sess_tgt_dev_list_hash without any additional protection.
1490          */
1491         for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1492                 struct list_head *sess_tgt_dev_list_head =
1493                         &cmd->sess->sess_tgt_dev_list_hash[i];
1494
1495                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
1496                                 sess_tgt_dev_list_entry) {
1497                         struct scst_tgt_dev_UA *ua;
1498
1499                         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1500                         list_for_each_entry(ua, &tgt_dev->UA_list,
1501                                                 UA_list_entry) {
1502                                 if (scst_analyze_sense(ua->UA_sense_buffer,
1503                                                 ua->UA_valid_sense_len,
1504                                                 SCST_SENSE_ALL_VALID,
1505                                                 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
1506                                         TRACE_MGMT_DBG("Freeing not needed "
1507                                                 "REPORTED LUNS DATA CHANGED UA "
1508                                                 "%p", ua);
1509                                         list_del(&ua->UA_list_entry);
1510                                         mempool_free(ua, scst_ua_mempool);
1511                                         break;
1512                                 }
1513                         }
1514                         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1515                 }
1516         }
1517
1518 out_done:
1519         /* Report the result */
1520         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1521
1522 out:
1523         TRACE_EXIT_RES(res);
1524         return res;
1525
1526 out_put_err:
1527         scst_put_buf(cmd, buffer);
1528
1529 out_err:
1530         scst_set_cmd_error(cmd,
1531                    SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1532         goto out_compl;
1533
1534 out_put_hw_err:
1535         scst_put_buf(cmd, buffer);
1536
1537 out_hw_err:
1538         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1539         goto out_compl;
1540 }
1541
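/*
 * Local handler for REQUEST SENSE: if sense is stored in the tgt_dev
 * (tgt_dev_valid_sense_len != 0), it is returned to the initiator,
 * converted between fixed and descriptor formats when the requested format
 * differs from the stored one, and the command is completed here. Otherwise
 * SCST_EXEC_NOT_COMPLETED is returned and the command continues down the
 * normal execution path.
 */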
1542 static int scst_request_sense_local(struct scst_cmd *cmd)
1543 {
1544         int res = SCST_EXEC_COMPLETED, rc;
1545         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1546         uint8_t *buffer;
1547         int buffer_size = 0, sl = 0;
1548
1549         TRACE_ENTRY();
1550
1551         rc = scst_check_local_events(cmd);
1552         if (unlikely(rc != 0))
1553                 goto out_done;
1554
1555         cmd->status = 0;
1556         cmd->msg_status = 0;
1557         cmd->host_status = DID_OK;
1558         cmd->driver_status = 0;
1559
1560         spin_lock_bh(&tgt_dev->tgt_dev_lock);
1561
1562         if (tgt_dev->tgt_dev_valid_sense_len == 0)
1563                 goto out_not_completed;
1564
1565         TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);
1566
1567         buffer_size = scst_get_buf_first(cmd, &buffer);
1568         if (unlikely(buffer_size == 0))
1569                 goto out_compl;
1570         else if (unlikely(buffer_size < 0))
1571                 goto out_hw_err;
1572
1573         memset(buffer, 0, buffer_size);
1574
1575         if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
1576              (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
1577                 PRINT_WARNING("%s: Fixed format of the saved sense, but "
1578                         "descriptor format requested. Conversion will "
1579                         "truncate data", cmd->op_name);
1580                 PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
1581                         tgt_dev->tgt_dev_valid_sense_len);
1582
1583                 buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
1584                 sl = scst_set_sense(buffer, buffer_size, true,
1585                         tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
1586                         tgt_dev->tgt_dev_sense[13]);
1587         } else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
1588                     (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
1589                 PRINT_WARNING("%s: Descriptor format of the "
1590                         "saved sense, but fixed format requested. Conversion "
1591                         "will truncate data", cmd->op_name);
1592                 PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
1593                         tgt_dev->tgt_dev_valid_sense_len);
1594
1595                 buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
1596                 sl = scst_set_sense(buffer, buffer_size, false,
1597                         tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
1598                         tgt_dev->tgt_dev_sense[3]);
1599         } else {
1600                 if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
1601                         sl = tgt_dev->tgt_dev_valid_sense_len;
1602                 else {
1603                         sl = buffer_size;
1604                         PRINT_WARNING("%s: Returned sense truncated to "
1605                                 "size %d (needed %d)", cmd->op_name,
1606                                 buffer_size, tgt_dev->tgt_dev_valid_sense_len);
1607                 }
1608                 memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
1609         }
1610
1611         scst_put_buf(cmd, buffer);
1612
1613         tgt_dev->tgt_dev_valid_sense_len = 0;
1614
1615         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1616
1617         scst_set_resp_data_len(cmd, sl);
1618
1619 out_compl:
1620         cmd->completed = 1;
1621
1622 out_done:
1623         /* Report the result */
1624         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1625
1626 out:
1627         TRACE_EXIT_RES(res);
1628         return res;
1629
1630 out_hw_err:
1631         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1632         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1633         goto out_compl;
1634
1635 out_not_completed:
1636         spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1637         res = SCST_EXEC_NOT_COMPLETED;
1638         goto out;
1639 }
1640
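/*
 * Called for MODE SELECT/MODE SELECT(10)/LOG SELECT before they are passed
 * further: requires thread context and blocks the device via
 * scst_block_dev_cmd() so the SELECT is serialized against other commands.
 * Local events are checked later, when the command is executed.
 */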
1641 static int scst_pre_select(struct scst_cmd *cmd)
1642 {
1643         int res = SCST_EXEC_NOT_COMPLETED;
1644
1645         TRACE_ENTRY();
1646
1647         if (scst_cmd_atomic(cmd)) {
1648                 res = SCST_EXEC_NEED_THREAD;
1649                 goto out;
1650         }
1651
1652         scst_block_dev_cmd(cmd, 1);
1653
1654         /* Local events will be checked when the cmd is executed */
1655
1656 out:
1657         TRACE_EXIT_RES(res);
1658         return res;
1659 }
1660
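/*
 * Local part of RESERVE(6)/RESERVE(10): rejects 3rd-party RESERVE(10),
 * blocks the device in SCST_CONTR_MODE_ONE_TASK_SET mode, returns
 * RESERVATION CONFLICT if another session already holds the reservation,
 * otherwise marks all other tgt_devs of the device as reserved and lets the
 * command continue (SCST_EXEC_NOT_COMPLETED).
 */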
1661 static int scst_reserve_local(struct scst_cmd *cmd)
1662 {
1663         int res = SCST_EXEC_NOT_COMPLETED, rc;
1664         struct scst_device *dev;
1665         struct scst_tgt_dev *tgt_dev_tmp;
1666
1667         TRACE_ENTRY();
1668
1669         if (scst_cmd_atomic(cmd)) {
1670                 res = SCST_EXEC_NEED_THREAD;
1671                 goto out;
1672         }
1673
1674         if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1675                 PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
1676                      "(lun=%lld)", (long long unsigned int)cmd->lun);
1677                 scst_set_cmd_error(cmd,
1678                         SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1679                 goto out_done;
1680         }
1681
1682         dev = cmd->dev;
1683
1684         if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
1685                 scst_block_dev_cmd(cmd, 1);
1686
1687         rc = scst_check_local_events(cmd);
1688         if (unlikely(rc != 0))
1689                 goto out_done;
1690
1691         spin_lock_bh(&dev->dev_lock);
1692
1693         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1694                 spin_unlock_bh(&dev->dev_lock);
1695                 scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1696                 goto out_done;
1697         }
1698
1699         list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1700                             dev_tgt_dev_list_entry) {
1701                 if (cmd->tgt_dev != tgt_dev_tmp)
1702                         set_bit(SCST_TGT_DEV_RESERVED,
1703                                 &tgt_dev_tmp->tgt_dev_flags);
1704         }
1705         dev->dev_reserved = 1;
1706
1707         spin_unlock_bh(&dev->dev_lock);
1708
1709 out:
1710         TRACE_EXIT_RES(res);
1711         return res;
1712
1713 out_done:
1714         /* Report the result */
1715         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1716         res = SCST_EXEC_COMPLETED;
1717         goto out;
1718 }
1719
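/*
 * Local part of RELEASE(6)/RELEASE(10): if the reservation is held by
 * another session, the command is completed here as a successful no-op;
 * otherwise the reservation bits are cleared on all tgt_devs of the device
 * and the command continues (SCST_EXEC_NOT_COMPLETED).
 */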
1720 static int scst_release_local(struct scst_cmd *cmd)
1721 {
1722         int res = SCST_EXEC_NOT_COMPLETED, rc;
1723         struct scst_tgt_dev *tgt_dev_tmp;
1724         struct scst_device *dev;
1725
1726         TRACE_ENTRY();
1727
1728         if (scst_cmd_atomic(cmd)) {
1729                 res = SCST_EXEC_NEED_THREAD;
1730                 goto out;
1731         }
1732
1733         dev = cmd->dev;
1734
1735         if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
1736                 scst_block_dev_cmd(cmd, 1);
1737
1738         rc = scst_check_local_events(cmd);
1739         if (unlikely(rc != 0))
1740                 goto out_done;
1741
1742         spin_lock_bh(&dev->dev_lock);
1743
1744         /*
1745          * The device could have been RELEASED behind us if the RESERVING
1746          * session was closed (see scst_free_tgt_dev()), but this doesn't
1747          * actually matter, so take the lock and don't retest DEV_RESERVED.
1748          */
1749         if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1750                 res = SCST_EXEC_COMPLETED;
1751                 cmd->status = 0;
1752                 cmd->msg_status = 0;
1753                 cmd->host_status = DID_OK;
1754                 cmd->driver_status = 0;
1755                 cmd->completed = 1;
1756         } else {
1757                 list_for_each_entry(tgt_dev_tmp,
1758                                     &dev->dev_tgt_dev_list,
1759                                     dev_tgt_dev_list_entry) {
1760                         clear_bit(SCST_TGT_DEV_RESERVED,
1761                                 &tgt_dev_tmp->tgt_dev_flags);
1762                 }
1763                 dev->dev_reserved = 0;
1764         }
1765
1766         spin_unlock_bh(&dev->dev_lock);
1767
1768         if (res == SCST_EXEC_COMPLETED)
1769                 goto out_done;
1770
1771 out:
1772         TRACE_EXIT_RES(res);
1773         return res;
1774
1775 out_done:
1776         res = SCST_EXEC_COMPLETED;
1777         /* Report the result */
1778         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1779         goto out;
1780 }
1781
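/*
 * Typical calling pattern from a dev handler's exec() callback (an
 * illustrative sketch only: my_exec() is hypothetical, the SCST calls are
 * the same ones used by the local handlers above):
 *
 *	static int my_exec(struct scst_cmd *cmd)
 *	{
 *		if (scst_check_local_events(cmd) != 0) {
 *			cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
 *				SCST_CONTEXT_SAME);
 *			return SCST_EXEC_COMPLETED;
 *		}
 *		... execute the command ...
 *	}
 */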
1782 /* No locks, no IRQ or IRQ-disabled context allowed */
1783 int scst_check_local_events(struct scst_cmd *cmd)
1784 {
1785         int res, rc;
1786         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1787         struct scst_device *dev = cmd->dev;
1788
1789         TRACE_ENTRY();
1790
1791         /*
1792          * There's no race here, because we need to trace commands sent
1793          * *after* the dev_double_ua_possible flag was set.
1794          */
1795         if (unlikely(dev->dev_double_ua_possible))
1796                 cmd->double_ua_possible = 1;
1797
1798         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1799                 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
1800                 goto out_uncomplete;
1801         }
1802
1803         /* Reservation check before Unit Attention */
1804         if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
1805                               &tgt_dev->tgt_dev_flags))) {
1806                 if (cmd->cdb[0] != INQUIRY &&
1807                     cmd->cdb[0] != REPORT_LUNS &&
1808                     cmd->cdb[0] != RELEASE &&
1809                     cmd->cdb[0] != RELEASE_10 &&
1810                     cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER &&
1811                     (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL ||
1812                      (cmd->cdb[4] & 3)) &&
1813                     cmd->cdb[0] != LOG_SENSE &&
1814                     cmd->cdb[0] != REQUEST_SENSE) {
1815                         scst_set_cmd_error_status(cmd,
1816                                 SAM_STAT_RESERVATION_CONFLICT);
1817                         goto out_complete;
1818                 }
1819         }
1820
1821         /* If there was an internal bus reset, set a reset UA for the command */
1822         if ((dev->scsi_dev != NULL) &&
1823             unlikely(dev->scsi_dev->was_reset)) {
1824                 if (scst_is_ua_command(cmd)) {
1825                         int done = 0;
1826                         /*
1827                          * Prevent more than 1 cmd to be triggered by
1828                          * Prevent more than one cmd from being triggered by
1829                          */
1830                         spin_lock_bh(&dev->dev_lock);
1831                         if (dev->scsi_dev->was_reset) {
1832                                 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1833                                 scst_set_cmd_error(cmd,
1834                                           SCST_LOAD_SENSE(scst_sense_reset_UA));
1835                                 /*
1836                                  * It looks like it is safe to clear was_reset
1837                                  * here.
1838                                  */
1839                                 dev->scsi_dev->was_reset = 0;
1840                                 done = 1;
1841                         }
1842                         spin_unlock_bh(&dev->dev_lock);
1843
1844                         if (done)
1845                                 goto out_complete;
1846                 }
1847         }
1848
1849         if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
1850                         &cmd->tgt_dev->tgt_dev_flags))) {
1851                 if (scst_is_ua_command(cmd)) {
1852                         rc = scst_set_pending_UA(cmd);
1853                         if (rc == 0)
1854                                 goto out_complete;
1855                 }
1856         }
1857
1858         res = 0;
1859
1860 out:
1861         TRACE_EXIT_RES(res);
1862         return res;
1863
1864 out_complete:
1865         res = 1;
1866         sBUG_ON(!cmd->completed);
1867         goto out;
1868
1869 out_uncomplete:
1870         res = -1;
1871         goto out;
1872 }
1873 EXPORT_SYMBOL(scst_check_local_events);
1874
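/*
 * Accounts a completed command against its SN slot. If the command has no
 * slot, or decrementing its slot drains it to zero (the slot is then
 * returned to the free pool), expected_sn is advanced.
 */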
1875 /* No locks */
1876 void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
1877 {
1878         if (slot == NULL)
1879                 goto inc;
1880
1881         /* Optimized for lockless fast path */
1882
1883         TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
1884                 atomic_read(slot));
1885
1886         if (!atomic_dec_and_test(slot))
1887                 goto out;
1888
1889         TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
1890                 tgt_dev->num_free_sn_slots);
1891         if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
1892                 spin_lock_irq(&tgt_dev->sn_lock);
1893                 if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
1894                         if (tgt_dev->num_free_sn_slots < 0)
1895                                 tgt_dev->cur_sn_slot = slot;
1896                         /*
1897                          * To be in sync with the SIMPLE case in scst_cmd_set_sn()
1898                          */
1899                         smp_mb();
1900                         tgt_dev->num_free_sn_slots++;
1901                         TRACE_SN("Incremented num_free_sn_slots (%d)",
1902                                 tgt_dev->num_free_sn_slots);
1903
1904                 }
1905                 spin_unlock_irq(&tgt_dev->sn_lock);
1906         }
1907
1908 inc:
1909         /*
1910          * No protection of expected_sn is needed, because only one thread
1911          * at a time can be here (serialized by sn). It is also assumed
1912          * that the increment can't be observed half-done.
1913          */
1914         tgt_dev->expected_sn++;
1915         /*
1916          * The write must be before the def_cmd_count read to be in sync with
1917          * scst_post_exec_sn(). See comment in scst_send_for_exec().
1918          */
1919         smp_mb();
1920         TRACE_SN("Next expected_sn: %d", tgt_dev->expected_sn);
1921
1922 out:
1923         return;
1924 }
1925
1926 /* No locks */
1927 static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
1928         bool make_active)
1929 {
1930         /* For HQ commands SN is not set */
1931         bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
1932                                cmd->sn_set && !cmd->retry;
1933         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1934         struct scst_cmd *res;
1935
1936         TRACE_ENTRY();
1937
1938         if (inc_expected_sn)
1939                 scst_inc_expected_sn(tgt_dev, cmd->sn_slot);
1940
1941         if (make_active) {
1942                 scst_make_deferred_commands_active(tgt_dev);
1943                 res = NULL;
1944         } else
1945                 res = scst_check_deferred_commands(tgt_dev);
1946
1947         TRACE_EXIT_HRES(res);
1948         return res;
1949 }
1950
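/*
 * Hands the command over for real execution: first to the dev handler's
 * exec() callback, if any; if that doesn't complete the command, it is
 * sent to the SCSI mid-level (pass-through devices only).
 */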
1951 /* cmd must be additionally referenced to not die inside */
1952 static int scst_do_real_exec(struct scst_cmd *cmd)
1953 {
1954         int res = SCST_EXEC_NOT_COMPLETED;
1955 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1956         int rc;
1957 #endif
1958         bool atomic = scst_cmd_atomic(cmd);
1959         struct scst_device *dev = cmd->dev;
1960         struct scst_dev_type *handler = dev->handler;
1961         struct io_context *old_ctx = NULL;
1962         bool ctx_changed = false;
1963
1964         TRACE_ENTRY();
1965
1966         if (!atomic)
1967                 ctx_changed = scst_set_io_context(cmd, &old_ctx);
1968
1969         cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
1970
1971         if (handler->exec) {
1972                 if (unlikely(!dev->handler->exec_atomic && atomic)) {
1973                         /*
1974                          * This shouldn't happen, thanks to the SCST_TGT_DEV_AFTER_*
1975                          * optimization.
1976                          */
1977                         TRACE_DBG("Dev handler %s exec() needs thread "
1978                                 "context, rescheduling", dev->handler->name);
1979                         res = SCST_EXEC_NEED_THREAD;
1980                         goto out_restore;
1981                 }
1982
1983                 TRACE_DBG("Calling dev handler %s exec(%p)",
1984                       handler->name, cmd);
1985                 TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
1986                         cmd->cdb_len);
1987                 scst_set_cur_start(cmd);
1988                 res = handler->exec(cmd);
1989                 TRACE_DBG("Dev handler %s exec() returned %d",
1990                       handler->name, res);
1991
1992                 if (res == SCST_EXEC_COMPLETED)
1993                         goto out_complete;
1994                 else if (res == SCST_EXEC_NEED_THREAD)
1995                         goto out_restore;
1996
1997                 scst_set_exec_time(cmd);
1998
1999                 sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
2000         }
2001
2002         TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
2003
2004         if (unlikely(dev->scsi_dev == NULL)) {
2005                 PRINT_ERROR("Command for virtual device must be "
2006                         "processed by device handler (LUN %lld)!",
2007                         (long long unsigned int)cmd->lun);
2008                 goto out_error;
2009         }
2010
2011         res = scst_check_local_events(cmd);
2012         if (unlikely(res != 0))
2013                 goto out_done;
2014
2015 #ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
2016         if (unlikely(atomic)) {
2017                 TRACE_DBG("Pass-through exec() cannot be called in atomic "
2018                         "context, rescheduling to the thread (handler %s)",
2019                         handler->name);
2020                 res = SCST_EXEC_NEED_THREAD;
2021                 goto out_restore;
2022         }
2023 #endif
2024
2025         scst_set_cur_start(cmd);
2026
2027 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2028         if (unlikely(scst_alloc_request(cmd) != 0)) {
2029                 if (atomic) {
2030                         res = SCST_EXEC_NEED_THREAD;
2031                         goto out_restore;
2032                 } else {
2033                         PRINT_INFO("%s", "Unable to allocate request, "
2034                                 "sending BUSY status");
2035                         goto out_busy;
2036                 }
2037         }
2038
2039         scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
2040                     (void *)cmd->scsi_req->sr_buffer,
2041                     cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
2042                     cmd->retries);
2043 #else
2044 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
2045         rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
2046                         cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
2047                         cmd->timeout, cmd->retries, cmd, scst_cmd_done,
2048                         atomic ? GFP_ATOMIC : GFP_KERNEL);
2049 #else
2050         rc = scst_scsi_exec_async(cmd, scst_cmd_done);
2051 #endif
2052         if (unlikely(rc != 0)) {
2053                 if (atomic) {
2054                         res = SCST_EXEC_NEED_THREAD;
2055                         goto out_restore;
2056                 } else {
2057                         PRINT_ERROR("scst pass-through exec failed: %x", rc);
2058                         goto out_error;
2059                 }
2060         }
2061 #endif
2062
2063 out_complete:
2064         res = SCST_EXEC_COMPLETED;
2065
2066 out_reset_ctx:
2067         if (ctx_changed)
2068                 scst_reset_io_context(cmd->tgt_dev, old_ctx);
2069
2070         TRACE_EXIT();
2071         return res;
2072
2073 out_restore:
2074         scst_set_exec_time(cmd);
2075         /* Restore the state */
2076         cmd->state = SCST_CMD_STATE_REAL_EXEC;
2077         goto out_reset_ctx;
2078
2079 out_error:
2080         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2081         goto out_done;
2082
2083 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2084 out_busy:
2085         scst_set_busy(cmd);
2086         /* fall through */
2087 #endif
2088
2089 out_done:
2090         res = SCST_EXEC_COMPLETED;
2091         /* Report the result */
2092         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
2093         goto out_complete;
2094 }
2095
2096 static inline int scst_real_exec(struct scst_cmd *cmd)
2097 {
2098         int res;
2099
2100         TRACE_ENTRY();
2101
2102         BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
2103         BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
2104         BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);
2105
2106         __scst_cmd_get(cmd);
2107
2108         res = scst_do_real_exec(cmd);
2109
2110         if (likely(res == SCST_EXEC_COMPLETED)) {
2111                 scst_post_exec_sn(cmd, true);
2112                 if (cmd->dev->scsi_dev != NULL)
2113                         generic_unplug_device(
2114                                 cmd->dev->scsi_dev->request_queue);
2115         } else
2116                 sBUG_ON(res != SCST_EXEC_NEED_THREAD);
2117
2118         __scst_cmd_put(cmd);
2119
2120         /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
2121
2122         TRACE_EXIT_RES(res);
2123         return res;
2124 }
2125
2126 static int scst_do_local_exec(struct scst_cmd *cmd)
2127 {
2128         int res;
2129         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2130
2131         TRACE_ENTRY();
2132
2133         /* Check READ_ONLY device status */
2134         if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
2135             (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
2136              cmd->dev->rd_only)) {
2137                 PRINT_WARNING("Attempt to write to a read-only device: "
2138                         "initiator %s, LUN %lld, op %x",
2139                         cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
2140                 scst_set_cmd_error(cmd,
2141                            SCST_LOAD_SENSE(scst_sense_data_protect));
2142                 goto out_done;
2143         }
2144
2145         if (!scst_is_cmd_local(cmd)) {
2146                 res = SCST_EXEC_NOT_COMPLETED;
2147                 goto out;
2148         }
2149
2150         switch (cmd->cdb[0]) {
2151         case MODE_SELECT:
2152         case MODE_SELECT_10:
2153         case LOG_SELECT:
2154                 res = scst_pre_select(cmd);
2155                 break;
2156         case RESERVE:
2157         case RESERVE_10:
2158                 res = scst_reserve_local(cmd);
2159                 break;
2160         case RELEASE:
2161         case RELEASE_10:
2162                 res = scst_release_local(cmd);
2163                 break;
2164         case REPORT_LUNS:
2165                 res = scst_report_luns_local(cmd);
2166                 break;
2167         case REQUEST_SENSE:
2168                 res = scst_request_sense_local(cmd);
2169                 break;
2170         default:
2171                 res = SCST_EXEC_NOT_COMPLETED;
2172                 break;
2173         }
2174
2175 out:
2176         TRACE_EXIT_RES(res);
2177         return res;
2178
2179 out_done:
2180         /* Report the result */
2181         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
2182         res = SCST_EXEC_COMPLETED;
2183         goto out;
2184 }
2185
2186 static int scst_local_exec(struct scst_cmd *cmd)
2187 {
2188         int res;
2189
2190         TRACE_ENTRY();
2191
2192         BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
2193         BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
2194         BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);
2195
2196         __scst_cmd_get(cmd);
2197
2198         res = scst_do_local_exec(cmd);
2199         if (likely(res == SCST_EXEC_NOT_COMPLETED))
2200                 cmd->state = SCST_CMD_STATE_REAL_EXEC;
2201         else if (res == SCST_EXEC_COMPLETED)
2202                 scst_post_exec_sn(cmd, true);
2203         else
2204                 sBUG_ON(res != SCST_EXEC_NEED_THREAD);
2205
2206         __scst_cmd_put(cmd);
2207
2208         /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
2209         TRACE_EXIT_RES(res);
2210         return res;
2211 }
2212
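/*
 * Executes the active command (local handler first, then real exec) and
 * keeps executing commands released by scst_post_exec_sn() until there are
 * none left or thread context is required. A reference is held on the
 * current command so its tgt_dev can't go away under us.
 */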
2213 static int scst_exec(struct scst_cmd **active_cmd)
2214 {
2215         struct scst_cmd *cmd = *active_cmd;
2216         struct scst_cmd *ref_cmd;
2217         struct scst_device *dev = cmd->dev;
2218         int res = SCST_CMD_STATE_RES_CONT_NEXT, count;
2219
2220         TRACE_ENTRY();
2221
2222         if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
2223                 goto out;
2224
2225         /* To protect tgt_dev */
2226         ref_cmd = cmd;
2227         __scst_cmd_get(ref_cmd);
2228
2229         count = 0;
2230         while (1) {
2231                 int rc;
2232
2233                 cmd->sent_for_exec = 1;
2234                 /*
2235                  * To sync with scst_abort_cmd(). The above assignment must
2236                  * be before the SCST_CMD_ABORTED test, done later in
2237                  * scst_check_local_events(). It's far from here, so the order
2238                  * is virtually guaranteed, but let's have it just in case.
2239                  */
2240                 smp_mb();
2241
2242                 cmd->scst_cmd_done = scst_cmd_done_local;
2243                 cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
2244
2245                 rc = scst_do_local_exec(cmd);
2246                 if (likely(rc == SCST_EXEC_NOT_COMPLETED))
2247                         /* Nothing to do */;
2248                 else if (rc == SCST_EXEC_NEED_THREAD) {
2249                         TRACE_DBG("%s", "scst_do_local_exec() requested "
2250                                 "thread context, rescheduling");
2251                         scst_dec_on_dev_cmd(cmd);
2252                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2253                         break;
2254                 } else {
2255                         sBUG_ON(rc != SCST_EXEC_COMPLETED);
2256                         goto done;
2257                 }
2258
2259                 cmd->state = SCST_CMD_STATE_REAL_EXEC;
2260
2261                 rc = scst_do_real_exec(cmd);
2262                 if (likely(rc == SCST_EXEC_COMPLETED))
2263                         /* Nothing to do */;
2264                 else if (rc == SCST_EXEC_NEED_THREAD) {
2265                         TRACE_DBG("scst_do_real_exec() requested thread "
2266                                 "context, rescheduling (cmd %p)", cmd);
2267                         scst_dec_on_dev_cmd(cmd);
2268                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2269                         break;
2270                 } else
2271                         sBUG();
2272
2273 done:
2274                 count++;
2275
2276                 cmd = scst_post_exec_sn(cmd, false);
2277                 if (cmd == NULL)
2278                         break;
2279
2280                 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
2281                         break;
2282
2283                 __scst_cmd_put(ref_cmd);
2284                 ref_cmd = cmd;
2285                 __scst_cmd_get(ref_cmd);
2286         }
2287
2288         *active_cmd = cmd;
2289
2290         if (count == 0)
2291                 goto out_put;
2292
2293         if (dev->scsi_dev != NULL)
2294                 generic_unplug_device(dev->scsi_dev->request_queue);
2295
2296 out_put:
2297         __scst_cmd_put(ref_cmd);
2298         /* !! At this point sess, dev and tgt_dev can already be freed !! */
2299
2300 out:
2301         TRACE_EXIT_RES(res);
2302         return res;
2303 }
2304
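/*
 * Enforces expected-SN ordering: internal and HEAD OF QUEUE commands are
 * executed immediately; commands whose SN doesn't match expected_sn, or
 * that arrive while HQ commands are outstanding, are put on the deferred
 * list (or abnormally completed, if already aborted); everything else goes
 * to scst_exec().
 */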
2305 static int scst_send_for_exec(struct scst_cmd **active_cmd)
2306 {
2307         int res;
2308         struct scst_cmd *cmd = *active_cmd;
2309         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2310         typeof(tgt_dev->expected_sn) expected_sn;
2311
2312         TRACE_ENTRY();
2313
2314         if (unlikely(cmd->internal))
2315                 goto exec;
2316
2317         if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2318                 goto exec;
2319
2320         sBUG_ON(!cmd->sn_set);
2321
2322         expected_sn = tgt_dev->expected_sn;
2323         /* Optimized for lockless fast path */
2324         if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
2325                 spin_lock_irq(&tgt_dev->sn_lock);
2326
2327                 tgt_dev->def_cmd_count++;
2328                 /*
2329                  * A memory barrier is needed here to implement the lockless
2330                  * fast path. We need the exact ordering of the def_cmd_count
2331                  * write and the expected_sn read. Otherwise, we could miss
2332                  * the case when expected_sn was changed to become equal to
2333                  * cmd->sn while we are queuing the cmd on the deferred list
2334                  * after the expected_sn recheck below, leaving the command
2335                  * stuck forever. With the barrier, in that case
2336                  * __scst_check_deferred_commands() will be called and it
2337                  * will take sn_lock, so we will be synchronized.
2338                  */
2339                 smp_mb();
2340
2341                 expected_sn = tgt_dev->expected_sn;
2342                 if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
2343                         if (unlikely(test_bit(SCST_CMD_ABORTED,
2344                                               &cmd->cmd_flags))) {
2345                                 /* Necessary to allow aborting out-of-SN cmds */
2346                                 TRACE_MGMT_DBG("Aborting out of sn cmd %p "
2347                                         "(tag %llu, sn %u)", cmd,
2348                                         (long long unsigned)cmd->tag, cmd->sn);
2349                                 tgt_dev->def_cmd_count--;
2350                                 scst_set_cmd_abnormal_done_state(cmd);
2351                                 res = SCST_CMD_STATE_RES_CONT_SAME;
2352                         } else {
2353                                 TRACE_SN("Deferring cmd %p (sn=%d, set %d, "
2354                                         "expected_sn=%d)", cmd, cmd->sn,
2355                                         cmd->sn_set, expected_sn);
2356                                 list_add_tail(&cmd->sn_cmd_list_entry,
2357                                               &tgt_dev->deferred_cmd_list);
2358                                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2359                         }
2360                         spin_unlock_irq(&tgt_dev->sn_lock);
2361                         goto out;
2362                 } else {
2363                         TRACE_SN("Somebody incremented expected_sn %d, "
2364                                 "continuing", expected_sn);
2365                         tgt_dev->def_cmd_count--;
2366                         spin_unlock_irq(&tgt_dev->sn_lock);
2367                 }
2368         }
2369
2370 exec:
2371         res = scst_exec(active_cmd);
2372
2373 out:
2374         TRACE_EXIT_HRES(res);
2375         return res;
2376 }
2377
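/*
 * Examines the returned status/sense: converts a DID_RESET host status into
 * a reset UA, retries the command once when a double UA window is detected
 * (restoring the original buffer, direction and response length), and hands
 * received UA sense to scst_dev_check_set_UA(). Returns 1 if the command is
 * being retried, 0 otherwise.
 */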
2378 /* No locks supposed to be held */
2379 static int scst_check_sense(struct scst_cmd *cmd)
2380 {
2381         int res = 0;
2382         struct scst_device *dev = cmd->dev;
2383
2384         TRACE_ENTRY();
2385
2386         if (unlikely(cmd->ua_ignore))
2387                 goto out;
2388
2389         /* If there was an internal bus reset behind us, set a reset UA for the cmd */
2390         if ((dev->scsi_dev != NULL) &&
2391             unlikely(cmd->host_status == DID_RESET) &&
2392             scst_is_ua_command(cmd)) {
2393                 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
2394                       dev->scsi_dev->was_reset, cmd->host_status);
2395                 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
2396                 /* It looks like it is safe to clear was_reset here */
2397                 dev->scsi_dev->was_reset = 0;
2398         }
2399
2400         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2401             SCST_SENSE_VALID(cmd->sense)) {
2402                 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2403                         cmd->sense_valid_len);
2404
2405                 /* Check Unit Attention Sense Key */
2406                 if (scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
2407                         if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2408                                         SCST_SENSE_ASC_VALID,
2409                                         0, SCST_SENSE_ASC_UA_RESET, 0)) {
2410                                 if (cmd->double_ua_possible) {
2411                                         TRACE(TRACE_MGMT_MINOR, "Double UA "
2412                                                 "detected for device %p", dev);
2413                                         TRACE(TRACE_MGMT_MINOR, "Retrying cmd"
2414                                                 " %p (tag %llu)", cmd,
2415                                                 (long long unsigned)cmd->tag);
2416
2417                                         cmd->status = 0;
2418                                         cmd->msg_status = 0;
2419                                         cmd->host_status = DID_OK;
2420                                         cmd->driver_status = 0;
2421
2422                                         mempool_free(cmd->sense,
2423                                                      scst_sense_mempool);
2424                                         cmd->sense = NULL;
2425
2426                                         scst_check_restore_sg_buff(cmd);
2427
2428                                         sBUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
2429                                         cmd->data_direction =
2430                                                 cmd->dbl_ua_orig_data_direction;
2431                                         cmd->resp_data_len =
2432                                                 cmd->dbl_ua_orig_resp_data_len;
2433
2434                                         cmd->state = SCST_CMD_STATE_REAL_EXEC;
2435                                         cmd->retry = 1;
2436                                         res = 1;
2437                                         goto out;
2438                                 }
2439                         }
2440                         scst_dev_check_set_UA(dev, cmd, cmd->sense,
2441                                 cmd->sense_valid_len);
2442                 }
2443         }
2444
2445         if (unlikely(cmd->double_ua_possible)) {
2446                 if (scst_is_ua_command(cmd)) {
2447                         TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
2448                                 "cmd %p)", dev, cmd);
2449                         /*
2450                          * Lock used to protect other flags in the bitfield
2451                          * (just in case, actually). Those flags can't be
2452                          * changed in parallel, because the device is
2453                          * serialized.
2454                          */
2455                         spin_lock_bh(&dev->dev_lock);
2456                         dev->dev_double_ua_possible = 0;
2457                         spin_unlock_bh(&dev->dev_lock);
2458                 }
2459         }
2460
2461 out:
2462         TRACE_EXIT_RES(res);
2463         return res;
2464 }
2465
2466 static int scst_check_auto_sense(struct scst_cmd *cmd)
2467 {
2468         int res = 0;
2469
2470         TRACE_ENTRY();
2471
2472         if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2473             (!SCST_SENSE_VALID(cmd->sense) ||
2474              SCST_NO_SENSE(cmd->sense))) {
2475                 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2476                       "cmd->status=%x, cmd->msg_status=%x, "
2477                       "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
2478                       cmd->status, cmd->msg_status, cmd->host_status,
2479                       cmd->driver_status, cmd);
2480                 res = 1;
2481         } else if (unlikely(cmd->host_status)) {
2482                 if ((cmd->host_status == DID_REQUEUE) ||
2483                     (cmd->host_status == DID_IMM_RETRY) ||
2484                     (cmd->host_status == DID_SOFT_ERROR) ||
2485                     (cmd->host_status == DID_ABORT)) {
2486                         scst_set_busy(cmd);
2487                 } else {
2488                         TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2489                                 "received, returning HARDWARE ERROR instead "
2490                                 "(cmd %p)", cmd->host_status, cmd);
2491                         scst_set_cmd_error(cmd,
2492                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
2493                 }
2494         }
2495
2496         TRACE_EXIT_RES(res);
2497         return res;
2498 }
2499
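/*
 * Post-execution fixups done before the dev handler's dev_done(): issues
 * REQUEST SENSE when CHECK CONDITION arrived without sense data, processes
 * sense and auto-sense results, forces the Write Protect bit in MODE SENSE
 * data for read-only exports, clears the unsupported NormACA bit in
 * standard INQUIRY data, cleans up the local reservation state if a real
 * RESERVE failed, and routes MODE/LOG SELECT results to
 * SCST_CMD_STATE_MODE_SELECT_CHECKS.
 */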
2500 static int scst_pre_dev_done(struct scst_cmd *cmd)
2501 {
2502         int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2503
2504         TRACE_ENTRY();
2505
2506         if (unlikely(scst_check_auto_sense(cmd))) {
2507                 PRINT_INFO("Command finished with CHECK CONDITION, but "
2508                             "without sense data (opcode 0x%x), issuing "
2509                             "REQUEST SENSE", cmd->cdb[0]);
2510                 rc = scst_prepare_request_sense(cmd);
2511                 if (rc == 0)
2512                         res = SCST_CMD_STATE_RES_CONT_NEXT;
2513                 else {
2514                         PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
2515                                     "returning HARDWARE ERROR");
2516                         scst_set_cmd_error(cmd,
2517                                 SCST_LOAD_SENSE(scst_sense_hardw_error));
2518                 }
2519                 goto out;
2520         } else if (unlikely(scst_check_sense(cmd)))
2521                 goto out;
2522
2523         if (likely(scsi_status_is_good(cmd->status))) {
2524                 unsigned char type = cmd->dev->type;
2525                 if (unlikely((cmd->cdb[0] == MODE_SENSE ||
2526                               cmd->cdb[0] == MODE_SENSE_10)) &&
2527                     (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
2528                      cmd->dev->rd_only) &&
2529                     (type == TYPE_DISK ||
2530                      type == TYPE_WORM ||
2531                      type == TYPE_MOD ||
2532                      type == TYPE_TAPE)) {
2533                         int32_t length;
2534                         uint8_t *address;
2535                         bool err = false;
2536
2537                         length = scst_get_buf_first(cmd, &address);
2538                         if (length < 0) {
2539                                 PRINT_ERROR("%s", "Unable to get "
2540                                         "MODE_SENSE buffer");
2541                                 scst_set_cmd_error(cmd,
2542                                         SCST_LOAD_SENSE(
2543                                                 scst_sense_hardw_error));
2544                                 err = true;
2545                         } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2546                                 address[2] |= 0x80;   /* Write Protect */
2547                         else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2548                                 address[3] |= 0x80;   /* Write Protect */
2549                         scst_put_buf(cmd, address);
2550
2551                         if (err)
2552                                 goto out;
2553                 }
2554
2555                 /*
2556                  * Check and clear NormACA option for the device, if necessary,
2557                  * since we don't support ACA
2558                  */
2559                 if (unlikely((cmd->cdb[0] == INQUIRY)) &&
2560                     /* Std INQUIRY data (no EVPD) */
2561                     !(cmd->cdb[1] & SCST_INQ_EVPD) &&
2562                     (cmd->resp_data_len > SCST_INQ_BYTE3)) {
2563                         uint8_t *buffer;
2564                         int buflen;
2565                         bool err = false;
2566
2567                         /* ToDo: all pages ?? */
2568                         buflen = scst_get_buf_first(cmd, &buffer);
2569                         if (buflen > SCST_INQ_BYTE3) {
2570 #ifdef CONFIG_SCST_EXTRACHECKS
2571                                 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2572                                         PRINT_INFO("NormACA set for device: "
2573                                             "lun=%lld, type 0x%02x. Clearing it, "
2574                                             "since it's unsupported.",
2575                                             (long long unsigned int)cmd->lun,
2576                                             buffer[0]);
2577                                 }
2578 #endif
2579                                 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2580                         } else if (buflen != 0) {
2581                                 PRINT_ERROR("%s", "Unable to get INQUIRY "
2582                                     "buffer");
2583                                 scst_set_cmd_error(cmd,
2584                                        SCST_LOAD_SENSE(scst_sense_hardw_error));
2585                                 err = true;
2586                         }
2587                         if (buflen > 0)
2588                                 scst_put_buf(cmd, buffer);
2589
2590                         if (err)
2591                                 goto out;
2592                 }
2593
2594                 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2595                     (cmd->cdb[0] == MODE_SELECT_10) ||
2596                     (cmd->cdb[0] == LOG_SELECT))) {
2597                         TRACE(TRACE_SCSI,
2598                                 "MODE/LOG SELECT succeeded (LUN %lld)",
2599                                 (long long unsigned int)cmd->lun);
2600                         cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2601                         goto out;
2602                 }
2603         } else {
2604                 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2605                         if (!test_bit(SCST_TGT_DEV_RESERVED,
2606                                         &cmd->tgt_dev->tgt_dev_flags)) {
2607                                 struct scst_tgt_dev *tgt_dev_tmp;
2608                                 struct scst_device *dev = cmd->dev;
2609
2610                                 TRACE(TRACE_SCSI,
2611                                         "Real RESERVE failed lun=%lld, "
2612                                         "status=%x",
2613                                         (long long unsigned int)cmd->lun,
2614                                         cmd->status);
2615                                 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2616                                         cmd->sense_valid_len);
2617
2618                                 /* Clearing the reservation */
2619                                 spin_lock_bh(&dev->dev_lock);
2620                                 list_for_each_entry(tgt_dev_tmp,
2621                                                     &dev->dev_tgt_dev_list,
2622                                                     dev_tgt_dev_list_entry) {
2623                                         clear_bit(SCST_TGT_DEV_RESERVED,
2624                                                 &tgt_dev_tmp->tgt_dev_flags);
2625                                 }
2626                                 dev->dev_reserved = 0;
2627                                 spin_unlock_bh(&dev->dev_lock);
2628                         }
2629                 }
2630
2631                 /* Check for MODE PARAMETERS CHANGED UA */
2632                 if ((cmd->dev->scsi_dev != NULL) &&
2633                     (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2634                     scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
2635                     scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2636                                         SCST_SENSE_ASCx_VALID,
2637                                         0, 0x2a, 0x01)) {
2638                         TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
2639                                 "%lld)", (long long unsigned int)cmd->lun);
2640                         cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2641                         goto out;
2642                 }
2643         }
2644
2645         cmd->state = SCST_CMD_STATE_DEV_DONE;
2646
2647 out:
2648         TRACE_EXIT_RES(res);
2649         return res;
2650 }
2651
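/*
 * Follow-up state for MODE/LOG SELECT and for "parameters changed" type
 * sense: after a successful SELECT it sets the corresponding MODE/LOG
 * PARAMETERS CHANGED UA on the device; in both cases the cached device
 * parameters are re-read via scst_obtain_device_parameters(). Thread
 * context is required when a real SCSI device has to be queried.
 */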
2652 static int scst_mode_select_checks(struct scst_cmd *cmd)
2653 {
2654         int res = SCST_CMD_STATE_RES_CONT_SAME;
2655         int atomic = scst_cmd_atomic(cmd);
2656
2657         TRACE_ENTRY();
2658
2659         if (likely(scsi_status_is_good(cmd->status))) {
2660                 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2661                     (cmd->cdb[0] == MODE_SELECT_10) ||
2662                     (cmd->cdb[0] == LOG_SELECT))) {
2663                         struct scst_device *dev = cmd->dev;
2664                         int sl;
2665                         uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
2666
2667                         if (atomic && (dev->scsi_dev != NULL)) {
2668                                 TRACE_DBG("%s", "MODE/LOG SELECT: thread "
2669                                         "context required");
2670                                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2671                                 goto out;
2672                         }
2673
2674                         TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2675                                 "setting the SELECT UA (lun=%lld)",
2676                                 (long long unsigned int)cmd->lun);
2677
2678                         spin_lock_bh(&dev->dev_lock);
2679                         if (cmd->cdb[0] == LOG_SELECT) {
2680                                 sl = scst_set_sense(sense_buffer,
2681                                         sizeof(sense_buffer),
2682                                         dev->d_sense,
2683                                         UNIT_ATTENTION, 0x2a, 0x02);
2684                         } else {
2685                                 sl = scst_set_sense(sense_buffer,
2686                                         sizeof(sense_buffer),
2687                                         dev->d_sense,
2688                                         UNIT_ATTENTION, 0x2a, 0x01);
2689                         }
2690                         scst_dev_check_set_local_UA(dev, cmd, sense_buffer, sl);
2691                         spin_unlock_bh(&dev->dev_lock);
2692
2693                         if (dev->scsi_dev != NULL)
2694                                 scst_obtain_device_parameters(dev);
2695                 }
2696         } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
2697                     scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
2698                      /* mode parameters changed */
2699                     (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2700                                         SCST_SENSE_ASCx_VALID,
2701                                         0, 0x2a, 0x01) ||
2702                      scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2703                                         SCST_SENSE_ASC_VALID,
2704                                         0, 0x29, 0) /* reset */ ||
2705                      scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2706                                         SCST_SENSE_ASC_VALID,
2707                                         0, 0x28, 0) /* medium changed */ ||
2708                      /* cleared by another ini (just in case) */
2709                      /* cleared by another initiator (just in case) */
2710                                         SCST_SENSE_ASC_VALID,
2711                                         0, 0x2F, 0))) {
2712                 if (atomic) {
2713                         TRACE_DBG("Possible parameters changed UA %x: "
2714                                 "thread context required", cmd->sense[12]);
2715                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2716                         goto out;
2717                 }
2718
2719                 TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
2720                         "(LUN %lld): getting new parameters", cmd->sense[12],
2721                         (long long unsigned int)cmd->lun);
2722
2723                 scst_obtain_device_parameters(cmd->dev);
2724         } else
2725                 sBUG();
2726
2727         cmd->state = SCST_CMD_STATE_DEV_DONE;
2728
2729 out:
2730         TRACE_EXIT_HRES(res);
2731         return res;
2732 }
2733
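/*
 * Advances the expected SN of the cmd's tgt_dev (if the cmd had an SN
 * assigned) and makes any commands deferred behind it active again.
 */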
2734 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
2735 {
2736         if (likely(cmd->sn_set))
2737                 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
2738
2739         scst_make_deferred_commands_active(cmd->tgt_dev);
2740 }
2741
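/*
 * DEV_DONE processing: calls the dev handler's dev_done() callback (unless
 * the cmd is fully local or the handler has no such callback), moves the cmd
 * to the state the handler requested, and releases the per-device resources
 * taken earlier (device blocking, the "on dev" count, the expected SN).
 */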
2742 static int scst_dev_done(struct scst_cmd *cmd)
2743 {
2744         int res = SCST_CMD_STATE_RES_CONT_SAME;
2745         int state;
2746         struct scst_device *dev = cmd->dev;
2747
2748         TRACE_ENTRY();
2749
2750         state = SCST_CMD_STATE_PRE_XMIT_RESP;
2751
2752         if (likely(!scst_is_cmd_fully_local(cmd)) &&
2753             likely(dev->handler->dev_done != NULL)) {
2754                 int rc;
2755
2756                 if (unlikely(!dev->handler->dev_done_atomic &&
2757                              scst_cmd_atomic(cmd))) {
2758                         /*
2759                          * This should not happen, thanks to the
2760                          * SCST_TGT_DEV_AFTER_* optimization.
2761                          */
2762                         TRACE_DBG("Dev handler %s dev_done() needs thread "
2763                               "context, rescheduling", dev->handler->name);
2764                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2765                         goto out;
2766                 }
2767
2768                 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2769                         dev->handler->name, cmd);
2770                 scst_set_cur_start(cmd);
2771                 rc = dev->handler->dev_done(cmd);
2772                 scst_set_dev_done_time(cmd);
2773                 TRACE_DBG("Dev handler %s dev_done() returned %d",
2774                       dev->handler->name, rc);
2775                 if (rc != SCST_CMD_STATE_DEFAULT)
2776                         state = rc;
2777         }
2778
2779         switch (state) {
2780 #ifdef CONFIG_SCST_EXTRACHECKS
2781         case SCST_CMD_STATE_PRE_XMIT_RESP:
2782         case SCST_CMD_STATE_DEV_PARSE:
2783         case SCST_CMD_STATE_PRE_PARSE:
2784         case SCST_CMD_STATE_PREPARE_SPACE:
2785         case SCST_CMD_STATE_RDY_TO_XFER:
2786         case SCST_CMD_STATE_TGT_PRE_EXEC:
2787         case SCST_CMD_STATE_SEND_FOR_EXEC:
2788         case SCST_CMD_STATE_LOCAL_EXEC:
2789         case SCST_CMD_STATE_REAL_EXEC:
2790         case SCST_CMD_STATE_PRE_DEV_DONE:
2791         case SCST_CMD_STATE_MODE_SELECT_CHECKS:
2792         case SCST_CMD_STATE_DEV_DONE:
2793         case SCST_CMD_STATE_XMIT_RESP:
2794         case SCST_CMD_STATE_FINISHED:
2795         case SCST_CMD_STATE_FINISHED_INTERNAL:
2796 #else
2797         default:
2798 #endif
2799                 cmd->state = state;
2800                 break;
2801         case SCST_CMD_STATE_NEED_THREAD_CTX:
2802                 TRACE_DBG("Dev handler %s dev_done() requested "
2803                       "thread context, rescheduling",
2804                       dev->handler->name);
2805                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2806                 break;
2807 #ifdef CONFIG_SCST_EXTRACHECKS
2808         default:
2809                 if (state >= 0) {
2810                         PRINT_ERROR("Dev handler %s dev_done() returned "
2811                                 "invalid cmd state %d",
2812                                 dev->handler->name, state);
2813                 } else {
2814                         PRINT_ERROR("Dev handler %s dev_done() returned "
2815                                 "error %d", dev->handler->name,
2816                                 state);
2817                 }
2818                 scst_set_cmd_error(cmd,
2819                            SCST_LOAD_SENSE(scst_sense_hardw_error));
2820                 scst_set_cmd_abnormal_done_state(cmd);
2821                 break;
2822 #endif
2823         }
2824
2825         if (cmd->needs_unblocking)
2826                 scst_unblock_dev_cmd(cmd);
2827
2828         if (likely(cmd->dec_on_dev_needed))
2829                 scst_dec_on_dev_cmd(cmd);
2830
2831         if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
2832                 scst_inc_check_expected_sn(cmd);
2833
2834         if (unlikely(cmd->internal))
2835                 cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
2836
2837 out:
2838         TRACE_EXIT_HRES(res);
2839         return res;
2840 }
2841
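/*
 * PRE_XMIT_RESP processing: drops the per-tgt_dev and per-device command
 * counters, unblocks deferred commands if this one was never sent for
 * execution, and handles aborted, CHECK CONDITION and NO_RESP commands
 * before the response is passed to the target driver.
 */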
2842 static int scst_pre_xmit_response(struct scst_cmd *cmd)
2843 {
2844         int res;
2845
2846         TRACE_ENTRY();
2847
2848         EXTRACHECKS_BUG_ON(cmd->internal);
2849
2850 #ifdef CONFIG_SCST_DEBUG_TM
2851         if (cmd->tm_dbg_delayed &&
2852                         !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2853                 if (scst_cmd_atomic(cmd)) {
2854                         TRACE_MGMT_DBG("%s",
2855                                 "DEBUG_TM delayed cmd needs a thread");
2856                         res = SCST_CMD_STATE_RES_NEED_THREAD;
2857                         return res;
2858                 }
2859                 TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
2860                         cmd, cmd->tag);
2861                 schedule_timeout_uninterruptible(HZ);
2862         }
2863 #endif
2864
2865         if (likely(cmd->tgt_dev != NULL)) {
2866                 /*
2867                  * These counters keep the processing latency from growing
2868                  * too long, so decrement them only after the cmd has completed.
2869                  */
2870                 atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
2871                 atomic_dec(&cmd->dev->dev_cmd_count);
2872 #ifdef CONFIG_SCST_ORDERED_READS
2873                 /* If expected values not set, expected direction is UNKNOWN */
2874                 if (cmd->expected_data_direction & SCST_DATA_WRITE)
2875                         atomic_dec(&cmd->dev->write_cmd_count);
2876 #endif
2877                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2878                         scst_on_hq_cmd_response(cmd);
2879
2880                 if (unlikely(!cmd->sent_for_exec)) {
2881                         TRACE_SN("cmd %p was not sent to mid-lev"
2882                                 " (sn %d, set %d)",
2883                                 cmd, cmd->sn, cmd->sn_set);
2884                         scst_unblock_deferred(cmd->tgt_dev, cmd);
2885                         cmd->sent_for_exec = 1;
2886                 }
2887         }
2888
2889         cmd->done = 1;
2890         smp_mb(); /* to sync with scst_abort_cmd() */
2891
2892         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2893                 scst_xmit_process_aborted_cmd(cmd);
2894         else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
2895                 scst_store_sense(cmd);
2896
2897         if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2898                 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu),"
2899                                 " skipping",
2900                                 cmd, (long long unsigned int)cmd->tag);
2901                 cmd->state = SCST_CMD_STATE_FINISHED;
2902                 res = SCST_CMD_STATE_RES_CONT_SAME;
2903                 goto out;
2904         }
2905
2906         cmd->state = SCST_CMD_STATE_XMIT_RESP;
2907         res = SCST_CMD_STATE_RES_CONT_SAME;
2908
2909 out:
2910         TRACE_EXIT_HRES(res);
2911         return res;
2912 }
2913
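/*
 * XMIT_RESP processing: hands the response to the target driver's
 * xmit_response() callback, retrying on SCST_TGT_RES_QUEUE_FULL,
 * rescheduling to thread context when required and failing the cmd on any
 * other error.
 */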
2914 static int scst_xmit_response(struct scst_cmd *cmd)
2915 {
2916         struct scst_tgt_template *tgtt = cmd->tgtt;
2917         int res, rc;
2918
2919         TRACE_ENTRY();
2920
2921         EXTRACHECKS_BUG_ON(cmd->internal);
2922
2923         if (unlikely(!tgtt->xmit_response_atomic &&
2924                      scst_cmd_atomic(cmd))) {
2925                 /*
2926                  * This should not happen, thanks to the
2927                  * SCST_TGT_DEV_AFTER_* optimization.
2928                  */
2929                 TRACE_DBG("Target driver %s xmit_response() needs thread "
2930                               "context, rescheduling", tgtt->name);
2931                 res = SCST_CMD_STATE_RES_NEED_THREAD;
2932                 goto out;
2933         }
2934
2935         while (1) {
2936                 int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
2937
2938                 res = SCST_CMD_STATE_RES_CONT_NEXT;
2939                 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2940
2941                 TRACE_DBG("Calling xmit_response(%p)", cmd);
2942
2943 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2944                 if (trace_flag & TRACE_SND_BOT) {
2945                         int i;
2946                         struct scatterlist *sg;
2947                         if (cmd->tgt_sg != NULL)
2948                                 sg = cmd->tgt_sg;
2949                         else
2950                                 sg = cmd->sg;
2951                         if (sg != NULL) {
2952                                 TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
2953                                         "(sg_cnt %d, sg %p, sg[0].page %p)",
2954                                         cmd, cmd->tgt_sg_cnt, sg,
2955                                         (void *)sg_page(&sg[0]));
2956                                 for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
2957                                         PRINT_BUFF_FLAG(TRACE_SND_BOT,
2958                                                 "Xmitting sg", sg_virt(&sg[i]),
2959                                                 sg[i].length);
2960                                 }
2961                         }
2962                 }
2963 #endif
2964
2965                 if (tgtt->on_hw_pending_cmd_timeout != NULL) {
2966                         struct scst_session *sess = cmd->sess;
2967                         cmd->hw_pending_start = jiffies;
2968                         cmd->cmd_hw_pending = 1;
2969                         if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
2970                                 TRACE_DBG("Sched HW pending work for sess %p "
2971                                         "(max time %d)", sess,
2972                                         tgtt->max_hw_pending_time);
2973                                 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
2974                                         &sess->sess_aflags);
2975                                 schedule_delayed_work(&sess->hw_pending_work,
2976                                         tgtt->max_hw_pending_time * HZ);
2977                         }
2978                 }
2979
2980                 scst_set_cur_start(cmd);
2981
2982 #ifdef CONFIG_SCST_DEBUG_RETRY
2983                 if (((scst_random() % 100) == 77))
2984                         rc = SCST_TGT_RES_QUEUE_FULL;
2985                 else
2986 #endif
2987                         rc = tgtt->xmit_response(cmd);
2988                 TRACE_DBG("xmit_response() returned %d", rc);
2989
2990                 if (likely(rc == SCST_TGT_RES_SUCCESS))
2991                         goto out;
2992
2993                 scst_set_xmit_time(cmd);
2994
2995                 cmd->cmd_hw_pending = 0;
2996
2997                 /* Restore the previous state */
2998                 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2999
3000                 switch (rc) {
3001                 case SCST_TGT_RES_QUEUE_FULL:
3002                         if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
3003                                 break;
3004                         else
3005                                 continue;
3006
3007                 case SCST_TGT_RES_NEED_THREAD_CTX:
3008                         TRACE_DBG("Target driver %s xmit_response() "
3009                               "requested thread context, rescheduling",
3010                               tgtt->name);
3011                         res = SCST_CMD_STATE_RES_NEED_THREAD;
3012                         break;
3013
3014                 default:
3015                         goto out_error;
3016                 }
3017                 break;
3018         }
3019
3020 out:
3021         /* Caution: cmd can already be dead here */
3022         TRACE_EXIT_HRES(res);
3023         return res;
3024
3025 out_error:
3026         if (rc == SCST_TGT_RES_FATAL_ERROR) {
3027                 PRINT_ERROR("Target driver %s xmit_response() returned "
3028                         "fatal error", tgtt->name);
3029         } else {
3030                 PRINT_ERROR("Target driver %s xmit_response() returned "
3031                         "invalid value %d", tgtt->name, rc);
3032         }
3033         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
3034         cmd->state = SCST_CMD_STATE_FINISHED;
3035         res = SCST_CMD_STATE_RES_CONT_SAME;
3036         goto out;
3037 }
3038
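/*
 * scst_tgt_cmd_done() - to be called by the target driver once it has
 * finished transmitting the response queued by its xmit_response() callback;
 * the cmd must be in the XMIT_WAIT state. Moves the cmd to FINISHED and
 * redirects it for further processing in the preferred context.
 */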
3039 void scst_tgt_cmd_done(struct scst_cmd *cmd,
3040         enum scst_exec_context pref_context)
3041 {
3042         TRACE_ENTRY();
3043
3044         sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
3045
3046         scst_set_xmit_time(cmd);
3047
3048         cmd->cmd_hw_pending = 0;
3049
3050         cmd->state = SCST_CMD_STATE_FINISHED;
3051         scst_process_redirect_cmd(cmd, pref_context, 1);
3052
3053         TRACE_EXIT();
3054         return;
3055 }
3056 EXPORT_SYMBOL(scst_tgt_cmd_done);
3057
3058 static int scst_finish_cmd(struct scst_cmd *cmd)
3059 {
3060         int res;
3061         struct scst_session *sess = cmd->sess;
3062
3063         TRACE_ENTRY();
3064
3065         scst_update_lat_stats(cmd);
3066
3067         if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
3068                 if ((cmd->tgt_dev != NULL) &&
3069                     scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
3070                         /* This UA delivery failed, so we need to requeue it */
3071                         if (scst_cmd_atomic(cmd) &&
3072                             scst_is_ua_global(cmd->sense, cmd->sense_valid_len)) {
3073                                 TRACE_MGMT_DBG("Requeuing of global UA for "
3074                                         "failed cmd %p needs a thread", cmd);
3075                                 res = SCST_CMD_STATE_RES_NEED_THREAD;
3076                                 goto out;
3077                         }
3078                         scst_requeue_ua(cmd);
3079                 }
3080         }
3081
3082         atomic_dec(&sess->sess_cmd_count);
3083
3084         spin_lock_irq(&sess->sess_list_lock);
3085         list_del(&cmd->sess_cmd_list_entry);
3086         spin_unlock_irq(&sess->sess_list_lock);
3087
3088         cmd->finished = 1;
3089         smp_mb(); /* to sync with scst_abort_cmd() */
3090
3091         if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
3092                 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
3093                         "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3094                         atomic_read(&scst_cmd_count));
3095
3096                 scst_finish_cmd_mgmt(cmd);
3097         }
3098
3099         __scst_cmd_put(cmd);
3100
3101         res = SCST_CMD_STATE_RES_CONT_NEXT;
3102
3103 out:
3104         TRACE_EXIT_HRES(res);
3105         return res;
3106 }
3107
3108 /*
3109  * No locks, but it must be externally serialized (see comment for
3110  * scst_cmd_init_done() in scst.h)
3111  */
3112 static void scst_cmd_set_sn(struct scst_cmd *cmd)
3113 {
3114         struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3115         unsigned long flags;
3116
3117         TRACE_ENTRY();
3118
3119         if (scst_is_implicit_hq(cmd)) {
3120                 TRACE_SN("Implicit HQ cmd %p", cmd);
3121                 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3122         }
3123
3124         EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
3125
3126         /* Optimized for lockless fast path */
3127
3128         scst_check_debug_sn(cmd);
3129
3130         if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
3131                 /*
3132                  * Not the best way, but good enough until it becomes
3133                  * possible to specify the queue type during pass-through
3134                  * command submission.
3135                  */
3136                 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3137         }
3138
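        /*
         * SIMPLE and UNTAGGED commands share the current SN slot and SN
         * (falling back to ORDERED when no free slots are left), ORDERED
         * commands get a fresh SN and, when needed, move to a free slot,
         * while HEAD OF QUEUE commands bypass SN assignment and are only
         * accounted in hq_cmd_count.
         */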
3139         switch (cmd->queue_type) {
3140         case SCST_CMD_QUEUE_SIMPLE:
3141         case SCST_CMD_QUEUE_UNTAGGED:
3142 #ifdef CONFIG_SCST_ORDERED_READS
3143                 if (scst_cmd_is_expected_set(cmd)) {
3144                         if ((cmd->expected_data_direction == SCST_DATA_READ) &&
3145                             (atomic_read(&cmd->dev->write_cmd_count) == 0))
3146                                 goto ordered;
3147                 } else
3148                         goto ordered;
3149 #endif
3150                 if (likely(tgt_dev->num_free_sn_slots >= 0)) {
3151                         /*
3152                          * atomic_inc_return() implies a memory barrier to sync
3153                          * with scst_inc_expected_sn()
3154                          */
3155                         if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
3156                                 tgt_dev->curr_sn++;
3157                                 TRACE_SN("Incremented curr_sn %d",
3158                                         tgt_dev->curr_sn);
3159                         }
3160                         cmd->sn_slot = tgt_dev->cur_sn_slot;
3161                         cmd->sn = tgt_dev->curr_sn;
3162
3163                         tgt_dev->prev_cmd_ordered = 0;
3164                 } else {
3165                         TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
3166                                 "%zd", ARRAY_SIZE(tgt_dev->sn_slots));
3167                         goto ordered;
3168                 }
3169                 break;
3170
3171         case SCST_CMD_QUEUE_ORDERED:
3172                 TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
3173 ordered:
3174                 if (!tgt_dev->prev_cmd_ordered) {
3175                         spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3176                         if (tgt_dev->num_free_sn_slots >= 0) {
3177                                 tgt_dev->num_free_sn_slots--;
3178                                 if (tgt_dev->num_free_sn_slots >= 0) {
3179                                         int i = 0;
3180                                         /* Commands can finish in any order, so
3181                                          * we don't know which slot is empty.
3182                                          */
3183                                         while (1) {
3184                                                 tgt_dev->cur_sn_slot++;
3185                                                 if (tgt_dev->cur_sn_slot ==
3186                                                       tgt_dev->sn_slots + ARRAY_SIZE(tgt_dev->sn_slots))
3187                                                         tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
3188
3189                                                 if (atomic_read(tgt_dev->cur_sn_slot) == 0)
3190                                                         break;
3191
3192                                                 i++;
3193                                                 sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
3194                                         }
3195                                         TRACE_SN("New cur SN slot %zd",
3196                                                 tgt_dev->cur_sn_slot -
3197                                                 tgt_dev->sn_slots);
3198                                 }
3199                         }
3200                         spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3201                 }
3202                 tgt_dev->prev_cmd_ordered = 1;
3203                 tgt_dev->curr_sn++;
3204                 cmd->sn = tgt_dev->curr_sn;
3205                 break;
3206
3207         case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
3208                 TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
3209                 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3210                 tgt_dev->hq_cmd_count++;
3211                 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3212                 cmd->hq_cmd_inced = 1;
3213                 goto out;
3214
3215         default:
3216                 sBUG();
3217         }
3218
3219         TRACE_SN("cmd(%p)->sn: %d (tgt_dev %p, *cur_sn_slot %d, "
3220                 "num_free_sn_slots %d, prev_cmd_ordered %ld, "
3221                 "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
3222                 atomic_read(tgt_dev->cur_sn_slot),
3223                 tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
3224                 tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
3225
3226         cmd->sn_set = 1;
3227
3228 out:
3229         TRACE_EXIT();
3230         return;
3231 }
3232
3233 /*
3234  * Returns 0 on success, > 0 when we need to wait for unblock,
3235  * < 0 if there is no device (lun) or device type handler.
3236  *
3237  * No locks, but might be on IRQ, protection is done by the
3238  * suspended activity.
3239  */
3240 static int scst_translate_lun(struct scst_cmd *cmd)
3241 {
3242         struct scst_tgt_dev *tgt_dev = NULL;
3243         int res;
3244
3245         TRACE_ENTRY();
3246
3247         /* See comment about smp_mb() in scst_suspend_activity() */
3248         __scst_get(1);
3249
3250         if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
3251                 struct list_head *sess_tgt_dev_list_head =
3252                         &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
3253                 TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
3254                         (long long unsigned int)cmd->lun);
3255                 res = -1;
3256                 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3257                                 sess_tgt_dev_list_entry) {
3258                         if (tgt_dev->lun == cmd->lun) {
3259                                 TRACE_DBG("tgt_dev %p found", tgt_dev);
3260
3261                                 if (unlikely(tgt_dev->dev->handler ==
3262                                                 &scst_null_devtype)) {
3263                                         PRINT_INFO("Dev handler for device "
3264                                           "%lld is NULL, the device will not "
3265                                           "be visible remotely",
3266                                            (long long unsigned int)cmd->lun);
3267                                         break;
3268                                 }
3269
3270                                 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
3271                                 cmd->tgt_dev = tgt_dev;
3272                                 cmd->dev = tgt_dev->dev;
3273
3274                                 res = 0;
3275                                 break;
3276                         }
3277                 }
3278                 if (res != 0) {
3279                         TRACE(TRACE_MINOR,
3280                                 "tgt_dev for LUN %lld not found, command to "
3281                                 "nonexistent LU?",
3282                                 (long long unsigned int)cmd->lun);
3283                         __scst_put();
3284                 }
3285         } else {
3286                 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
3287                 __scst_put();
3288                 res = 1;
3289         }
3290
3291         TRACE_EXIT_RES(res);
3292         return res;
3293 }
3294
3295 /*
3296  * No locks, but might be on IRQ
3297  *
3298  * Returns 0 on success, > 0 when we need to wait for unblock,
3299  * < 0 if there is no device (lun) or device type handler.
3300  */
3301 static int __scst_init_cmd(struct scst_cmd *cmd)
3302 {
3303         int res = 0;
3304
3305         TRACE_ENTRY();
3306
3307         res = scst_translate_lun(cmd);
3308         if (likely(res == 0)) {
3309                 int cnt;
3310                 bool failure = false;
3311
3312                 cmd->state = SCST_CMD_STATE_PRE_PARSE;
3313
3314                 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
3315                 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
3316                         TRACE(TRACE_FLOW_CONTROL,
3317                                 "Too many pending commands (%d) in "
3318                                 "session, returning BUSY to initiator \"%s\"",
3319                                 cnt, (cmd->sess->initiator_name[0] == '\0') ?
3320                                   "Anonymous" : cmd->sess->initiator_name);
3321                         failure = true;
3322                 }
3323
3324                 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
3325                 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
3326                         if (!failure) {
3327                                 TRACE(TRACE_FLOW_CONTROL,
3328                                         "Too many pending device "
3329                                         "commands (%d), returning BUSY to "
3330                                         "initiator \"%s\"", cnt,
3331                                         (cmd->sess->initiator_name[0] == '\0') ?
3332                                                 "Anonymous" :
3333                                                 cmd->sess->initiator_name);
3334                                 failure = true;
3335                         }
3336                 }
3337
3338 #ifdef CONFIG_SCST_ORDERED_READS
3339                 /* If expected values not set, expected direction is UNKNOWN */
3340                 if (cmd->expected_data_direction & SCST_DATA_WRITE)
3341                         atomic_inc(&cmd->dev->write_cmd_count);
3342 #endif
3343
3344                 if (unlikely(failure))
3345                         goto out_busy;
3346
3347                 if (!cmd->set_sn_on_restart_cmd)
3348                         scst_cmd_set_sn(cmd);
3349         } else if (res < 0) {
3350                 TRACE_DBG("Finishing cmd %p", cmd);
3351                 scst_set_cmd_error(cmd,
3352                            SCST_LOAD_SENSE(scst_sense_lun_not_supported));
3353                 scst_set_cmd_abnormal_done_state(cmd);
3354         } else
3355                 goto out;
3356
3357 out:
3358         TRACE_EXIT_RES(res);
3359         return res;
3360
3361 out_busy:
3362         scst_set_busy(cmd);
3363         scst_set_cmd_abnormal_done_state(cmd);
3364         goto out;
3365 }
3366
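/*
 * Drains scst_init_cmd_list: each non-aborted cmd is initialized via
 * __scst_init_cmd() (skipped while activities are suspended), then moved to
 * its active cmd list with the corresponding processing threads woken up;
 * aborted commands are pushed straight to their abnormal done state.
 */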
3367 /* Called under scst_init_lock and IRQs disabled */
3368 static void scst_do_job_init(void)
3369         __releases(&scst_init_lock)
3370         __acquires(&scst_init_lock)
3371 {
3372         struct scst_cmd *cmd;
3373         int susp;
3374
3375         TRACE_ENTRY();
3376
3377 restart:
3378         /*
3379          * There is no need for a read barrier here, because we don't care
3380          * where this check is done.
3381          */
3382         susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
3383         if (scst_init_poll_cnt > 0)
3384                 scst_init_poll_cnt--;
3385
3386         list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
3387                 int rc;
3388                 if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
3389                         continue;
3390                 if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3391                         spin_unlock_irq(&scst_init_lock);
3392                         rc = __scst_init_cmd(cmd);
3393                         spin_lock_irq(&scst_init_lock);
3394                         if (rc > 0) {
3395                                 TRACE_MGMT_DBG("%s",
3396                                         "FLAG SUSPENDED set, restarting");
3397                                 goto restart;
3398                         }
3399                 } else {
3400                         TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
3401                                        cmd, (long long unsigned int)cmd->tag);
3402                         scst_set_cmd_abnormal_done_state(cmd);
3403                 }
3404
3405                 /*
3406                  * Deleting the cmd from the init cmd list only after
3407                  * __scst_init_cmd() is necessary to keep the check in
3408                  * scst_init_cmd() correct and, hence, to preserve the
3409                  * commands' order.
3410                  *
3411                  * We don't care about the race where one command sees the init
3412                  * cmd list as non-empty and inserts itself into it, while at
3413                  * the same time another command sees the list as empty and
3414                  * goes directly. That could only affect commands from the same
3415                  * initiator to the same tgt_dev, and scst_cmd_init_done*()
3416                  * doesn't guarantee the order of simultaneous calls anyway.
3417                  */
3418                 TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
3419                 smp_wmb(); /* enforce the required order */
3420                 list_del(&cmd->cmd_list_entry);
3421                 spin_unlock(&scst_init_lock);
3422
3423                 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3424                 TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
3425                 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3426                         list_add(&cmd->cmd_list_entry,
3427                                 &cmd->cmd_lists->active_cmd_list);
3428                 else
3429                         list_add_tail(&cmd->cmd_list_entry,
3430                                 &cmd->cmd_lists->active_cmd_list);
3431                 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3432                 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3433
3434                 spin_lock(&scst_init_lock);
3435                 goto restart;
3436         }
3437
3438         /* It isn't really needed, but let's keep it */
3439         if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3440                 goto restart;
3441
3442         TRACE_EXIT();
3443         return;
3444 }
3445
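/*
 * Wakeup condition for the init thread: there are init commands to process
 * and activities are not suspended, a repoll was requested via
 * scst_init_poll_cnt, or the thread is being stopped.
 */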
3446 static inline int test_init_cmd_list(void)
3447 {
3448         int res = (!list_empty(&scst_init_cmd_list) &&
3449                    !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3450                   unlikely(kthread_should_stop()) ||
3451                   (scst_init_poll_cnt > 0);
3452         return res;
3453 }
3454
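/*
 * The init kernel thread: sleeps on scst_init_cmd_list_waitQ and calls
 * scst_do_job_init() whenever there is work to do, until it is stopped at
 * module unload time.
 */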
3455 int scst_init_thread(void *arg)
3456 {
3457         TRACE_ENTRY();
3458
3459         PRINT_INFO("Init thread started, PID %d", current->pid);
3460
3461         current->flags |= PF_NOFREEZE;
3462
3463         set_user_nice(current, -10);
3464
3465         spin_lock_irq(&scst_init_lock);
3466         while (!kthread_should_stop()) {
3467                 wait_queue_t wait;
3468                 init_waitqueue_entry(&wait, current);
3469
3470                 if (!test_init_cmd_list()) {
3471                         add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
3472                                                  &wait);
3473                         for (;;) {
3474                                 set_current_state(TASK_INTERRUPTIBLE);
3475                                 if (test_init_cmd_list())
3476                                         break;
3477                                 spin_unlock_irq(&scst_init_lock);
3478                                 schedule();
3479                                 spin_lock_irq(&scst_init_lock);
3480                         }
3481                         set_current_state(TASK_RUNNING);
3482                         remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
3483                 }
3484                 scst_do_job_init();
3485         }
3486         spin_unlock_irq(&scst_init_lock);
3487
3488         /*
3489          * If kthread_should_stop() is true, the module is guaranteed to
3490          * be unloading, so scst_init_cmd_list must be empty.
3491          */
3492         sBUG_ON(!list_empty(&scst_init_cmd_list));
3493
3494         PRINT_INFO("Init thread PID %d finished", current->pid);
3495
3496         TRACE_EXIT();
3497         return 0;
3498 }