1 /*
2  *  qla2x00t.c
3  *
4  *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
7  *  Copyright (C) 2006 - 2009 ID7 Ltd.
8  *
9  *  QLogic 22xx/23xx/24xx/25xx FC target driver.
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License
13  *  as published by the Free Software Foundation, version 2
14  *  of the License.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  *  GNU General Public License for more details.
20  */
21
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/types.h>
25 #include <linux/version.h>
26 #include <linux/blkdev.h>
27 #include <linux/interrupt.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <linux/pci.h>
31 #include <linux/delay.h>
32 #include <linux/seq_file.h>
33 #include <linux/list.h>
34
35 #include <scst.h>
36
37 #include "qla2x00t.h"
38
39 #ifndef CONFIG_SCSI_QLA2XXX_TARGET
40 #error "CONFIG_SCSI_QLA2XXX_TARGET is NOT DEFINED"
41 #endif
42
43 #ifdef CONFIG_SCST_DEBUG
44 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
45         TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_MINOR | \
46         TRACE_MGMT_DEBUG | TRACE_MINOR | TRACE_SPECIAL)
47 #else
48 # ifdef CONFIG_SCST_TRACING
49 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MINOR | \
50         TRACE_SPECIAL)
51 # endif
52 #endif
53
54 static int q2x_target_detect(struct scst_tgt_template *templ);
55 static int q24_target_detect(struct scst_tgt_template *templ);
56 static int q2t_target_release(struct scst_tgt *scst_tgt);
57 static int q2x_xmit_response(struct scst_cmd *scst_cmd);
58 static int q24_xmit_response(struct scst_cmd *scst_cmd);
59 static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd);
60 static void q2t_on_free_cmd(struct scst_cmd *scst_cmd);
61 static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
62
63 /* Predefs for callbacks handed to qla2xxx(target) */
64 static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *pkt);
65 static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt);
66 static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
67         uint16_t *mailbox);
68 static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle);
69 static int q2t_host_action(scsi_qla_host_t *ha,
70         qla2x_tgt_host_action_t action);
71 static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport);
72 static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport);
73 static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
74         int lun_size, int fn, void *iocb, int flags);
75 static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
76         atio_entry_t *atio, int ha_locked);
77 static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
78         atio7_entry_t *atio, int ha_locked);
79 static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
80         int ha_lock);
81 static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset);
82 static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only);
83 static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd);
84 static int q2t_unreg_sess(struct q2t_sess *sess);
85
86 /*
87  * Global Variables
88  */
89
90 static struct scst_tgt_template tgt2x_template = {
91         .name = "qla2x00tgt",
92         .sg_tablesize = 0,
93         .use_clustering = 1,
94 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
95         .xmit_response_atomic = 0,
96         .rdy_to_xfer_atomic = 0,
97 #else
98         .xmit_response_atomic = 1,
99         .rdy_to_xfer_atomic = 1,
100 #endif
101 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
102         .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
103 #endif
104         .detect = q2x_target_detect,
105         .release = q2t_target_release,
106         .xmit_response = q2x_xmit_response,
107         .rdy_to_xfer = q2t_rdy_to_xfer,
108         .on_free_cmd = q2t_on_free_cmd,
109         .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
110 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
111         .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
112 #endif
113 };
114
115 static struct scst_tgt_template tgt24_template = {
116         .name = "qla24xx-tgt",
117         .sg_tablesize = 0,
118         .use_clustering = 1,
119         .no_proc_entry = 1,
120 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
121         .xmit_response_atomic = 0,
122         .rdy_to_xfer_atomic = 0,
123 #else
124         .xmit_response_atomic = 1,
125         .rdy_to_xfer_atomic = 1,
126 #endif
127 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
128         .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
129 #endif
130         .detect = q24_target_detect,
131         .release = q2t_target_release,
132         .xmit_response = q24_xmit_response,
133         .rdy_to_xfer = q2t_rdy_to_xfer,
134         .on_free_cmd = q2t_on_free_cmd,
135         .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
136 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
137         .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
138 #endif
139 };
140
141 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
142 #define trace_flag q2t_trace_flag
143 static unsigned long q2t_trace_flag = Q2T_DEFAULT_LOG_FLAGS;
144 #endif
145
146 static struct kmem_cache *q2t_cmd_cachep;
147 static struct qla_target tgt_data;
148 static struct kmem_cache *q2t_mgmt_cmd_cachep;
149 static mempool_t *q2t_mgmt_cmd_mempool;
150
151 static DECLARE_RWSEM(q2t_unreg_rwsem);
152
153 /* It's not yet supported */
154 static inline int scst_cmd_get_ppl_offset(struct scst_cmd *scst_cmd)
155 {
156         return 0;
157 }
158
159 /* ha->hardware_lock supposed to be held on entry */
160 static inline void q2t_sess_get(struct q2t_sess *sess)
161 {
162         sess->sess_ref++;
163         TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref);
164 }
165
166 /* ha->hardware_lock supposed to be held on entry */
167 static inline void q2t_sess_put(struct q2t_sess *sess)
168 {
169         TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref-1);
170         sBUG_ON(sess->sess_ref == 0);
171
172         sess->sess_ref--;
173         if (sess->sess_ref == 0)
174                 q2t_unreg_sess(sess);
175 }
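
/*
 * Example (illustrative sketch only, not part of the driver): the session
 * reference-counting discipline assumed by the two helpers above.  Both
 * must be called with ha->hardware_lock held; dropping the last reference
 * unregisters the session via q2t_unreg_sess().
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	q2t_sess_get(sess);	/o take a reference while the session is used o/
 *	/o ... use sess ... o/
 *	q2t_sess_put(sess);	/o may unregister sess if this was the last ref o/
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */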
176
177 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
178 static inline struct q2t_sess *q2t_find_sess_by_loop_id(struct q2t_tgt *tgt,
179         uint16_t lid)
180 {
181         struct q2t_sess *sess;
182         sBUG_ON(tgt == NULL);
183         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
184                 if (lid == (sess->loop_id))
185                         return sess;
186         }
187         return NULL;
188 }
189
190 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
191 static inline struct q2t_sess *q2t_find_sess_by_s_id(struct q2t_tgt *tgt,
192         const uint8_t *s_id)
193 {
194         struct q2t_sess *sess;
195         sBUG_ON(tgt == NULL);
196         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
197                 if ((sess->s_id.b.al_pa == s_id[2]) &&
198                     (sess->s_id.b.area == s_id[1]) &&
199                     (sess->s_id.b.domain == s_id[0]))
200                         return sess;
201         }
202         return NULL;
203 }
204
205 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
206 static inline struct q2t_sess *q2t_find_sess_by_s_id_le(struct q2t_tgt *tgt,
207         const uint8_t *s_id)
208 {
209         struct q2t_sess *sess;
210         sBUG_ON(tgt == NULL);
211         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
212                 if ((sess->s_id.b.al_pa == s_id[0]) &&
213                     (sess->s_id.b.area == s_id[1]) &&
214                     (sess->s_id.b.domain == s_id[2]))
215                         return sess;
216         }
217         return NULL;
218 }
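
/*
 * Editorial note (hedged, based only on the callers visible in this file):
 * the two S_ID lookups above differ only in byte order.
 * q2t_find_sess_by_s_id_le() takes the byte-swapped S_ID of a 24xx
 * little-endian FCP header, e.g.
 *
 *	sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
 *
 * while q2t_find_sess_by_s_id() expects wire order (domain, area, al_pa),
 * such as the s_id of an ATIO7 FCP header.  Both assume ha->hardware_lock
 * is held, as noted above.
 */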
219
220
221 /* ha->hardware_lock supposed to be held on entry */
222 static inline void q2t_exec_queue(scsi_qla_host_t *ha)
223 {
224         tgt_data.isp_cmd(ha);
225 }
226
227 /* Might release hw lock, then reacquire!! */
228 static inline int q2t_issue_marker(scsi_qla_host_t *ha, int ha_locked)
229 {
230         /* Send marker if required */
231         if (unlikely(ha->marker_needed != 0)) {
232                 int rc = tgt_data.issue_marker(ha, ha_locked);
233                 if (rc != QLA_SUCCESS) {
234                         PRINT_ERROR("qla2x00tgt(%ld): issue_marker() "
235                                 "failed", ha->instance);
236                 }
237                 return rc;
238         }
239         return QLA_SUCCESS;
240 }
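
/*
 * Example (illustrative sketch, not compiled): the pattern used by the IOCB
 * senders below.  A marker is issued first if needed, a request ring entry
 * is obtained from the initiator driver, filled in, and then handed to the
 * ISP, all under ha->hardware_lock:
 *
 *	if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
 *		goto out;
 *	pkt = tgt_data.req_pkt(ha);	/o may be NULL if the ring is full o/
 *	if (pkt == NULL)
 *		goto out;
 *	/o ... fill in the IOCB fields ... o/
 *	q2t_exec_queue(ha);		/o submit the request to the ISP o/
 */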
241
242 /*
243  * Registers with initiator driver (but target mode isn't enabled till
244  * it's turned on via sysfs)
245  */
246 static int q2x_target_detect(struct scst_tgt_template *templ)
247 {
248         int res;
249         struct qla_tgt_initiator itd = {
250                 .magic = QLA2X_TARGET_MAGIC,
251                 .tgt24_atio_pkt = q24_atio_pkt,
252                 .tgt_response_pkt = q2t_response_pkt,
253                 .tgt2x_ctio_completion = q2x_ctio_completion,
254                 .tgt_async_event = q2t_async_event,
255                 .tgt_host_action = q2t_host_action,
256                 .tgt_fc_port_added = q2t_fc_port_added,
257                 .tgt_fc_port_deleted = q2t_fc_port_deleted,
258         };
259
260         TRACE_ENTRY();
261
262         res = qla2xxx_tgt_register_driver(&itd, &tgt_data);
263         if (res != 0) {
264                 PRINT_ERROR("Unable to register driver: %d", res);
265                 goto out;
266         }
267
268         if (tgt_data.magic != QLA2X_INITIATOR_MAGIC) {
269                 PRINT_ERROR("Wrong version of the initiator part: %d",
270                             tgt_data.magic);
271                 res = -EINVAL;
272                 goto out;
273         }

274         PRINT_INFO("%s", "Target mode driver for QLogic 2x00 controller "
275                 "registered successfully");
276
277 out:
278         TRACE_EXIT();
279         return res;
280 }
281
282 static int q24_target_detect(struct scst_tgt_template *templ)
283 {
284         /* Nothing to do */
285         return 0;
286 }
287
288 static void q2t_free_session_done(struct scst_session *scst_sess)
289 {
290         struct q2t_sess *sess;
291         struct q2t_tgt *tgt;
292         scsi_qla_host_t *ha;
293         unsigned long flags;
294
295         TRACE_ENTRY();
296
297         sBUG_ON(scst_sess == NULL);
298         sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
299         sBUG_ON(sess == NULL);
300         tgt = sess->tgt;
301
302         TRACE_MGMT_DBG("Unregistration of sess %p finished", sess);
303
304         kfree(sess);
305
306         if (tgt == NULL)
307                 goto out;
308
309         TRACE_DBG("empty(sess_list) %d sess_count %d",
310               list_empty(&tgt->sess_list), tgt->sess_count);
311
312         ha = tgt->ha;
313
314         /*
315          * We need to protect against a race in which tgt is freed before or
316          * inside wake_up()
317          */
318         spin_lock_irqsave(&ha->hardware_lock, flags);
319         tgt->sess_count--;
320         if (tgt->sess_count == 0)
321                 wake_up_all(&tgt->waitQ);
322         spin_unlock_irqrestore(&ha->hardware_lock, flags);
323
324 out:
325         TRACE_EXIT();
326         return;
327 }
328
329 /* ha->hardware_lock supposed to be held on entry */
330 static int q2t_unreg_sess(struct q2t_sess *sess)
331 {
332         int res = 1;
333
334         TRACE_ENTRY();
335
336         sBUG_ON(sess == NULL);
337         sBUG_ON(sess->sess_ref != 0);
338
339         TRACE_MGMT_DBG("Deleting sess %p from tgt %p", sess, sess->tgt);
340         list_del(&sess->sess_list_entry);
341
342         if (sess->deleted)
343                 list_del(&sess->del_list_entry);
344
345         PRINT_INFO("qla2x00tgt(%ld): %ssession for loop_id %d deleted",
346                 sess->tgt->ha->instance, sess->local ? "local " : "",
347                 sess->loop_id);
348
349         scst_unregister_session(sess->scst_sess, 0, q2t_free_session_done);
350
351         TRACE_EXIT_RES(res);
352         return res;
353 }
354
355 /* ha->hardware_lock supposed to be held on entry */
356 static int q2t_reset(scsi_qla_host_t *ha, void *iocb, int mcmd)
357 {
358         struct q2t_sess *sess;
359         int loop_id;
360         uint16_t lun = 0;
361         int res = 0;
362
363         TRACE_ENTRY();
364
365         if (IS_FWI2_CAPABLE(ha)) {
366                 notify24xx_entry_t *n = (notify24xx_entry_t *)iocb;
367                 loop_id = le16_to_cpu(n->nport_handle);
368         } else
369                 loop_id = GET_TARGET_ID(ha, (notify_entry_t *)iocb);
370
371         if (loop_id == 0xFFFF) {
372                 /* Global event */
373                 q2t_clear_tgt_db(ha->tgt, 1);
374                 if (!list_empty(&ha->tgt->sess_list)) {
375                         sess = list_entry(ha->tgt->sess_list.next,
376                                 typeof(*sess), sess_list_entry);
377                         switch (mcmd) {
378                         case Q2T_NEXUS_LOSS_SESS:
379                                 mcmd = Q2T_NEXUS_LOSS;
380                                 break;
381
382                         case Q2T_ABORT_ALL_SESS:
383                                 mcmd = Q2T_ABORT_ALL;
384                                 break;
385
386                         case Q2T_NEXUS_LOSS:
387                         case Q2T_ABORT_ALL:
388                                 break;
389
390                         default:
391                                 PRINT_ERROR("qla2x00tgt(%ld): Not allowed "
392                                         "command %x in %s", ha->instance,
393                                         mcmd, __func__);
394                                 sess = NULL;
395                                 break;
396                         }
397                 } else
398                         sess = NULL;
399         } else
400                 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
401
402         if (sess == NULL) {
403                 res = -ESRCH;
404                 ha->tgt->tm_to_unknown = 1;
405                 goto out;
406         }
407
408         TRACE_MGMT_DBG("scsi(%ld): resetting (session %p, "
409                 "mcmd %x, loop_id %d)", ha->host_no, sess, mcmd, loop_id);
410
411         res = q2t_issue_task_mgmt(sess, (uint8_t *)&lun, sizeof(lun),
412                         mcmd, iocb, Q24_MGMT_SEND_NACK);
413
414 out:
415         TRACE_EXIT_RES(res);
416         return res;
417 }
418
419 /* ha->hardware_lock supposed to be held on entry */
420 static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only)
421 {
422         struct q2t_sess *sess, *sess_tmp;
423
424         TRACE_ENTRY();
425
426         TRACE(TRACE_MGMT, "Clearing targets DB %p", tgt);
427
428         list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
429                                         sess_list_entry) {
430                 if (local_only && !sess->local)
431                         continue;
432                 if (local_only && sess->local)
433                         TRACE_MGMT_DBG("Putting local session %p", sess);
434                 q2t_sess_put(sess);
435         }
436
437         /* At this point tgt could be already dead */
438
439         TRACE_MGMT_DBG("Finished clearing tgt %p DB", tgt);
440
441         TRACE_EXIT();
442         return;
443 }
444
445 /* Called in a thread context */
446 static void q2t_alloc_session_done(struct scst_session *scst_sess,
447                                    void *data, int result)
448 {
449         TRACE_ENTRY();
450
451         if (result != 0) {
452                 struct q2t_sess *sess = (struct q2t_sess *)data;
453                 struct q2t_tgt *tgt = sess->tgt;
454                 scsi_qla_host_t *ha = tgt->ha;
455                 unsigned long flags;
456
457                 PRINT_INFO("qla2x00tgt(%ld): Session initialization failed",
458                            ha->instance);
459
460                 spin_lock_irqsave(&ha->hardware_lock, flags);
461                 q2t_sess_put(sess);
462                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
463         }
464
465         TRACE_EXIT();
466         return;
467 }
468
469 static void q2t_del_sess_timer_fn(unsigned long arg)
470 {
471         struct q2t_tgt *tgt = (struct q2t_tgt *)arg;
472         scsi_qla_host_t *ha = tgt->ha;
473         struct q2t_sess *sess;
474         unsigned long flags;
475
476         TRACE_ENTRY();
477
478         spin_lock_irqsave(&ha->hardware_lock, flags);
479         while (!list_empty(&tgt->del_sess_list)) {
480                 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
481                                 del_list_entry);
482                 if (time_after_eq(jiffies, sess->expires)) {
483                         /*
484                          * sess will be deleted from del_sess_list in
485                          * q2t_unreg_sess()
486                          */
487                         TRACE_MGMT_DBG("Timeout: sess %p about to be deleted",
488                                 sess);
489                         q2t_sess_put(sess);
490                 } else {
491                         tgt->sess_del_timer.expires = sess->expires;
492                         add_timer(&tgt->sess_del_timer);
493                         break;
494                 }
495         }
496         spin_unlock_irqrestore(&ha->hardware_lock, flags);
497
498         TRACE_EXIT();
499         return;
500 }
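
/*
 * Illustrative timeline (editorial note): a session is put on del_sess_list
 * by q2t_fc_port_deleted() with expires set roughly dev_loss_tmo seconds in
 * the future.  If the port reappears first, q2t_fc_port_added() (or
 * q2t_create_sess()) takes it back off the list and clears sess->deleted.
 * Otherwise the timer above fires, drops the list's reference via
 * q2t_sess_put(), and q2t_unreg_sess() removes the entry.
 */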
501
502 /*
503  * Must be called under tgt_mutex.
504  *
505  * Adds an extra ref to allow dropping the hw lock after adding sess to the
506  * list. The caller must put it.
507  */
508 static struct q2t_sess *q2t_create_sess(scsi_qla_host_t *ha, fc_port_t *fcport,
509         bool local)
510 {
511         char *wwn_str;
512         const int wwn_str_len = 3*WWN_SIZE+2;
513         struct q2t_tgt *tgt = ha->tgt;
514         struct q2t_sess *sess;
515
516         TRACE_ENTRY();
517
518         /* Check to avoid double sessions */
519         spin_lock_irq(&ha->hardware_lock);
520         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
521                 if ((sess->port_name[0] == fcport->port_name[0]) &&
522                     (sess->port_name[1] == fcport->port_name[1]) &&
523                     (sess->port_name[2] == fcport->port_name[2]) &&
524                     (sess->port_name[3] == fcport->port_name[3]) &&
525                     (sess->port_name[4] == fcport->port_name[4]) &&
526                     (sess->port_name[5] == fcport->port_name[5]) &&
527                     (sess->port_name[6] == fcport->port_name[6]) &&
528                     (sess->port_name[7] == fcport->port_name[7])) {
529                         TRACE_MGMT_DBG("Double sess %p found (s_id %x:%x:%x, "
530                                 "loop_id %d), updating to d_id %x:%x:%x, "
531                                 "loop_id %d", sess, sess->s_id.b.al_pa,
532                                 sess->s_id.b.area, sess->s_id.b.domain,
533                                 sess->loop_id, fcport->d_id.b.al_pa,
534                                 fcport->d_id.b.area, fcport->d_id.b.domain,
535                                 fcport->loop_id);
536
537                         if (sess->deleted) {
538                                 list_del(&sess->del_list_entry);
539                                 sess->deleted = 0;
540                         }
541
542                         sess->s_id = fcport->d_id;
543                         sess->loop_id = fcport->loop_id;
544                         sess->conf_compl_supported = fcport->conf_compl_supported;
545                         if (sess->local && !local)
546                                 sess->local = false;
547                         spin_unlock_irq(&ha->hardware_lock);
548                         goto out;
549                 }
550         }
551         spin_unlock_irq(&ha->hardware_lock);
552
553         /* We are under tgt_mutex, so a new sess can't be added behind us */
554
555         sess = kzalloc(sizeof(*sess), GFP_KERNEL);
556         if (sess == NULL) {
557                 PRINT_ERROR("qla2x00tgt(%ld): session allocation failed, "
558                         "all commands from port %02x:%02x:%02x:%02x:"
559                         "%02x:%02x:%02x:%02x will be refused", ha->instance,
560                         fcport->port_name[0], fcport->port_name[1],
561                         fcport->port_name[2], fcport->port_name[3],
562                         fcport->port_name[4], fcport->port_name[5],
563                         fcport->port_name[6], fcport->port_name[7]);
564                 goto out;
565         }
566
567         sess->sess_ref = 2; /* plus 1 extra ref, see above */
568         sess->tgt = ha->tgt;
569         sess->s_id = fcport->d_id;
570         sess->loop_id = fcport->loop_id;
571         sess->conf_compl_supported = fcport->conf_compl_supported;
572         sess->local = local;
573         BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
574         memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
575
576         wwn_str = kmalloc(wwn_str_len, GFP_KERNEL);
577         if (wwn_str == NULL) {
578                 PRINT_ERROR("qla2x00tgt(%ld): Allocation of wwn_str failed. "
579                         "All commands from port %02x:%02x:%02x:%02x:%02x:%02x:"
580                         "%02x:%02x will be refused", ha->instance,
581                         fcport->port_name[0], fcport->port_name[1],
582                         fcport->port_name[2], fcport->port_name[3],
583                         fcport->port_name[4], fcport->port_name[5],
584                         fcport->port_name[6], fcport->port_name[7]);
585                 goto out_free_sess;
586         }
587
588         sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
589                 fcport->port_name[0], fcport->port_name[1],
590                 fcport->port_name[2], fcport->port_name[3],
591                 fcport->port_name[4], fcport->port_name[5],
592                 fcport->port_name[6], fcport->port_name[7]);
593
594         /* Do the session creation asynchronously */
595         sess->scst_sess = scst_register_session(tgt->scst_tgt, 1, wwn_str,
596                 sess, q2t_alloc_session_done);
597
598         if (sess->scst_sess == NULL) {
599                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): scst_register_session() "
600                         "failed for host %ld (wwn %s, loop_id %d), all "
601                         "commands from it will be refused", ha->instance,
602                         ha->host_no, wwn_str, fcport->loop_id);
603                 goto out_free_sess_wwn;
604         }
605         scst_sess_set_tgt_priv(sess->scst_sess, sess);
606
607         spin_lock_irq(&ha->hardware_lock);
608         TRACE_MGMT_DBG("Adding sess %p to tgt %p", sess, tgt);
609         list_add_tail(&sess->sess_list_entry, &tgt->sess_list);
610         tgt->sess_count++;
611         spin_unlock_irq(&ha->hardware_lock);
612
613         PRINT_INFO("qla2x00tgt(%ld): %ssession for wwn %s (loop_id %d, "
614                 "s_id %x:%x:%x, confirmed completion %ssupported) added",
615                 ha->instance, local ? "local " : "", wwn_str, fcport->loop_id,
616                 sess->s_id.b.al_pa, sess->s_id.b.area, sess->s_id.b.domain,
617                 sess->conf_compl_supported ? "" : "not ");
618
619         kfree(wwn_str);
620
621 out:
622         TRACE_EXIT_HRES(sess);
623         return sess;
624
625 out_free_sess_wwn:
626         kfree(wwn_str);
627         /* fall through */
628
629 out_free_sess:
630         kfree(sess);
631         sess = NULL;
632         goto out;
633 }
634
635 static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport)
636 {
637         struct q2t_tgt *tgt;
638         struct q2t_sess *sess;
639
640         TRACE_ENTRY();
641
642         mutex_lock(&ha->tgt_mutex);
643
644         tgt = ha->tgt;
645
646         if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
647                 goto out_unlock;
648
649         if (tgt->tgt_shutdown)
650                 goto out_unlock;
651
652         spin_lock_irq(&ha->hardware_lock);
653
654         sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
655         if (sess == NULL) {
656                 spin_unlock_irq(&ha->hardware_lock);
657                 sess = q2t_create_sess(ha, fcport, false);
658                 spin_lock_irq(&ha->hardware_lock);
659                 if (sess != NULL)
660                         q2t_sess_put(sess); /* put the extra creation ref */
661         } else {
662                 if (sess->deleted) {
663                         list_del(&sess->del_list_entry);
664                         sess->deleted = 0;
665
666                         PRINT_INFO("qla2x00tgt(%ld): session for port %02x:"
667                                 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
668                                 "reappeared", ha->instance, fcport->port_name[0],
669                                 fcport->port_name[1], fcport->port_name[2],
670                                 fcport->port_name[3], fcport->port_name[4],
671                                 fcport->port_name[5], fcport->port_name[6],
672                                 fcport->port_name[7], sess->loop_id);
673                         TRACE_MGMT_DBG("Appeared sess %p", sess);
674                 } else if (sess->local) {
675                         TRACE(TRACE_MGMT, "qla2x00tgt(%ld): local session for "
676                                 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
677                                 "(loop ID %d) became global", ha->instance,
678                                 fcport->port_name[0], fcport->port_name[1],
679                                 fcport->port_name[2], fcport->port_name[3],
680                                 fcport->port_name[4], fcport->port_name[5],
681                                 fcport->port_name[6], fcport->port_name[7],
682                                 sess->loop_id);
683                 }
684                 sess->local = 0;
685         }
686
687         spin_unlock_irq(&ha->hardware_lock);
688
689 out_unlock:
690         mutex_unlock(&ha->tgt_mutex);
691
692         TRACE_EXIT();
693         return;
694 }
695
696 static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport)
697 {
698         struct q2t_tgt *tgt;
699         struct q2t_sess *sess;
700         uint32_t dev_loss_tmo;
701
702         TRACE_ENTRY();
703
704         mutex_lock(&ha->tgt_mutex);
705
706         tgt = ha->tgt;
707
708         if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
709                 goto out_unlock;
710
711         dev_loss_tmo = ha->port_down_retry_count + 5;
712
713         if (tgt->tgt_shutdown)
714                 goto out_unlock;
715
716         spin_lock_irq(&ha->hardware_lock);
717
718         sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
719         if (sess == NULL)
720                 goto out_unlock_ha;
721
722         if (!sess->deleted) {
723                 int add_tmr;
724
725                 add_tmr = list_empty(&tgt->del_sess_list);
726
727                 TRACE_MGMT_DBG("Scheduling sess %p to deletion", sess);
728                 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
729                 sess->deleted = 1;
730
731                 PRINT_INFO("qla2x00tgt(%ld): %ssession for port %02x:%02x:%02x:"
732                         "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
733                         "deletion in %d secs", ha->instance,
734                         sess->local ? "local " : "",
735                         fcport->port_name[0], fcport->port_name[1],
736                         fcport->port_name[2], fcport->port_name[3],
737                         fcport->port_name[4], fcport->port_name[5],
738                         fcport->port_name[6], fcport->port_name[7],
739                         sess->loop_id, dev_loss_tmo);
740
741                 sess->expires = jiffies + dev_loss_tmo * HZ;
742                 if (add_tmr)
743                         mod_timer(&tgt->sess_del_timer, sess->expires);
744         }
745
746 out_unlock_ha:
747         spin_unlock_irq(&ha->hardware_lock);
748
749 out_unlock:
750         mutex_unlock(&ha->tgt_mutex);
751
752         TRACE_EXIT();
753         return;
754 }
755
756 static inline int test_tgt_sess_count(struct q2t_tgt *tgt)
757 {
758         unsigned long flags;
759         int res;
760
761         /*
762          * We need to protect against a race in which tgt is freed before or
763          * inside wake_up()
764          */
765         spin_lock_irqsave(&tgt->ha->hardware_lock, flags);
766         TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
767               tgt, list_empty(&tgt->sess_list), tgt->sess_count);
768         res = (tgt->sess_count == 0);
769         spin_unlock_irqrestore(&tgt->ha->hardware_lock, flags);
770
771         return res;
772 }
773
774 /* Must be called under read locked q2t_unreg_rwsem */
775 static int q2t_target_release(struct scst_tgt *scst_tgt)
776 {
777         int res = 0;
778         struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
779         scsi_qla_host_t *ha = tgt->ha;
780
781         TRACE_ENTRY();
782
783         /*
784          * Mutex needed to sync with q2t_fc_port_[added,deleted].
785          * Lock is needed, because we still can get an incoming packet.
786          */
787
788         mutex_lock(&ha->tgt_mutex);
789         spin_lock_irq(&ha->hardware_lock);
790         tgt->tgt_shutdown = 1;
791         q2t_clear_tgt_db(tgt, false);
792         spin_unlock_irq(&ha->hardware_lock);
793         mutex_unlock(&ha->tgt_mutex);
794
795         del_timer_sync(&tgt->sess_del_timer);
796
797         TRACE_MGMT_DBG("Waiting for sess works (tgt %p)", tgt);
798         spin_lock_irq(&tgt->sess_work_lock);
799         while (!list_empty(&tgt->sess_works_list)) {
800                 spin_unlock_irq(&tgt->sess_work_lock);
801                 flush_scheduled_work();
802                 spin_lock_irq(&tgt->sess_work_lock);
803         }
804         spin_unlock_irq(&tgt->sess_work_lock);
805
806         TRACE_MGMT_DBG("Waiting for tgt %p: list_empty(sess_list)=%d "
807                 "sess_count=%d", tgt, list_empty(&tgt->sess_list),
808                 tgt->sess_count);
809
810         wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
811
812         /* Big hammer */
813         if (!ha->host_shutting_down)
814                 tgt_data.disable_tgt_mode(ha);
815
816         /* Wait for sessions to clear out (just in case) */
817         wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
818
819         TRACE_MGMT_DBG("Waiting for %d IRQ commands to complete (tgt %p)",
820                 tgt->irq_cmd_count, tgt);
821
822         mutex_lock(&ha->tgt_mutex);
823         spin_lock_irq(&ha->hardware_lock);
824         while (tgt->irq_cmd_count != 0) {
825                 spin_unlock_irq(&ha->hardware_lock);
826                 udelay(2);
827                 spin_lock_irq(&ha->hardware_lock);
828         }
829         scst_tgt_set_tgt_priv(scst_tgt, NULL);
830         ha->tgt = NULL;
831         spin_unlock_irq(&ha->hardware_lock);
832         mutex_unlock(&ha->tgt_mutex);
833
834         TRACE_MGMT_DBG("Release of tgt %p finished", tgt);
835
836         kfree(tgt);
837
838         TRACE_EXIT_RES(res);
839         return res;
840 }
841
842 /*
843  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
844  */
845 static void q2x_modify_command_count(scsi_qla_host_t *ha, int cmd_count,
846         int imm_count)
847 {
848         modify_lun_entry_t *pkt;
849
850         TRACE_ENTRY();
851
852         TRACE_DBG("Sending MODIFY_LUN (ha=%p, cmd=%d, imm=%d)",
853                   ha, cmd_count, imm_count);
854
855         /* Sending a marker isn't necessary, since we are called from an ISR */
856
857         pkt = (modify_lun_entry_t *)tgt_data.req_pkt(ha);
858         if (pkt == NULL) {
859                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
860                         "request packet", ha->instance, __func__);
861                 goto out;
862         }
863
864         ha->tgt->modify_lun_expected++;
865
866         pkt->entry_type = MODIFY_LUN_TYPE;
867         pkt->entry_count = 1;
868         if (cmd_count < 0) {
869                 pkt->operators = MODIFY_LUN_CMD_SUB;    /* Subtract from command count */
870                 pkt->command_count = -cmd_count;
871         } else if (cmd_count > 0) {
872                 pkt->operators = MODIFY_LUN_CMD_ADD;    /* Add to command count */
873                 pkt->command_count = cmd_count;
874         }
875
876         if (imm_count < 0) {
877                 pkt->operators |= MODIFY_LUN_IMM_SUB;
878                 pkt->immed_notify_count = -imm_count;
879         } else if (imm_count > 0) {
880                 pkt->operators |= MODIFY_LUN_IMM_ADD;
881                 pkt->immed_notify_count = imm_count;
882         }
883
884         pkt->timeout = 0;       /* Use default */
885
886         TRACE_BUFFER("MODIFY LUN packet data", pkt, REQUEST_ENTRY_SIZE);
887
888         q2t_exec_queue(ha);
889
890 out:
891         TRACE_EXIT();
892         return;
893 }
894
895 /*
896  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
897  */
898 static void q2x_send_notify_ack(scsi_qla_host_t *ha, notify_entry_t *iocb,
899         uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
900         uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
901 {
902         nack_entry_t *ntfy;
903
904         TRACE_ENTRY();
905
906         TRACE_DBG("Sending NOTIFY_ACK (ha=%p)", ha);
907
908         /* Send marker if required */
909         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
910                 goto out;
911
912         ntfy = (nack_entry_t *)tgt_data.req_pkt(ha);
913         if (ntfy == NULL) {
914                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
915                         "request packet", ha->instance, __func__);
916                 goto out;
917         }
918
919         if (ha->tgt != NULL)
920                 ha->tgt->notify_ack_expected++;
921
922         ntfy->entry_type = NOTIFY_ACK_TYPE;
923         ntfy->entry_count = 1;
924         SET_TARGET_ID(ha, ntfy->target, GET_TARGET_ID(ha, iocb));
925         ntfy->status = iocb->status;
926         ntfy->task_flags = iocb->task_flags;
927         ntfy->seq_id = iocb->seq_id;
928         /* Do not increment here, the chip isn't decrementing */
929         /* ntfy->flags = __constant_cpu_to_le16(NOTIFY_ACK_RES_COUNT); */
930         ntfy->flags |= cpu_to_le16(add_flags);
931         ntfy->srr_rx_id = iocb->srr_rx_id;
932         ntfy->srr_rel_offs = iocb->srr_rel_offs;
933         ntfy->srr_ui = iocb->srr_ui;
934         ntfy->srr_flags = cpu_to_le16(srr_flags);
935         ntfy->srr_reject_code = cpu_to_le16(srr_reject_code);
936         ntfy->srr_reject_code_expl = srr_explan;
937         ntfy->ox_id = iocb->ox_id;
938
939         if (resp_code_valid) {
940                 ntfy->resp_code = cpu_to_le16(resp_code);
941                 ntfy->flags |= __constant_cpu_to_le16(
942                         NOTIFY_ACK_TM_RESP_CODE_VALID);
943         }
944
945         TRACE(TRACE_SCSI, "Sending Notify Ack Seq %#x -> I %#x St %#x RC %#x",
946               le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
947               le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
948         TRACE_BUFFER("Notify Ack packet data", ntfy, REQUEST_ENTRY_SIZE);
949
950         q2t_exec_queue(ha);
951
952 out:
953         TRACE_EXIT();
954         return;
955 }
956
957 /*
958  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
959  */
960 static void q24_send_abts_resp(scsi_qla_host_t *ha,
961         const abts24_recv_entry_t *abts, uint32_t status, bool ids_reversed)
962 {
963         abts24_resp_entry_t *resp;
964         uint32_t f_ctl;
965         uint8_t *p;
966
967         TRACE_ENTRY();
968
969         TRACE_DBG("Sending task mgmt ABTS response (ha=%p, atio=%p, "
970                 "status=%x)", ha, abts, status);
971
972         /* Send marker if required */
973         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
974                 goto out;
975
976         resp = (abts24_resp_entry_t *)tgt_data.req_pkt(ha);
977         if (resp == NULL) {
978                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
979                         "request packet", ha->instance, __func__);
980                 goto out;
981         }
982
983         resp->entry_type = ABTS_RESP_24XX;
984         resp->entry_count = 1;
985         resp->nport_handle = abts->nport_handle;
986         resp->sof_type = abts->sof_type;
987         resp->exchange_address = abts->exchange_address;
988         resp->fcp_hdr_le = abts->fcp_hdr_le;
989         f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
990                         F_CTL_LAST_SEQ | F_CTL_END_SEQ |
991                         F_CTL_SEQ_INITIATIVE);
992         p = (uint8_t *)&f_ctl;
993         resp->fcp_hdr_le.f_ctl[0] = *p++;
994         resp->fcp_hdr_le.f_ctl[1] = *p++;
995         resp->fcp_hdr_le.f_ctl[2] = *p;
996         if (ids_reversed) {
997                 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
998                 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
999                 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1000                 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1001                 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1002                 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1003         } else {
1004                 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1005                 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1006                 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1007                 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1008                 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1009                 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1010         }
1011         resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1012         if (status == SCST_MGMT_STATUS_SUCCESS) {
1013                 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1014                 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1015                 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1016                 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1017                 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1018                 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1019         } else {
1020                 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1021                 resp->payload.ba_rjt.reason_code =
1022                         BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1023                 /* Other bytes are zero */
1024         }
1025
1026         TRACE_BUFFER("ABTS RESP packet data", resp, REQUEST_ENTRY_SIZE);
1027
1028         ha->tgt->abts_resp_expected++;
1029
1030         q2t_exec_queue(ha);
1031
1032 out:
1033         TRACE_EXIT();
1034         return;
1035 }
1036
1037 /*
1038  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1039  */
1040 static void q24_retry_term_exchange(scsi_qla_host_t *ha,
1041         abts24_resp_fw_entry_t *entry)
1042 {
1043         ctio7_status1_entry_t *ctio;
1044
1045         TRACE_ENTRY();
1046
1047         TRACE_DBG("Sending retry TERM EXCH CTIO7 (ha=%p)", ha);
1048
1049         /* Send marker if required */
1050         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1051                 goto out;
1052
1053         ctio = (ctio7_status1_entry_t *)tgt_data.req_pkt(ha);
1054         if (ctio == NULL) {
1055                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1056                         "request packet", ha->instance, __func__);
1057                 goto out;
1058         }
1059
1060         /*
1061          * On entry we have the firmware's response to the ABTS response we
1062          * generated earlier, so the ID fields in it are reversed.
1063          */
1064
1065         ctio->common.entry_type = CTIO_TYPE7;
1066         ctio->common.entry_count = 1;
1067         ctio->common.nport_handle = entry->nport_handle;
1068         ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1069         ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1070         ctio->common.initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1071         ctio->common.initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1072         ctio->common.initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1073         ctio->common.exchange_addr = entry->exchange_addr_to_abort;
1074         ctio->flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1075         ctio->ox_id = entry->fcp_hdr_le.ox_id;
1076
1077         TRACE_BUFFER("CTIO7 retry TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
1078
1079         q2t_exec_queue(ha);
1080
1081         q24_send_abts_resp(ha, (abts24_recv_entry_t *)entry,
1082                 SCST_MGMT_STATUS_SUCCESS, true);
1083
1084 out:
1085         TRACE_EXIT();
1086         return;
1087 }
1088
1089 /*
1090  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1091  */
1092 static void q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts)
1093 {
1094         uint32_t tag;
1095         int rc;
1096         struct q2t_mgmt_cmd *mcmd;
1097         struct q2t_sess *sess;
1098
1099         TRACE_ENTRY();
1100
1101         if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1102                 PRINT_ERROR("qla2x00tgt(%ld): ABTS: Abort Sequence not "
1103                         "supported", ha->instance);
1104                 goto out_err;
1105         }
1106
1107         tag = abts->exchange_addr_to_abort;
1108
1109         if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1110                 TRACE_MGMT_DBG("qla2x00tgt(%ld): ABTS: Unknown Exchange "
1111                         "Address received", ha->instance);
1112                 goto out_err;
1113         }
1114
1115         TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): task abort (s_id=%x:%x:%x, "
1116                 "tag=%d, param=%x)", ha->instance, abts->fcp_hdr_le.s_id[0],
1117                 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[2], tag,
1118                 le32_to_cpu(abts->fcp_hdr_le.parameter));
1119
1120         sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
1121         if (sess == NULL) {
1122                 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task abort for non-existent "
1123                         "session", ha->instance);
1124                 ha->tgt->tm_to_unknown = 1;
1125                 goto out_err;
1126         }
1127
1128         mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
1129         if (mcmd == NULL) {
1130                 PRINT_ERROR("%s: Allocation of ABORT cmd failed", __func__);
1131                 goto out_err;
1132         }
1133         memset(mcmd, 0, sizeof(*mcmd));
1134
1135         mcmd->sess = sess;
1136         memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1137
1138         rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
1139                 SCST_ATOMIC, mcmd);
1140         if (rc != 0) {
1141                 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_tag() failed: %d",
1142                             ha->instance, rc);
1143                 goto out_err_free;
1144         }
1145
1146 out:
1147         TRACE_EXIT();
1148         return;
1149
1150 out_err_free:
1151         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
1152
1153 out_err:
1154         q24_send_abts_resp(ha, abts, SCST_MGMT_STATUS_REJECTED, false);
1155         goto out;
1156 }
1157
1158  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1159  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1160  */
1161 static void q24_send_task_mgmt_ctio(scsi_qla_host_t *ha,
1162         struct q2t_mgmt_cmd *mcmd, uint32_t resp_code)
1163 {
1164         const atio7_entry_t *atio = &mcmd->orig_iocb.atio7;
1165         ctio7_status1_entry_t *ctio;
1166
1167         TRACE_ENTRY();
1168
1169         TRACE_DBG("Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)",
1170                   ha, atio, resp_code);
1171
1172         /* Send marker if required */
1173         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1174                 goto out;
1175
1176         ctio = (ctio7_status1_entry_t *)tgt_data.req_pkt(ha);
1177         if (ctio == NULL) {
1178                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1179                         "request packet", ha->instance, __func__);
1180                 goto out;
1181         }
1182
1183         ctio->common.entry_type = CTIO_TYPE7;
1184         ctio->common.entry_count = 1;
1185         ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1186         ctio->common.nport_handle = mcmd->sess->loop_id;
1187         ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1188         ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
1189         ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
1190         ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
1191         ctio->common.exchange_addr = atio->exchange_addr;
1192         ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
1193                 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
1194         ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
1195         ctio->scsi_status = cpu_to_le16(resp_code);
1196
1197         TRACE_BUFFER("CTIO7 TASK MGMT packet data", ctio, REQUEST_ENTRY_SIZE);
1198
1199         q2t_exec_queue(ha);
1200
1201 out:
1202         TRACE_EXIT();
1203         return;
1204 }
1205
1206  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1207  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1208  */
1209 static void q24_send_notify_ack(scsi_qla_host_t *ha,
1210         notify24xx_entry_t *iocb, uint16_t srr_flags,
1211         uint8_t srr_reject_code, uint8_t srr_explan)
1212 {
1213         nack24xx_entry_t *nack;
1214
1215         TRACE_ENTRY();
1216
1217         TRACE_DBG("Sending NOTIFY_ACK24 (ha=%p)", ha);
1218
1219         /* Send marker if required */
1220         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1221                 goto out;
1222
1223         if (ha->tgt != NULL)
1224                 ha->tgt->notify_ack_expected++;
1225
1226         nack = (nack24xx_entry_t *)tgt_data.req_pkt(ha);
1227         if (nack == NULL) {
1228                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1229                         "request packet", ha->instance, __func__);
1230                 goto out;
1231         }
1232
1233         nack->entry_type = NOTIFY_ACK_TYPE;
1234         nack->entry_count = 1;
1235         nack->nport_handle = iocb->nport_handle;
1236         if (le16_to_cpu(iocb->status) == IMM_NTFY_ELS) {
1237                 nack->flags = iocb->flags &
1238                         __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1239         }
1240         nack->srr_rx_id = iocb->srr_rx_id;
1241         nack->status = iocb->status;
1242         nack->status_subcode = iocb->status_subcode;
1243         nack->exchange_address = iocb->exchange_address;
1244         nack->srr_rel_offs = iocb->srr_rel_offs;
1245         nack->srr_ui = iocb->srr_ui;
1246         nack->srr_flags = cpu_to_le16(srr_flags);
1247         nack->srr_reject_code = srr_reject_code;
1248         nack->srr_reject_code_expl = srr_explan;
1249         nack->ox_id = iocb->ox_id;
1250
1251         TRACE(TRACE_SCSI, "Sending 24xx Notify Ack %d", nack->status);
1252         TRACE_BUFFER("24xx Notify Ack packet data", nack, sizeof(*nack));
1253
1254         q2t_exec_queue(ha);
1255
1256 out:
1257         TRACE_EXIT();
1258         return;
1259 }
1260
1261 int q2t_convert_to_fc_tm_status(int scst_mstatus)
1262 {
1263         int res;
1264
1265         switch (scst_mstatus) {
1266         case SCST_MGMT_STATUS_SUCCESS:
1267                 res = FC_TM_SUCCESS;
1268                 break;
1269         case SCST_MGMT_STATUS_TASK_NOT_EXIST:
1270                 res = FC_TM_BAD_CMD;
1271                 break;
1272         case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
1273         case SCST_MGMT_STATUS_REJECTED:
1274                 res = FC_TM_REJECT;
1275                 break;
1276         case SCST_MGMT_STATUS_LUN_NOT_EXIST:
1277         case SCST_MGMT_STATUS_FAILED:
1278         default:
1279                 res = FC_TM_FAILED;
1280                 break;
1281         }
1282
1283         TRACE_EXIT_RES(res);
1284         return res;
1285 }
1286
1287 /* SCST Callback */
1288 static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
1289 {
1290         struct q2t_mgmt_cmd *mcmd;
1291         unsigned long flags;
1292         scsi_qla_host_t *ha;
1293
1294         TRACE_ENTRY();
1295
1296         TRACE_MGMT_DBG("scst_mcmd (%p) status %#x state %#x", scst_mcmd,
1297                 scst_mcmd->status, scst_mcmd->state);
1298
1299         mcmd = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
1300         if (unlikely(mcmd == NULL)) {
1301                 PRINT_ERROR("scst_mcmd %p tgt_spec is NULL", scst_mcmd);
1302                 goto out;
1303         }
1304
1305         ha = mcmd->sess->tgt->ha;
1306
1307         spin_lock_irqsave(&ha->hardware_lock, flags);
1308         if (IS_FWI2_CAPABLE(ha)) {
1309                 if (mcmd->flags == Q24_MGMT_SEND_NACK) {
1310                         q24_send_notify_ack(ha,
1311                                 &mcmd->orig_iocb.notify_entry24, 0, 0, 0);
1312                 } else {
1313                         if (scst_mcmd->fn == SCST_ABORT_TASK)
1314                                 q24_send_abts_resp(ha, &mcmd->orig_iocb.abts,
1315                                         scst_mgmt_cmd_get_status(scst_mcmd),
1316                                         false);
1317                         else
1318                                 q24_send_task_mgmt_ctio(ha, mcmd,
1319                                         q2t_convert_to_fc_tm_status(
1320                                                 scst_mgmt_cmd_get_status(scst_mcmd)));
1321                 }
1322         } else {
1323                 int resp_code = q2t_convert_to_fc_tm_status(
1324                                         scst_mgmt_cmd_get_status(scst_mcmd));
1325                 q2x_send_notify_ack(ha, &mcmd->orig_iocb.notify_entry, 0,
1326                         resp_code, 1, 0, 0, 0);
1327         }
1328         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1329
1330         scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
1331         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
1332
1333 out:
1334         TRACE_EXIT();
1335         return;
1336 }
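
/*
 * Example (illustrative, based on the callers in this file): lifecycle of a
 * struct q2t_mgmt_cmd.  It is allocated from q2t_mgmt_cmd_mempool in the
 * ABTS/task-management paths, attached to the SCST mgmt command as its
 * tgt_priv, and freed above once the response has been queued:
 *
 *	mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
 *	memset(mcmd, 0, sizeof(*mcmd));
 *	mcmd->sess = sess;
 *	rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
 *		SCST_ATOMIC, mcmd);
 *	/o SCST later calls q2t_task_mgmt_fn_done(), which sends the response
 *	   and does mempool_free(mcmd, q2t_mgmt_cmd_mempool). o/
 */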
1337
1338 /* No locks */
1339 static int q2t_pci_map_calc_cnt(struct q2t_prm *prm)
1340 {
1341         int res = 0;
1342
1343         sBUG_ON(prm->cmd->sg_cnt == 0);
1344
1345         prm->sg = (struct scatterlist *)prm->cmd->sg;
1346         prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, prm->cmd->sg,
1347                 prm->cmd->sg_cnt, prm->cmd->dma_data_direction);
1348         if (unlikely(prm->seg_cnt == 0))
1349                 goto out_err;
1350         /*
1351          * If there are more S/G entries than fit in the command IOCB,
1352          * we need to allocate continuation entries
1353          */
1354         if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) {
1355                 prm->req_cnt += (uint16_t)(prm->seg_cnt -
1356                                 prm->tgt->datasegs_per_cmd) /
1357                                 prm->tgt->datasegs_per_cont;
1358                 if (((uint16_t)(prm->seg_cnt - prm->tgt->datasegs_per_cmd)) %
1359                     prm->tgt->datasegs_per_cont)
1360                         prm->req_cnt++;
1361         }
1362
1363 out:
1364         TRACE_DBG("seg_cnt=%d, req_cnt=%d, res=%d", prm->seg_cnt,
1365                 prm->req_cnt, res);
1366         return res;
1367
1368 out_err:
1369         PRINT_ERROR("qla2x00tgt(%ld): PCI mapping failed: sg_cnt=%d",
1370                 prm->tgt->ha->instance, prm->cmd->sg_cnt);
1371         res = -1;
1372         goto out;
1373 }
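
/*
 * Worked example (editorial; the per-chip values here are assumptions, not
 * taken from this file): if datasegs_per_cmd were 3 and datasegs_per_cont
 * were 7, a command with seg_cnt = 24 mapped segments would add
 * (24 - 3) / 7 = 3 continuation entries to req_cnt, and since
 * (24 - 3) % 7 == 0 no extra entry is needed.  With seg_cnt = 25 the
 * remainder is 1, so one more entry would be added.
 */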
1374
1375 static int q2t_check_reserve_free_req(scsi_qla_host_t *ha, uint32_t req_cnt)
1376 {
1377         int res = SCST_TGT_RES_SUCCESS;
1378         device_reg_t __iomem *reg = ha->iobase;
1379         uint32_t cnt;
1380
1381         TRACE_ENTRY();
1382
1383         if (ha->req_q_cnt < (req_cnt + 2)) {
1384                 if (IS_FWI2_CAPABLE(ha))
1385                         cnt = (uint16_t)RD_REG_DWORD(
1386                                     &reg->isp24.req_q_out);
1387                 else
1388                         cnt = qla2x00_debounce_register(
1389                                     ISP_REQ_Q_OUT(ha, &reg->isp));
1390                 TRACE_DBG("Request ring circled: cnt=%d, "
1391                         "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
1392                         cnt, ha->req_ring_index, ha->req_q_cnt, req_cnt);
1393                 if  (ha->req_ring_index < cnt)
1394                         ha->req_q_cnt = cnt - ha->req_ring_index;
1395                 else
1396                         ha->req_q_cnt = ha->request_q_length -
1397                             (ha->req_ring_index - cnt);
1398         }
1399
1400         if (unlikely(ha->req_q_cnt < (req_cnt + 2))) {
1401                 TRACE(TRACE_OUT_OF_MEM, "There is no room in the request ring: "
1402                         "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
1403                         ha->req_ring_index, ha->req_q_cnt, req_cnt);
1404                 res = SCST_TGT_RES_QUEUE_FULL;
1405                 goto out;
1406         }
1407
1408         ha->req_q_cnt -= req_cnt;
1409
1410 out:
1411         TRACE_EXIT_RES(res);
1412         return res;
1413 }
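
/*
 * Worked example (editorial; the ring size is an assumed value): with
 * request_q_length = 512, req_ring_index = 500 and a hardware out pointer
 * cnt = 10, the free space is recomputed as 512 - (500 - 10) = 22 entries.
 * A caller asking for req_cnt = 21 is then refused with
 * SCST_TGT_RES_QUEUE_FULL, because the check keeps 2 entries of slack
 * (22 < 21 + 2); for req_cnt = 20 the reservation succeeds and req_q_cnt
 * drops to 2.
 */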
1414
1415 /*
1416  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1417  */
1418 static inline void *q2t_get_req_pkt(scsi_qla_host_t *ha)
1419 {
1420         /* Adjust ring index. */
1421         ha->req_ring_index++;
1422         if (ha->req_ring_index == ha->request_q_length) {
1423                 ha->req_ring_index = 0;
1424                 ha->request_ring_ptr = ha->request_ring;
1425         } else {
1426                 ha->request_ring_ptr++;
1427         }
1428         return (cont_entry_t *)ha->request_ring_ptr;
1429 }
1430
1431 /* ha->hardware_lock supposed to be held on entry */
1432 static inline uint32_t q2t_make_handle(scsi_qla_host_t *ha)
1433 {
1434         uint32_t h;
1435
1436         h = ha->current_handle;
1437         /* always increment cmd handle */
1438         do {
1439                 ++h;
1440                 if (h > MAX_OUTSTANDING_COMMANDS)
1441                         h = 1; /* 0 is Q2T_NULL_HANDLE */
1442                 if (h == ha->current_handle) {
1443                         TRACE(TRACE_OUT_OF_MEM,
1444                               "Ran out of empty cmd slots in ha %p", ha);
1445                         h = Q2T_NULL_HANDLE;
1446                         break;
1447                 }
1448         } while ((h == Q2T_NULL_HANDLE) ||
1449                  (h == Q2T_SKIP_HANDLE) ||
1450                  (ha->cmds[h-1] != NULL));
1451
1452         if (h != Q2T_NULL_HANDLE)
1453                 ha->current_handle = h;
1454
1455         return h;
1456 }
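
/*
 * Example (illustrative, mirroring q2x_build_ctio_pkt() below): how a handle
 * from q2t_make_handle() is used.  Handles run from 1 to
 * MAX_OUTSTANDING_COMMANDS (0 is Q2T_NULL_HANDLE), index ha->cmds[] at
 * h - 1, and are tagged before being placed into the CTIO:
 *
 *	h = q2t_make_handle(ha);
 *	if (h != Q2T_NULL_HANDLE)
 *		ha->cmds[h - 1] = prm->cmd;
 *	pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
 */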
1457
1458 /* ha->hardware_lock supposed to be held on entry */
1459 static void q2x_build_ctio_pkt(struct q2t_prm *prm)
1460 {
1461         uint32_t h;
1462         ctio_entry_t *pkt;
1463         scsi_qla_host_t *ha = prm->tgt->ha;
1464
1465         pkt = (ctio_entry_t *)ha->request_ring_ptr;
1466         prm->pkt = pkt;
1467         memset(pkt, 0, sizeof(*pkt));
1468
1469         if (prm->tgt->tgt_enable_64bit_addr)
1470                 pkt->common.entry_type = CTIO_A64_TYPE;
1471         else
1472                 pkt->common.entry_type = CONTINUE_TGT_IO_TYPE;
1473
1474         pkt->common.entry_count = (uint8_t)prm->req_cnt;
1475
1476         h = q2t_make_handle(ha);
1477         if (h != Q2T_NULL_HANDLE)
1478                 ha->cmds[h-1] = prm->cmd;
1479
1480         pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
1481         pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1482
1483         /* Set initiator ID */
1484         h = GET_TARGET_ID(ha, &prm->cmd->atio.atio2x);
1485         SET_TARGET_ID(ha, pkt->common.target, h);
1486
1487         pkt->common.rx_id = prm->cmd->atio.atio2x.rx_id;
1488         pkt->common.relative_offset = cpu_to_le32(prm->cmd->offset);
1489
1490         TRACE(TRACE_DEBUG|TRACE_SCSI,
1491               "handle(scst_cmd) -> %08x, timeout %d L %#x -> I %#x E %#x",
1492               pkt->common.handle, Q2T_TIMEOUT,
1493               le16_to_cpu(prm->cmd->atio.atio2x.lun),
1494               GET_TARGET_ID(ha, &pkt->common), pkt->common.rx_id);
1495 }
1496
1497 /* ha->hardware_lock supposed to be held on entry */
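     /*
      * 24xx/25xx (FWI2) counterpart of q2x_build_ctio_pkt(): build a CTIO
      * type 7 from the ATIO7, filling the nport handle, initiator S_ID,
      * exchange address and OX_ID. Fails with SCST_TGT_RES_QUEUE_FULL if no
      * command handle is free.
      */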
1498 static int q24_build_ctio_pkt(struct q2t_prm *prm)
1499 {
1500         uint32_t h;
1501         ctio7_status0_entry_t *pkt;
1502         scsi_qla_host_t *ha = prm->tgt->ha;
1503         atio7_entry_t *atio = &prm->cmd->atio.atio7;
1504         int res = SCST_TGT_RES_SUCCESS;
1505
1506         TRACE_ENTRY();
1507
1508         pkt = (ctio7_status0_entry_t *)ha->request_ring_ptr;
1509         prm->pkt = pkt;
1510         memset(pkt, 0, sizeof(*pkt));
1511
1512         pkt->common.entry_type = CTIO_TYPE7;
1513         pkt->common.entry_count = (uint8_t)prm->req_cnt;
1514
1515         h = q2t_make_handle(ha);
1516         if (unlikely(h == Q2T_NULL_HANDLE)) {
1517                 /*
1518                  * CTIO type 7 from the firmware doesn't provide a way to
1519                  * know the initiator's LOOP ID, hence we can't find
1520                  * the session and, so, the command.
1521                  * the session and, therefore, the command.
1522                 res = SCST_TGT_RES_QUEUE_FULL;
1523                 goto out;
1524         } else
1525                 ha->cmds[h-1] = prm->cmd;
1526
1527         pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
1528         pkt->common.nport_handle = prm->cmd->loop_id;
1529         pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1530         pkt->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
1531         pkt->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
1532         pkt->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
1533         pkt->common.exchange_addr = atio->exchange_addr;
1534         pkt->flags |= (atio->attr << 9);
1535         pkt->ox_id = swab16(atio->fcp_hdr.ox_id);
1536         pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
1537
1538 out:
1539         TRACE(TRACE_DEBUG|TRACE_SCSI, "handle(scst_cmd) -> %08x, timeout %d "
1540                 "ox_id %#x", pkt->common.handle, Q2T_TIMEOUT,
1541                 le16_to_cpu(pkt->ox_id));
1542         TRACE_EXIT_RES(res);
1543         return res;
1544 }
1545
1546 /*
1547  * ha->hardware_lock supposed to be held on entry. We have already made sure
1548  * that there are enough request entries so the lock need not be dropped.
1549  */
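     /*
      * Emit continuation IOCBs for whatever data segments did not fit into
      * the initial CTIO; each continuation entry holds up to
      * prm->tgt->datasegs_per_cont segments.
      */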
1550 static void q2t_load_cont_data_segments(struct q2t_prm *prm)
1551 {
1552         int cnt;
1553         uint32_t *dword_ptr;
1554         int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1555
1556         TRACE_ENTRY();
1557
1558         /* Build continuation packets */
1559         while (prm->seg_cnt > 0) {
1560                 cont_a64_entry_t *cont_pkt64 =
1561                         (cont_a64_entry_t *)q2t_get_req_pkt(prm->tgt->ha);
1562
1563                 /*
1564                  * Make sure that none of the 64-bit specific
1565                  * fields of cont_pkt64 are used for 32-bit
1566                  * addressing; cast to (cont_entry_t *) for
1567                  * that.
1568                  */
1569
1570                 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
1571
1572                 cont_pkt64->entry_count = 1;
1573                 cont_pkt64->sys_define = 0;
1574
1575                 if (enable_64bit_addressing) {
1576                         cont_pkt64->entry_type = CONTINUE_A64_TYPE;
1577                         dword_ptr =
1578                             (uint32_t *)&cont_pkt64->dseg_0_address;
1579                 } else {
1580                         cont_pkt64->entry_type = CONTINUE_TYPE;
1581                         dword_ptr =
1582                             (uint32_t *)&((cont_entry_t *)
1583                                             cont_pkt64)->dseg_0_address;
1584                 }
1585
1586                 /* Load continuation entry data segments */
1587                 for (cnt = 0;
1588                      cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
1589                      cnt++, prm->seg_cnt--) {
1590                         *dword_ptr++ =
1591                             cpu_to_le32(pci_dma_lo32
1592                                         (sg_dma_address(prm->sg)));
1593                         if (enable_64bit_addressing) {
1594                                 *dword_ptr++ =
1595                                     cpu_to_le32(pci_dma_hi32
1596                                                 (sg_dma_address
1597                                                  (prm->sg)));
1598                         }
1599                         *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1600
1601                         TRACE_SG("S/G Segment Cont. phys_addr=%llx:%llx, len=%d",
1602                               (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
1603                               (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
1604                               (int)sg_dma_len(prm->sg));
1605
1606                         prm->sg++;
1607                 }
1608
1609                 TRACE_BUFFER("Continuation packet data",
1610                              cont_pkt64, REQUEST_ENTRY_SIZE);
1611         }
1612
1613         TRACE_EXIT();
1614         return;
1615 }
1616
1617 /*
1618  * ha->hardware_lock supposed to be held on entry. We have already made sure
1619  * that there are enough request entries so the lock need not be dropped.
1620  */
1621 static void q2x_load_data_segments(struct q2t_prm *prm)
1622 {
1623         int cnt;
1624         uint32_t *dword_ptr;
1625         int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1626         ctio_common_entry_t *pkt = (ctio_common_entry_t *)prm->pkt;
1627
1628         TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
1629               le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
1630
1631         pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
1632
1633         /* Setup packet address segment pointer */
1634         dword_ptr = pkt->dseg_0_address;
1635
1636         if (prm->seg_cnt == 0) {
1637                 /* No data transfer */
1638                 *dword_ptr++ = 0;
1639                 *dword_ptr = 0;
1640
1641                 TRACE_BUFFER("No data, CTIO packet data", pkt,
1642                         REQUEST_ENTRY_SIZE);
1643                 goto out;
1644         }
1645
1646         /* Set total data segment count */
1647         pkt->dseg_count = cpu_to_le16(prm->seg_cnt);
1648
1649         /* If scatter gather */
1650         TRACE_SG("%s", "Building S/G data segments...");
1651         /* Load command entry data segments */
1652         for (cnt = 0;
1653              (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1654              cnt++, prm->seg_cnt--) {
1655                 *dword_ptr++ =
1656                     cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1657                 if (enable_64bit_addressing) {
1658                         *dword_ptr++ =
1659                             cpu_to_le32(pci_dma_hi32
1660                                         (sg_dma_address(prm->sg)));
1661                 }
1662                 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1663
1664                 TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
1665                       (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
1666                       (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
1667                       (int)sg_dma_len(prm->sg));
1668
1669                 prm->sg++;
1670         }
1671
1672         TRACE_BUFFER("Scatter/gather, CTIO packet data", pkt,
1673                 REQUEST_ENTRY_SIZE);
1674
1675         q2t_load_cont_data_segments(prm);
1676
1677 out:
1678         return;
1679 }
1680
1681 /*
1682  * ha->hardware_lock supposed to be held on entry. We have already made sure
1683  * that there are enough request entries so the lock need not be dropped.
1684  */
1685 static void q24_load_data_segments(struct q2t_prm *prm)
1686 {
1687         int cnt;
1688         uint32_t *dword_ptr;
1689         int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1690         ctio7_status0_entry_t *pkt = (ctio7_status0_entry_t *)prm->pkt;
1691
1692         TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
1693               le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
1694
1695         pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
1696
1697         /* Setup packet address segment pointer */
1698         dword_ptr = pkt->dseg_0_address;
1699
1700         if (prm->seg_cnt == 0) {
1701                 /* No data transfer */
1702                 *dword_ptr++ = 0;
1703                 *dword_ptr = 0;
1704
1705                 TRACE_BUFFER("No data, CTIO7 packet data", pkt,
1706                         REQUEST_ENTRY_SIZE);
1707                 goto out;
1708         }
1709
1710         /* Set total data segment count */
1711         pkt->common.dseg_count = cpu_to_le16(prm->seg_cnt);
1712
1713         /* If scatter gather */
1714         TRACE_SG("%s", "Building S/G data segments...");
1715         /* Load command entry data segments */
1716         for (cnt = 0;
1717              (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1718              cnt++, prm->seg_cnt--) {
1719                 *dword_ptr++ =
1720                     cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1721                 if (enable_64bit_addressing) {
1722                         *dword_ptr++ =
1723                             cpu_to_le32(pci_dma_hi32(
1724                                         sg_dma_address(prm->sg)));
1725                 }
1726                 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1727
1728                 TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
1729                       (long long unsigned int)pci_dma_hi32(sg_dma_address(
1730                                                                 prm->sg)),
1731                       (long long unsigned int)pci_dma_lo32(sg_dma_address(
1732                                                                 prm->sg)),
1733                       (int)sg_dma_len(prm->sg));
1734
1735                 prm->sg++;
1736         }
1737
1738         q2t_load_cont_data_segments(prm);
1739
1740 out:
1741         return;
1742 }
1743
1744 static inline int q2t_has_data(struct q2t_cmd *cmd)
1745 {
1746         return cmd->bufflen > 0;
1747 }
1748
1749 static int q2t_pre_xmit_response(struct q2t_cmd *cmd,
1750         struct q2t_prm *prm, int xmit_type, unsigned long *flags)
1751 {
1752         int res;
1753         struct q2t_tgt *tgt = cmd->tgt;
1754         scsi_qla_host_t *ha;
1755         uint16_t full_req_cnt;
1756         struct scst_cmd *scst_cmd = cmd->scst_cmd;
1757
1758         TRACE_ENTRY();
1759
1760         if (unlikely(cmd->aborted)) {
1761                 scsi_qla_host_t *ha = tgt->ha;
1762
1763                 TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): terminating exchange "
1764                         "for aborted cmd=%p (scst_cmd=%p, tag=%d)",
1765                         ha->instance, cmd, scst_cmd, cmd->tag);
1766
1767                 cmd->state = Q2T_STATE_ABORTED;
1768                 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
1769
1770                 if (IS_FWI2_CAPABLE(ha))
1771                         q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 0);
1772                 else
1773                         q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 0);
1774                 /* !! At this point cmd could already be freed !! */
1775                 res = Q2T_PRE_XMIT_RESP_CMD_ABORTED;
1776                 goto out;
1777         }
1778
1779         TRACE(TRACE_SCSI, "tag=%lld", scst_cmd_get_tag(scst_cmd));
1780
1781         prm->cmd = cmd;
1782         prm->tgt = tgt;
1783         prm->rq_result = scst_cmd_get_status(scst_cmd);
1784         prm->sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
1785         prm->sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
1786         prm->sg = NULL;
1787         prm->seg_cnt = -1;
1788         prm->req_cnt = 1;
1789         prm->add_status_pkt = 0;
1790         ha = tgt->ha;
1791
1792         TRACE_DBG("rq_result=%x, xmit_type=%x", prm->rq_result, xmit_type);
1793         if (prm->rq_result != 0)
1794                 TRACE_BUFFER("Sense", prm->sense_buffer, prm->sense_buffer_len);
1795
1796         /* Send marker if required */
1797         if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
1798                 res = SCST_TGT_RES_FATAL_ERROR;
1799                 goto out;
1800         }
1801
1802         TRACE_DBG("CTIO start: ha(%d)", (int)ha->instance);
1803
1804         if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
1805                 if (q2t_pci_map_calc_cnt(prm) != 0) {
1806                         res = SCST_TGT_RES_QUEUE_FULL;
1807                         goto out;
1808                 }
1809         }
1810
1811         full_req_cnt = prm->req_cnt;
1812
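             /*
              * Compute the residual as the difference between the transfer
              * length requested in the ATIO and the data actually being
              * returned, e.g. a READ asking for 4096 bytes and answered with
              * 512 bytes yields a residual of 3584 and SS_RESIDUAL_UNDER.
              */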
1813         if (xmit_type & Q2T_XMIT_STATUS) {
1814                 if (cmd->data_direction != SCST_DATA_WRITE) {
1815                         int expected;
1816                         if (IS_FWI2_CAPABLE(ha))
1817                                 expected = be32_to_cpu(cmd->
1818                                              atio.atio7.fcp_cmnd.data_length);
1819                         else
1820                                 expected = le32_to_cpu(cmd->
1821                                                 atio.atio2x.data_length);
1822                         prm->residual = expected -
1823                                 scst_cmd_get_resp_data_len(scst_cmd);
1824                         if (prm->residual > 0) {
1825                                 TRACE_DBG("Residual underflow: %d (tag %lld, "
1826                                         "op %x, expected %d, resp_data_len "
1827                                         "%d, bufflen %d, rq_result %x)",
1828                                         prm->residual, scst_cmd->tag,
1829                                         scst_cmd->cdb[0], expected,
1830                                         scst_cmd_get_resp_data_len(scst_cmd),
1831                                         cmd->bufflen, prm->rq_result);
1832                                 prm->rq_result |= SS_RESIDUAL_UNDER;
1833                         } else if (prm->residual < 0) {
1834                                 TRACE_DBG("Residual overflow: %d (tag %lld, "
1835                                         "op %x, expected %d, resp_data_len "
1836                                         "%d, bufflen %d, rq_result %x)",
1837                                         prm->residual, scst_cmd->tag,
1838                                         scst_cmd->cdb[0], expected,
1839                                         scst_cmd_get_resp_data_len(scst_cmd),
1840                                         cmd->bufflen, prm->rq_result);
1841                                 prm->rq_result |= SS_RESIDUAL_OVER;
1842                                 prm->residual = -prm->residual;
1843                         }
1844                 }
1845
1846                 /*
1847                  * If Q2T_XMIT_DATA is not set, add_status_pkt will be ignored
1848                  * in *xmit_response() below
1849                  */
1850                 if (q2t_has_data(cmd)) {
1851                         if (SCST_SENSE_VALID(prm->sense_buffer) ||
1852                             (IS_FWI2_CAPABLE(ha) &&
1853                              (prm->rq_result != 0))) {
1854                                 prm->add_status_pkt = 1;
1855                                 full_req_cnt++;
1856                         }
1857                 }
1858         }
1859
1860         TRACE_DBG("req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d",
1861                 prm->req_cnt, full_req_cnt, prm->add_status_pkt);
1862
1863         /* Acquire ring specific lock */
1864         spin_lock_irqsave(&ha->hardware_lock, *flags);
1865
1866         /* Does the F/W have enough IOCBs for this request? */
1867         res = q2t_check_reserve_free_req(ha, full_req_cnt);
1868         if (unlikely(res != SCST_TGT_RES_SUCCESS) &&
1869             (xmit_type & Q2T_XMIT_DATA))
1870                 goto out_unlock_free_unmap;
1871
1872 out:
1873         TRACE_EXIT_RES(res);
1874         return res;
1875
1876 out_unlock_free_unmap:
1877         if (q2t_has_data(cmd))
1878                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
1879                      cmd->dma_data_direction);
1880
1881         /* Release ring specific lock */
1882         spin_unlock_irqrestore(&ha->hardware_lock, *flags);
1883         goto out;
1884 }
1885
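     /*
      * Decide whether the CTIO should ask the initiator for an explicit
      * confirmation: never when class 2 is enabled; for sense data it depends
      * only on the initiator's conf_compl_supported capability, otherwise
      * both enable_explicit_conf and conf_compl_supported must be set.
      */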
1886 static inline int q2t_need_explicit_conf(scsi_qla_host_t *ha,
1887         struct q2t_cmd *cmd, int sending_sense)
1888 {
1889         if (ha->enable_class_2)
1890                 return 0;
1891
1892         if (sending_sense)
1893                 return cmd->conf_compl_supported;
1894         else
1895                 return ha->enable_explicit_conf && cmd->conf_compl_supported;
1896 }
1897
1898 static void q2x_init_ctio_ret_entry(ctio_ret_entry_t *ctio_m1,
1899         struct q2t_prm *prm)
1900 {
1901         TRACE_ENTRY();
1902
1903         prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
1904                                     (uint32_t)sizeof(ctio_m1->sense_data));
1905
1906         ctio_m1->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
1907                                      OF_NO_DATA | OF_SS_MODE_1);
1908         ctio_m1->flags |= __constant_cpu_to_le16(OF_INC_RC);
1909         if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
1910                 ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
1911                                         OF_CONF_REQ);
1912         }
1913         ctio_m1->scsi_status = cpu_to_le16(prm->rq_result);
1914         ctio_m1->residual = cpu_to_le32(prm->residual);
1915         if (SCST_SENSE_VALID(prm->sense_buffer)) {
1916                 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1917                         ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
1918                                                 OF_CONF_REQ);
1919                 }
1920                 ctio_m1->scsi_status |= __constant_cpu_to_le16(
1921                                                 SS_SENSE_LEN_VALID);
1922                 ctio_m1->sense_length = cpu_to_le16(prm->sense_buffer_len);
1923                 memcpy(ctio_m1->sense_data, prm->sense_buffer,
1924                        prm->sense_buffer_len);
1925         } else {
1926                 memset(ctio_m1->sense_data, 0, sizeof(ctio_m1->sense_data));
1927                 ctio_m1->sense_length = 0;
1928         }
1929
1930         /* Sense with len > 26, is it possible ??? */
1931
1932         TRACE_EXIT();
1933         return;
1934 }
1935
1936 static int __q2x_xmit_response(struct q2t_cmd *cmd, int xmit_type)
1937 {
1938         int res;
1939         unsigned long flags;
1940         scsi_qla_host_t *ha;
1941         struct q2t_prm prm;
1942         ctio_common_entry_t *pkt;
1943
1944         TRACE_ENTRY();
1945
1946         memset(&prm, 0, sizeof(prm));
1947
1948         res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
1949         if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
1950                 if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
1951                         res = SCST_TGT_RES_SUCCESS;
1952                 goto out;
1953         }
1954
1955         /* Here ha->hardware_lock already locked */
1956
1957         ha = prm.tgt->ha;
1958
1959         q2x_build_ctio_pkt(&prm);
1960         pkt = (ctio_common_entry_t *)prm.pkt;
1961
1962         if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
1963                 pkt->flags |= __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_IN);
1964                 pkt->flags |= __constant_cpu_to_le16(OF_INC_RC);
1965
1966                 q2x_load_data_segments(&prm);
1967
1968                 if (prm.add_status_pkt == 0) {
1969                         if (xmit_type & Q2T_XMIT_STATUS) {
1970                                 pkt->scsi_status = cpu_to_le16(prm.rq_result);
1971                                 pkt->residual = cpu_to_le32(prm.residual);
1972                                 pkt->flags |= __constant_cpu_to_le16(OF_SSTS);
1973                                 if (q2t_need_explicit_conf(ha, cmd, 0)) {
1974                                         pkt->flags |= __constant_cpu_to_le16(
1975                                                         OF_EXPL_CONF |
1976                                                         OF_CONF_REQ);
1977                                 }
1978                         }
1979                 } else {
1980                         /*
1981                          * We have already made sure that there are enough
1982                          * request entries, so the HW lock will not be
1983                          * dropped in req_pkt().
1984                          */
1985                         ctio_ret_entry_t *ctio_m1 =
1986                                 (ctio_ret_entry_t *)q2t_get_req_pkt(ha);
1987
1988                         TRACE_DBG("%s", "Building additional status packet");
1989
1990                         memcpy(ctio_m1, pkt, sizeof(*ctio_m1));
1991                         ctio_m1->entry_count = 1;
1992                         ctio_m1->dseg_count = 0;
1993
1994                         /* Real finish is ctio_m1's finish */
1995                         pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
1996                         pkt->flags &= ~__constant_cpu_to_le16(OF_INC_RC);
1997
1998                         q2x_init_ctio_ret_entry(ctio_m1, &prm);
1999                         TRACE_BUFFER("Status CTIO packet data", ctio_m1,
2000                                 REQUEST_ENTRY_SIZE);
2001                 }
2002         } else
2003                 q2x_init_ctio_ret_entry((ctio_ret_entry_t *)pkt, &prm);
2004
2005         cmd->state = Q2T_STATE_PROCESSED;       /* Mid-level is done processing */
2006
2007         TRACE_BUFFER("Xmitting", pkt, REQUEST_ENTRY_SIZE);
2008
2009         q2t_exec_queue(ha);
2010
2011         /* Release ring specific lock */
2012         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2013
2014 out:
2015         TRACE_EXIT_RES(res);
2016         return res;
2017 }
2018
2019 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
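     /*
      * Debug-only fault injection: randomly cut data from a command,
      * presumably so the initiator notices the missing data and requests
      * retransmission, exercising the SRR recovery paths below.
      */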
2020 static void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type)
2021 {
2022 #if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
2023         if ((*xmit_type & Q2T_XMIT_STATUS) && (scst_random() % 200) == 50) {
2024                 *xmit_type &= ~Q2T_XMIT_STATUS;
2025                 TRACE_MGMT_DBG("Dropping cmd %p (tag %d) status", cmd,
2026                         cmd->tag);
2027         }
2028 #endif
2029
2030         if (q2t_has_data(cmd) && (cmd->sg_cnt > 1) &&
2031             ((scst_random() % 100) == 20)) {
2032                 int i, leave = 0;
2033                 unsigned int tot_len = 0;
2034
2035                 while (leave == 0)
2036                         leave = scst_random() % cmd->sg_cnt;
2037
2038                 for (i = 0; i < leave; i++)
2039                         tot_len += cmd->sg[i].length;
2040
2041                 TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer tail to len %d, "
2042                         "sg_cnt %d (cmd->bufflen %d, cmd->sg_cnt %d)", cmd,
2043                         cmd->tag, tot_len, leave, cmd->bufflen, cmd->sg_cnt);
2044
2045                 cmd->bufflen = tot_len;
2046                 cmd->sg_cnt = leave;
2047         }
2048
2049         if (q2t_has_data(cmd) && ((scst_random() % 100) == 70)) {
2050                 unsigned int offset = scst_random() % cmd->bufflen;
2051
2052                 TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer head "
2053                         "to offset %d (cmd->bufflen %d)", cmd, cmd->tag,
2054                         offset, cmd->bufflen);
2055                 if (offset == 0)
2056                         *xmit_type &= ~Q2T_XMIT_DATA;
2057                 else if (q2t_cut_cmd_data_head(cmd, offset)) {
2058                         TRACE_MGMT_DBG("q2t_cut_cmd_data_head() failed (tag %d)",
2059                                 cmd->tag);
2060                 }
2061         }
2062 }
2063 #else
2064 static inline void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type) {}
2065 #endif
2066
2067 static int q2x_xmit_response(struct scst_cmd *scst_cmd)
2068 {
2069         int xmit_type = Q2T_XMIT_DATA;
2070         int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2071         struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2072
2073 #ifdef CONFIG_SCST_EXTRACHECKS
2074         sBUG_ON(!q2t_has_data(cmd) && !is_send_status);
2075 #endif
2076
2077 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2078         if (scst_cmd_atomic(scst_cmd))
2079                 return SCST_TGT_RES_NEED_THREAD_CTX;
2080 #endif
2081
2082         if (is_send_status)
2083                 xmit_type |= Q2T_XMIT_STATUS;
2084
2085         cmd->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2086         cmd->sg = scst_cmd_get_sg(scst_cmd);
2087         cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2088         cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2089         cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
2090         cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
2091         cmd->aborted = scst_cmd_aborted(scst_cmd);
2092
2093         q2t_check_srr_debug(cmd, &xmit_type);
2094
2095         TRACE_DBG("is_send_status=%x, cmd->bufflen=%d, cmd->sg_cnt=%d, "
2096                 "cmd->data_direction=%d", is_send_status, cmd->bufflen,
2097                 cmd->sg_cnt, cmd->data_direction);
2098
2099         return __q2x_xmit_response(cmd, xmit_type);
2100 }
2101
2102 static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio,
2103         struct q2t_prm *prm)
2104 {
2105         ctio7_status1_entry_t *ctio1;
2106
2107         TRACE_ENTRY();
2108
2109         prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
2110                                     (uint32_t)sizeof(ctio1->sense_data));
2111         ctio->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2112         if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2113                 ctio->flags |= __constant_cpu_to_le16(
2114                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
2115                                 CTIO7_FLAGS_CONFORM_REQ);
2116         }
2117         ctio->residual = cpu_to_le32(prm->residual);
2118         ctio->scsi_status = cpu_to_le16(prm->rq_result);
2119         if (SCST_SENSE_VALID(prm->sense_buffer)) {
2120                 int i;
2121                 ctio1 = (ctio7_status1_entry_t *)ctio;
2122                 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2123                         ctio1->flags |= __constant_cpu_to_le16(
2124                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
2125                                 CTIO7_FLAGS_CONFORM_REQ);
2126                 }
2127                 ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2128                 ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2129                 ctio1->scsi_status |= __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
2130                 ctio1->sense_length = cpu_to_le16(prm->sense_buffer_len);
2131                 for (i = 0; i < prm->sense_buffer_len/4; i++)
2132                         ((uint32_t *)ctio1->sense_data)[i] =
2133                                 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2134 #if 0
2135                 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2136                         static int q;
2137                         if (q < 10) {
2138                                 PRINT_INFO("qla2x00tgt(%ld): %d bytes of sense "
2139                                         "lost", prm->tgt->ha->instance,
2140                                         prm->sense_buffer_len % 4);
2141                                 q++;
2142                         }
2143                 }
2144 #endif
2145         } else {
2146                 ctio1 = (ctio7_status1_entry_t *)ctio;
2147                 ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2148                 ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2149                 ctio1->sense_length = 0;
2150                 memset(ctio1->sense_data, 0, sizeof(ctio1->sense_data));
2151         }
2152
2153         /* Sense with len > 24, is it possible ??? */
2154
2155         TRACE_EXIT();
2156         return;
2157 }
2158
2159 static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type)
2160 {
2161         int res;
2162         unsigned long flags;
2163         scsi_qla_host_t *ha;
2164         struct q2t_prm prm;
2165         ctio7_status0_entry_t *pkt;
2166
2167         TRACE_ENTRY();
2168
2169         memset(&prm, 0, sizeof(prm));
2170
2171         res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
2172         if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
2173                 if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
2174                         res = SCST_TGT_RES_SUCCESS;
2175                 goto out;
2176         }
2177
2178         /* Here ha->hardware_lock already locked */
2179
2180         ha = prm.tgt->ha;
2181
2182         res = q24_build_ctio_pkt(&prm);
2183         if (unlikely(res != SCST_TGT_RES_SUCCESS))
2184                 goto out_unmap_unlock;
2185
2186         pkt = (ctio7_status0_entry_t *)prm.pkt;
2187
2188         if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
2189                 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2190                                 CTIO7_FLAGS_STATUS_MODE_0);
2191
2192                 q24_load_data_segments(&prm);
2193
2194                 if (prm.add_status_pkt == 0) {
2195                         if (xmit_type & Q2T_XMIT_STATUS) {
2196                                 pkt->scsi_status = cpu_to_le16(prm.rq_result);
2197                                 pkt->residual = cpu_to_le32(prm.residual);
2198                                 pkt->flags |= __constant_cpu_to_le16(
2199                                                 CTIO7_FLAGS_SEND_STATUS);
2200                                 if (q2t_need_explicit_conf(ha, cmd, 0)) {
2201                                         pkt->flags |= __constant_cpu_to_le16(
2202                                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
2203                                                 CTIO7_FLAGS_CONFORM_REQ);
2204                                 }
2205                         }
2206                 } else {
2207                         /*
2208                          * We have already made sure that there are enough
2209                          * request entries, so the HW lock will not be
2210                          * dropped in req_pkt().
2211                          */
2212                         ctio7_status1_entry_t *ctio =
2213                                 (ctio7_status1_entry_t *)q2t_get_req_pkt(ha);
2214
2215                         TRACE_DBG("%s", "Building additional status packet");
2216
2217                         memcpy(ctio, pkt, sizeof(*ctio));
2218                         ctio->common.entry_count = 1;
2219                         ctio->common.dseg_count = 0;
2220                         ctio->flags &= ~__constant_cpu_to_le16(
2221                                                 CTIO7_FLAGS_DATA_IN);
2222
2223                         /* Real finish is ctio_m1's finish */
2224                         pkt->common.handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2225                         pkt->flags |= __constant_cpu_to_le16(
2226                                         CTIO7_FLAGS_DONT_RET_CTIO);
2227                         q24_init_ctio_ret_entry((ctio7_status0_entry_t *)ctio,
2228                                                         &prm);
2229                         TRACE_BUFFER("Status CTIO7", ctio, REQUEST_ENTRY_SIZE);
2230                 }
2231         } else
2232                 q24_init_ctio_ret_entry(pkt, &prm);
2233
2234         cmd->state = Q2T_STATE_PROCESSED;       /* Mid-level is done processing */
2235
2236         TRACE_BUFFER("Xmitting CTIO7", pkt, REQUEST_ENTRY_SIZE);
2237
2238         q2t_exec_queue(ha);
2239
2240 out_unlock:
2241         /* Release ring specific lock */
2242         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2243
2244 out:
2245         TRACE_EXIT_RES(res);
2246         return res;
2247
2248 out_unmap_unlock:
2249         if (q2t_has_data(cmd))
2250                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2251                         cmd->dma_data_direction);
2252         goto out_unlock;
2253 }
2254
2255 static int q24_xmit_response(struct scst_cmd *scst_cmd)
2256 {
2257         int xmit_type = Q2T_XMIT_DATA;
2258         int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2259         struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2260
2261 #ifdef CONFIG_SCST_EXTRACHECKS
2262         sBUG_ON(!q2t_has_data(cmd) && !is_send_status);
2263 #endif
2264
2265 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2266         if (scst_cmd_atomic(scst_cmd))
2267                 return SCST_TGT_RES_NEED_THREAD_CTX;
2268 #endif
2269
2270         if (is_send_status)
2271                 xmit_type |= Q2T_XMIT_STATUS;
2272
2273         cmd->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2274         cmd->sg = scst_cmd_get_sg(scst_cmd);
2275         cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2276         cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2277         cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
2278         cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
2279         cmd->aborted = scst_cmd_aborted(scst_cmd);
2280
2281         q2t_check_srr_debug(cmd, &xmit_type);
2282
2283         TRACE_DBG("is_send_status=%x, bufflen=%d, sg_cnt=%d, "
2284                 "data_direction=%d, offset=%d", is_send_status, cmd->bufflen,
2285                 cmd->sg_cnt, cmd->data_direction, cmd->offset);
2286
2287         return __q24_xmit_response(cmd, xmit_type);
2288 }
2289
2290 static int __q2t_rdy_to_xfer(struct q2t_cmd *cmd)
2291 {
2292         int res = SCST_TGT_RES_SUCCESS;
2293         unsigned long flags;
2294         scsi_qla_host_t *ha;
2295         struct q2t_tgt *tgt = cmd->tgt;
2296         struct q2t_prm prm;
2297         void *p;
2298
2299         TRACE_ENTRY();
2300
2301         memset(&prm, 0, sizeof(prm));
2302         prm.cmd = cmd;
2303         prm.tgt = tgt;
2304         prm.sg = NULL;
2305         prm.req_cnt = 1;
2306         ha = tgt->ha;
2307
2308         /* Send marker if required */
2309         if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
2310                 res = SCST_TGT_RES_FATAL_ERROR;
2311                 goto out;
2312         }
2313
2314         TRACE_DBG("CTIO_start: ha(%d)", (int)ha->instance);
2315
2316         /* Calculate number of entries and segments required */
2317         if (q2t_pci_map_calc_cnt(&prm) != 0) {
2318                 res = SCST_TGT_RES_QUEUE_FULL;
2319                 goto out;
2320         }
2321
2322         /* Acquire ring specific lock */
2323         spin_lock_irqsave(&ha->hardware_lock, flags);
2324
2325         /* Does the F/W have enough IOCBs for this request? */
2326         res = q2t_check_reserve_free_req(ha, prm.req_cnt);
2327         if (res != SCST_TGT_RES_SUCCESS)
2328                 goto out_unlock_free_unmap;
2329
2330         if (IS_FWI2_CAPABLE(ha)) {
2331                 ctio7_status0_entry_t *pkt;
2332                 res = q24_build_ctio_pkt(&prm);
2333                 if (unlikely(res != SCST_TGT_RES_SUCCESS))
2334                         goto out_unlock_free_unmap;
2335                 pkt = (ctio7_status0_entry_t *)prm.pkt;
2336                 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2337                                 CTIO7_FLAGS_STATUS_MODE_0);
2338                 q24_load_data_segments(&prm);
2339                 p = pkt;
2340         } else {
2341                 ctio_common_entry_t *pkt;
2342                 q2x_build_ctio_pkt(&prm);
2343                 pkt = (ctio_common_entry_t *)prm.pkt;
2344                 pkt->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_OUT);
2345                 q2x_load_data_segments(&prm);
2346                 p = pkt;
2347         }
2348
2349         cmd->state = Q2T_STATE_NEED_DATA;
2350
2351         TRACE_BUFFER("Xfering", p, REQUEST_ENTRY_SIZE);
2352
2353         q2t_exec_queue(ha);
2354
2355 out_unlock:
2356         /* Release ring specific lock */
2357         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2358
2359 out:
2360         TRACE_EXIT_RES(res);
2361         return res;
2362
2363 out_unlock_free_unmap:
2364         if (q2t_has_data(cmd)) {
2365                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2366                      cmd->dma_data_direction);
2367         }
2368         goto out_unlock;
2369 }
2370
2371 static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd)
2372 {
2373         int res;
2374         struct q2t_cmd *cmd;
2375
2376         TRACE_ENTRY();
2377
2378         TRACE(TRACE_SCSI, "tag=%lld", scst_cmd_get_tag(scst_cmd));
2379
2380         cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2381         cmd->bufflen = scst_cmd_get_bufflen(scst_cmd);
2382         cmd->sg = scst_cmd_get_sg(scst_cmd);
2383         cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2384         cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2385         cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
2386
2387         res = __q2t_rdy_to_xfer(cmd);
2388
2389         TRACE_EXIT();
2390         return res;
2391 }
2392
2393 /* If hardware_lock held on entry, might drop it, then reacquire */
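     /*
      * Abort the exchange on the wire by sending a CTIO with OF_TERM_EXCH
      * set. If a command is attached and was far enough along, its SCST
      * command is completed here as well.
      */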
2394 static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2395         atio_entry_t *atio, int ha_locked)
2396 {
2397         ctio_ret_entry_t *ctio;
2398         unsigned long flags = 0; /* to silence a compiler warning */
2399         int do_tgt_cmd_done = 0;
2400
2401         TRACE_ENTRY();
2402
2403         TRACE_DBG("Sending TERM EXCH CTIO (ha=%p)", ha);
2404
2405         /* Send marker if required */
2406         if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
2407                 goto out;
2408
2409         if (!ha_locked)
2410                 spin_lock_irqsave(&ha->hardware_lock, flags);
2411
2412         ctio = (ctio_ret_entry_t *)tgt_data.req_pkt(ha);
2413         if (ctio == NULL) {
2414                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
2415                         "request packet", ha->instance, __func__);
2416                 goto out_unlock;
2417         }
2418
2419         ctio->entry_type = CTIO_RET_TYPE;
2420         ctio->entry_count = 1;
2421         if (cmd != NULL) {
2422                 if (cmd->state < Q2T_STATE_PROCESSED) {
2423                         PRINT_ERROR("qla2x00tgt(%ld): Terminating cmd %p with "
2424                                 "incorrect state %d", ha->instance, cmd,
2425                                 cmd->state);
2426                 } else
2427                         do_tgt_cmd_done = 1;
2428         }
2429         ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2430
2431         /* Set IDs */
2432         SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
2433         ctio->rx_id = atio->rx_id;
2434
2435         /* Most likely, it isn't needed */
2436         ctio->residual = atio->data_length;
2437         if (ctio->residual != 0)
2438                 ctio->scsi_status |= SS_RESIDUAL_UNDER;
2439
2440         ctio->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_TERM_EXCH |
2441                         OF_NO_DATA | OF_SS_MODE_1);
2442         ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
2443
2444         TRACE_BUFFER("CTIO TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
2445
2446         q2t_exec_queue(ha);
2447
2448 out_unlock:
2449         if (!ha_locked)
2450                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2451
2452         if (do_tgt_cmd_done) {
2453                 if (!ha_locked && !in_interrupt()) {
2454                         msleep(250); /* just in case */
2455                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
2456                 } else
2457                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
2458                 /* !! At this point cmd could already be freed !! */
2459         }
2460
2461 out:
2462         TRACE_EXIT();
2463         return;
2464 }
2465
2466 /* If hardware_lock held on entry, might drop it, then reacquire */
2467 static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2468         atio7_entry_t *atio, int ha_locked)
2469 {
2470         ctio7_status1_entry_t *ctio;
2471         unsigned long flags = 0; /* to silence a compiler warning */
2472         int do_tgt_cmd_done = 0;
2473
2474         TRACE_ENTRY();
2475
2476         TRACE_DBG("Sending TERM EXCH CTIO7 (ha=%p)", ha);
2477
2478         /* Send marker if required */
2479         if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
2480                 goto out;
2481
2482         if (!ha_locked)
2483                 spin_lock_irqsave(&ha->hardware_lock, flags);
2484
2485         ctio = (ctio7_status1_entry_t *)tgt_data.req_pkt(ha);
2486         if (ctio == NULL) {
2487                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
2488                         "request packet", ha->instance, __func__);
2489                 goto out_unlock;
2490         }
2491
2492         ctio->common.entry_type = CTIO_TYPE7;
2493         ctio->common.entry_count = 1;
2494         if (cmd != NULL) {
2495                 ctio->common.nport_handle = cmd->loop_id;
2496                 if (cmd->state < Q2T_STATE_PROCESSED) {
2497                         PRINT_ERROR("qla2x00tgt(%ld): Terminating cmd %p with "
2498                                 "incorrect state %d", ha->instance, cmd,
2499                                  cmd->state);
2500                 } else
2501                         do_tgt_cmd_done = 1;
2502         } else
2503                 ctio->common.nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
2504         ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2505         ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
2506         ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
2507         ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
2508         ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
2509         ctio->common.exchange_addr = atio->exchange_addr;
2510         ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
2511                 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
2512         ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
2513
2514         /* Most likely, it isn't needed */
2515         ctio->residual = atio->fcp_cmnd.data_length;
2516         if (ctio->residual != 0)
2517                 ctio->scsi_status |= SS_RESIDUAL_UNDER;
2518
2519         TRACE_BUFFER("CTIO7 TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
2520
2521         q2t_exec_queue(ha);
2522
2523 out_unlock:
2524         if (!ha_locked)
2525                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2526
2527         if (do_tgt_cmd_done) {
2528                 if (!ha_locked && !in_interrupt()) {
2529                         msleep(250); /* just in case */
2530                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
2531                 } else
2532                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
2533                 /* !! At this point cmd could already be freed !! */
2534         }
2535
2536 out:
2537         TRACE_EXIT();
2538         return;
2539 }
2540
2541 static inline void q2t_free_cmd(struct q2t_cmd *cmd)
2542 {
2543         if (unlikely(cmd->free_sg))
2544                 kfree(cmd->sg);
2545         kmem_cache_free(q2t_cmd_cachep, cmd);
2546 }
2547
2548 static void q2t_on_free_cmd(struct scst_cmd *scst_cmd)
2549 {
2550         struct q2t_cmd *cmd;
2551
2552         TRACE_ENTRY();
2553
2554         TRACE(TRACE_SCSI, "Freeing command %p, tag %lld", scst_cmd,
2555                 scst_cmd_get_tag(scst_cmd));
2556
2557         cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2558         scst_cmd_set_tgt_priv(scst_cmd, NULL);
2559
2560         q2t_free_cmd(cmd);
2561
2562         TRACE_EXIT();
2563         return;
2564 }
2565
2566 /* ha->hardware_lock supposed to be held on entry */
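     /*
      * Queue a CTIO that completed with SRR status onto tgt->srr_ctio_list.
      * The actual recovery is done by srr_work once the matching immediate
      * SRR notify (same srr_id) has arrived; without it the entry is dropped.
      */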
2567 static int q2t_prepare_srr_ctio(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2568         void *ctio)
2569 {
2570         struct srr_ctio *sc;
2571         struct q2t_tgt *tgt = ha->tgt;
2572         int res = 0;
2573         struct srr_imm *imm;
2574
2575         tgt->ctio_srr_id++;
2576
2577         TRACE_MGMT_DBG("qla2x00tgt(%ld): CTIO with SRR "
2578                 "status received", ha->instance);
2579
2580         if (ctio == NULL) {
2581                 PRINT_ERROR("qla2x00tgt(%ld): SRR CTIO, "
2582                         "but ctio is NULL", ha->instance);
2583                 res = -EINVAL;
2584                 goto out;
2585         }
2586
2587         sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2588         if (sc != NULL) {
2589                 sc->cmd = cmd;
2590                 /* IRQ is already OFF */
2591                 spin_lock(&tgt->srr_lock);
2592                 sc->srr_id = tgt->ctio_srr_id;
2593                 list_add_tail(&sc->srr_list_entry,
2594                         &tgt->srr_ctio_list);
2595                 TRACE_MGMT_DBG("CTIO SRR %p added (id %d)",
2596                         sc, sc->srr_id);
2597                 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2598                         int found = 0;
2599                         list_for_each_entry(imm, &tgt->srr_imm_list,
2600                                         srr_list_entry) {
2601                                 if (imm->srr_id == sc->srr_id) {
2602                                         found = 1;
2603                                         break;
2604                                 }
2605                         }
2606                         if (found) {
2607                                 TRACE_MGMT_DBG("%s", "Scheduling srr work");
2608                                 schedule_work(&tgt->srr_work);
2609                         } else {
2610                                 PRINT_ERROR("qla2x00tgt(%ld): imm_srr_id "
2611                                         "== ctio_srr_id (%d), but there is no "
2612                                         "corresponding SRR IMM, deleting CTIO "
2613                                         "SRR %p", ha->instance, tgt->ctio_srr_id,
2614                                         sc);
2615                                 list_del(&sc->srr_list_entry);
2616                                 spin_unlock(&tgt->srr_lock);
2617
2618                                 kfree(sc);
2619                                 res = -EINVAL;
2620                                 goto out;
2621                         }
2622                 }
2623                 spin_unlock(&tgt->srr_lock);
2624         } else {
2625                 struct srr_imm *ti;
2626                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Unable to "
2627                     "allocate SRR CTIO entry", ha->instance);
2628                 spin_lock(&tgt->srr_lock);
2629                 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2630                                         srr_list_entry) {
2631                         if (imm->srr_id == tgt->ctio_srr_id) {
2632                                 TRACE_MGMT_DBG("IMM SRR %p deleted "
2633                                         "(id %d)", imm, imm->srr_id);
2634                                 list_del(&imm->srr_list_entry);
2635                                 q2t_reject_free_srr_imm(ha, imm, 1);
2636                         }
2637                 }
2638                 spin_unlock(&tgt->srr_lock);
2639                 res = -ENOMEM;
2640                 goto out;
2641         }
2642
2643 out:
2644         TRACE_EXIT_RES(res);
2645         return res;
2646 }
2647
2648 /*
2649  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2650  */
2651 static int q2t_term_ctio_exchange(scsi_qla_host_t *ha, void *ctio,
2652         struct q2t_cmd *cmd, uint32_t status)
2653 {
2654         int term = 0;
2655
2656         if (IS_FWI2_CAPABLE(ha)) {
2657                 if (ctio != NULL) {
2658                         ctio7_fw_entry_t *c = (ctio7_fw_entry_t *)ctio;
2659                         term = !(c->flags &
2660                                 __constant_cpu_to_le16(OF_TERM_EXCH));
2661                 } else
2662                         term = 1;
2663                 if (term) {
2664                         q24_send_term_exchange(ha, cmd,
2665                                 &cmd->atio.atio7, 1);
2666                 }
2667         } else {
2668                 if (status != CTIO_SUCCESS)
2669                         q2x_modify_command_count(ha, 1, 0);
2670 #if 0 /* it seems this isn't needed */
2671                 if (ctio != NULL) {
2672                         ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
2673                         term = !(c->flags &
2674                                 __constant_cpu_to_le16(
2675                                         CTIO7_FLAGS_TERMINATE));
2676                 } else
2677                         term = 1;
2678                 if (term) {
2679                         q2x_send_term_exchange(ha, cmd,
2680                                 &cmd->atio.atio2x, 1);
2681                 }
2682 #endif
2683         }
2684         return term;
2685 }
2686
2687 /* ha->hardware_lock supposed to be held on entry */
2688 static inline struct q2t_cmd *q2t_get_cmd(scsi_qla_host_t *ha, uint32_t handle)
2689 {
2690         handle--;
2691         if (ha->cmds[handle] != NULL) {
2692                 struct q2t_cmd *cmd = ha->cmds[handle];
2693                 ha->cmds[handle] = NULL;
2694                 return cmd;
2695         } else
2696                 return NULL;
2697 }
2698
2699 /* ha->hardware_lock supposed to be held on entry */
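     /*
      * Map a completed CTIO back to its q2t_cmd: normally via the handle and
      * ha->cmds[], otherwise (2xxx only) by looking up the session by loop ID
      * and the SCST command by its RX_ID tag.
      */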
2700 static struct q2t_cmd *q2t_ctio_to_cmd(scsi_qla_host_t *ha, uint32_t handle,
2701         void *ctio)
2702 {
2703         struct q2t_cmd *cmd = NULL;
2704
2705         /* Clear out internal marks */
2706         handle &= ~(CTIO_COMPLETION_HANDLE_MARK | CTIO_INTERMEDIATE_HANDLE_MARK);
2707
2708         if (handle != Q2T_NULL_HANDLE) {
2709                 if (unlikely(handle == Q2T_SKIP_HANDLE)) {
2710                         TRACE_DBG("%s", "SKIP_HANDLE CTIO");
2711                         goto out;
2712                 }
2713                 /* handle-1 is actually used */
2714                 if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
2715                         PRINT_ERROR("qla2x00tgt(%ld): Wrong handle %x "
2716                                 "received", ha->instance, handle);
2717                         goto out;
2718                 }
2719                 cmd = q2t_get_cmd(ha, handle);
2720                 if (unlikely(cmd == NULL)) {
2721                         PRINT_WARNING("qla2x00tgt(%ld): Suspicious: unable to "
2722                                    "find the command with handle %x",
2723                                    ha->instance, handle);
2724                         goto out;
2725                 }
2726         } else if (ctio != NULL) {
2727                 uint16_t loop_id;
2728                 int tag;
2729                 struct q2t_sess *sess;
2730                 struct scst_cmd *scst_cmd;
2731
2732                 if (IS_FWI2_CAPABLE(ha)) {
2733                         /* We can't get loop ID from CTIO7 */
2734                         PRINT_ERROR("qla2x00tgt(%ld): Wrong CTIO received: "
2735                                 "QLA24xx doesn't support NULL handles",
2736                                 ha->instance);
2737                         goto out;
2738                 } else {
2739                         ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
2740                         loop_id = GET_TARGET_ID(ha, c);
2741                         tag = c->rx_id;
2742                 }
2743
2744                 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
2745                 if (sess == NULL) {
2746                         PRINT_WARNING("qla2x00tgt(%ld): Suspicious: "
2747                                    "ctio_completion for non-existing session "
2748                                    "(loop_id %d, tag %d)",
2749                                    ha->instance, loop_id, tag);
2750                         goto out;
2751                 }
2752
2753                 scst_cmd = scst_find_cmd_by_tag(sess->scst_sess, tag);
2754                 if (scst_cmd == NULL) {
2755                         PRINT_WARNING("qla2x00tgt(%ld): Suspicious: unable to "
2756                              "find the command with tag %d (loop_id %d)",
2757                              ha->instance, tag, loop_id);
2758                         goto out;
2759                 }
2760
2761                 cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2762                 TRACE_DBG("Found q2t_cmd %p (tag %d)", cmd, tag);
2763         }
2764
2765 out:
2766         return cmd;
2767 }
2768
2769 /*
2770  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2771  */
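     /*
      * Common CTIO completion handler for both 2xxx and 24xx: look up the
      * command, terminate the exchange on errors where needed, and either
      * feed received write data to SCST (scst_rx_data) or complete the
      * command (scst_tgt_cmd_done) depending on its state.
      */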
2772 static void q2t_do_ctio_completion(scsi_qla_host_t *ha, uint32_t handle,
2773         uint32_t status, void *ctio)
2774 {
2775         struct scst_cmd *scst_cmd;
2776         struct q2t_cmd *cmd;
2777         enum scst_exec_context context;
2778
2779         TRACE_ENTRY();
2780
2781 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2782         context = SCST_CONTEXT_THREAD;
2783 #else
2784         context = SCST_CONTEXT_TASKLET;
2785 #endif
2786
2787         TRACE(TRACE_DEBUG|TRACE_SCSI, "handle(ctio %p status %#x) <- %08x",
2788               ctio, status, handle);
2789
2790         if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2791                 /* That could happen only in case of an error/reset/abort */
2792                 if (status != CTIO_SUCCESS) {
2793                         TRACE_MGMT_DBG("Intermediate CTIO received (status %x)",
2794                                 status);
2795                 }
2796                 goto out;
2797         }
2798
2799         cmd = q2t_ctio_to_cmd(ha, handle, ctio);
2800         if (cmd == NULL) {
2801                 if (status != CTIO_SUCCESS)
2802                         q2t_term_ctio_exchange(ha, ctio, NULL, status);
2803                 goto out;
2804         }
2805
2806         scst_cmd = cmd->scst_cmd;
2807
2808         if (unlikely(status != CTIO_SUCCESS)) {
2809                 switch (status & 0xFFFF) {
2810                 case CTIO_LIP_RESET:
2811                 case CTIO_TARGET_RESET:
2812                 case CTIO_ABORTED:
2813                 case CTIO_TIMEOUT:
2814                 case CTIO_INVALID_RX_ID:
2815                         /* they are OK */
2816                         TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): CTIO with "
2817                                 "status %#x received, state %x, scst_cmd %p, "
2818                                 "op %x (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2819                                 "TIMEOUT=b, INVALID_RX_ID=8)", ha->instance,
2820                                 status, cmd->state, scst_cmd, scst_cmd->cdb[0]);
2821                         break;
2822
2823                 case CTIO_PORT_LOGGED_OUT:
2824                 case CTIO_PORT_UNAVAILABLE:
2825                         PRINT_INFO("qla2x00tgt(%ld): CTIO with PORT LOGGED "
2826                                 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2827                                 "received (state %x, scst_cmd %p, op %x)",
2828                                 ha->instance, status, cmd->state, scst_cmd,
2829                                 scst_cmd->cdb[0]);
2830                         break;
2831
2832                 case CTIO_SRR_RECEIVED:
2833                         if (q2t_prepare_srr_ctio(ha, cmd, ctio) != 0)
2834                                 break;
2835                         else
2836                                 goto out;
2837
2838                 default:
2839                         PRINT_ERROR("qla2x00tgt(%ld): CTIO with error status "
2840                                 "0x%x received (state %x, scst_cmd %p, op %x)",
2841                                 ha->instance, status, cmd->state, scst_cmd,
2842                                 scst_cmd->cdb[0]);
2843                         break;
2844                 }
2845
2846                 if (cmd->state != Q2T_STATE_NEED_DATA)
2847                         if (q2t_term_ctio_exchange(ha, ctio, cmd, status))
2848                                 goto out;
2849         }
2850
2851         if (cmd->state == Q2T_STATE_PROCESSED) {
2852                 TRACE_DBG("Command %p finished", cmd);
2853                 if (q2t_has_data(cmd)) {
2854                         pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2855                                 cmd->dma_data_direction);
2856                 }
2857         } else if (cmd->state == Q2T_STATE_NEED_DATA) {
2858                 int rx_status = SCST_RX_STATUS_SUCCESS;
2859
2860                 cmd->state = Q2T_STATE_DATA_IN;
2861
2862                 if (unlikely(status != CTIO_SUCCESS))
2863                         rx_status = SCST_RX_STATUS_ERROR;
2864
2865                 TRACE_DBG("Data received, context %x, rx_status %d",
2866                       context, rx_status);
2867
2868                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2869                                 cmd->dma_data_direction);
2870
2871                 scst_rx_data(scst_cmd, rx_status, context);
2872                 goto out;
2873         } else if (cmd->state == Q2T_STATE_ABORTED) {
2874                 TRACE_MGMT_DBG("Aborted command %p (tag %d) finished", cmd,
2875                         cmd->tag);
2876         } else {
2877                 PRINT_ERROR("qla2x00tgt(%ld): A command in state (%d) should "
2878                         "not return a CTIO complete", ha->instance, cmd->state);
2879         }
2880
2881         if (unlikely(status != CTIO_SUCCESS)) {
2882                 TRACE_MGMT_DBG("%s", "Finishing failed CTIO");
2883                 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
2884         }
2885
2886         scst_tgt_cmd_done(scst_cmd, context);
2887
2888 out:
2889         TRACE_EXIT();
2890         return;
2891 }
2892
2893 /* ha->hardware_lock supposed to be held on entry */
2894 /* called via callback from qla2xxx */
2895 static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
2896 {
2897         struct q2t_tgt *tgt = ha->tgt;
2898
2899         TRACE_ENTRY();
2900
2901         if (likely(tgt != NULL)) {
2902                 tgt->irq_cmd_count++;
2903                 q2t_do_ctio_completion(ha, handle, CTIO_SUCCESS, NULL);
2904                 tgt->irq_cmd_count--;
2905         } else {
2906                 TRACE_DBG("CTIO, but target mode not enabled (ha %p handle "
2907                         "%#x)", ha, handle);
2908         }
2909
2910         TRACE_EXIT();
2911         return;
2912 }
2913
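/*
 * Turn a 2xxx ATIO into an SCST command: extract the LUN and CDB, set the
 * tag, expected data direction/length and queue type, then hand the command
 * to SCST with scst_cmd_init_done().
 */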
2914 /* ha->hardware_lock is supposed to be held on entry */
2915 static int q2x_do_send_cmd_to_scst(struct q2t_cmd *cmd)
2916 {
2917         int res = 0;
2918         struct q2t_sess *sess = cmd->sess;
2919         uint16_t lun;
2920         atio_entry_t *atio = &cmd->atio.atio2x;
2921         scst_data_direction dir;
2922         int context;
2923
2924         TRACE_ENTRY();
2925
2926         /* convert the LUN to network byte order */
2927         lun = swab16(le16_to_cpu(atio->lun));
2928         cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, (uint8_t *)&lun,
2929                                     sizeof(lun), atio->cdb, Q2T_MAX_CDB_LEN,
2930                                     SCST_ATOMIC);
2931
2932         if (cmd->scst_cmd == NULL) {
2933                 PRINT_ERROR("%s", "qla2x00tgt: scst_rx_cmd() failed");
2934                 res = -EFAULT;
2935                 goto out;
2936         }
2937
2938         cmd->tag = atio->rx_id;
2939         scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
2940         scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
2941
2942         if (atio->execution_codes & ATIO_EXEC_READ)
2943                 dir = SCST_DATA_READ;
2944         else if (atio->execution_codes & ATIO_EXEC_WRITE)
2945                 dir = SCST_DATA_WRITE;
2946         else
2947                 dir = SCST_DATA_NONE;
2948         scst_cmd_set_expected(cmd->scst_cmd, dir,
2949                 le32_to_cpu(atio->data_length));
2950
2951         switch (atio->task_codes) {
2952         case ATIO_SIMPLE_QUEUE:
2953                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2954                 break;
2955         case ATIO_HEAD_OF_QUEUE:
2956                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
2957                 break;
2958         case ATIO_ORDERED_QUEUE:
2959                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2960                 break;
2961         case ATIO_ACA_QUEUE:
2962                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
2963                 break;
2964         case ATIO_UNTAGGED:
2965                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
2966                 break;
2967         default:
2968                 PRINT_ERROR("qla2x00tgt: unknown task code %x, use "
2969                         "ORDERED instead", atio->task_codes);
2970                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2971                 break;
2972         }
2973
2974 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2975         context = SCST_CONTEXT_THREAD;
2976 #else
2977         context = SCST_CONTEXT_TASKLET;
2978 #endif
2979
2980         TRACE_DBG("Context %x", context);
2981         TRACE(TRACE_SCSI, "START Command (tag %d, queue_type %d)",
2982                 cmd->tag, cmd->scst_cmd->queue_type);
2983         scst_cmd_init_done(cmd->scst_cmd, context);
2984
2985 out:
2986         TRACE_EXIT_RES(res);
2987         return res;
2988 }
2989
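/*
 * 24xx/25xx (FWI2) counterpart of q2x_do_send_cmd_to_scst(): the LUN, CDB
 * and task attribute come from the FCP_CMND payload and the exchange
 * address is used as the command tag.
 */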
2990 /* ha->hardware_lock is supposed to be held on entry */
2991 static int q24_do_send_cmd_to_scst(struct q2t_cmd *cmd)
2992 {
2993         int res = 0;
2994         struct q2t_sess *sess = cmd->sess;
2995         atio7_entry_t *atio = &cmd->atio.atio7;
2996         scst_data_direction dir;
2997         int context;
2998
2999         TRACE_ENTRY();
3000
3001         cmd->scst_cmd = scst_rx_cmd(sess->scst_sess,
3002                 (uint8_t *)&atio->fcp_cmnd.lun, sizeof(atio->fcp_cmnd.lun),
3003                 atio->fcp_cmnd.cdb, Q2T_MAX_CDB_LEN, SCST_ATOMIC);
3004
3005         if (cmd->scst_cmd == NULL) {
3006                 PRINT_ERROR("%s", "qla2x00tgt: scst_rx_cmd() failed");
3007                 res = -EFAULT;
3008                 goto out;
3009         }
3010
3011         cmd->tag = atio->exchange_addr;
3012         scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
3013         scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
3014
3015         if (atio->fcp_cmnd.rddata)
3016                 dir = SCST_DATA_READ;
3017         else if (atio->fcp_cmnd.wrdata)
3018                 dir = SCST_DATA_WRITE;
3019         else
3020                 dir = SCST_DATA_NONE;
3021         scst_cmd_set_expected(cmd->scst_cmd, dir,
3022                 be32_to_cpu(atio->fcp_cmnd.data_length));
3023
3024         switch (atio->fcp_cmnd.task_attr) {
3025         case ATIO_SIMPLE_QUEUE:
3026                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3027                 break;
3028         case ATIO_HEAD_OF_QUEUE:
3029                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3030                 break;
3031         case ATIO_ORDERED_QUEUE:
3032                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3033                 break;
3034         case ATIO_ACA_QUEUE:
3035                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
3036                 break;
3037         case ATIO_UNTAGGED:
3038                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
3039                 break;
3040         default:
3041                 PRINT_ERROR("qla2x00tgt: unknown task code %x, use "
3042                         "ORDERED instead", atio->fcp_cmnd.task_attr);
3043                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3044                 break;
3045         }
3046
3047 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
3048         context = SCST_CONTEXT_THREAD;
3049 #else
3050         context = SCST_CONTEXT_TASKLET;
3051 #endif
3052
3053         TRACE_DBG("Context %x", context);
3054         TRACE(TRACE_SCSI, "START Command %p (tag %d, queue type %x)", cmd,
3055                 cmd->tag, cmd->scst_cmd->queue_type);
3056         scst_cmd_init_done(cmd->scst_cmd, context);
3057
3058 out:
3059         TRACE_EXIT_RES(res);
3060         return res;
3061 }
3062
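/*
 * Bind the command to its session and dispatch it to the 2xxx or 24xx
 * handler, depending on the firmware interface.
 */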
3063 /* ha->hardware_lock supposed to be held on entry */
3064 static int q2t_do_send_cmd_to_scst(scsi_qla_host_t *ha,
3065         struct q2t_cmd *cmd, struct q2t_sess *sess)
3066 {
3067         int res;
3068
3069         TRACE_ENTRY();
3070
3071         cmd->sess = sess;
3072         cmd->loop_id = sess->loop_id;
3073         cmd->conf_compl_supported = sess->conf_compl_supported;
3074
3075         if (IS_FWI2_CAPABLE(ha))
3076                 res = q24_do_send_cmd_to_scst(cmd);
3077         else
3078                 res = q2x_do_send_cmd_to_scst(cmd);
3079
3080         TRACE_EXIT_RES(res);
3081         return res;
3082 }
3083
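/*
 * Allocate and initialize a q2t_cmd for an incoming ATIO and send it to
 * SCST. If the initiator has no session yet, the command is queued on the
 * session work list so the session can be created in process context.
 */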
3084 /* ha->hardware_lock supposed to be held on entry */
3085 static int q2t_send_cmd_to_scst(scsi_qla_host_t *ha, atio_t *atio)
3086 {
3087         int res = 0;
3088         struct q2t_tgt *tgt = ha->tgt;
3089         struct q2t_sess *sess;
3090         struct q2t_cmd *cmd;
3091
3092         TRACE_ENTRY();
3093
3094         if (unlikely(tgt->tgt_shutdown)) {
3095                 TRACE_MGMT_DBG("New command while device %p is shutting "
3096                         "down", tgt);
3097                 res = -EFAULT;
3098                 goto out;
3099         }
3100
3101         cmd = kmem_cache_zalloc(q2t_cmd_cachep, GFP_ATOMIC);
3102         if (cmd == NULL) {
3103                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of cmd failed");
3104                 res = -ENOMEM;
3105                 goto out;
3106         }
3107
3108         memcpy(&cmd->atio.atio2x, atio, sizeof(*atio));
3109         cmd->state = Q2T_STATE_NEW;
3110         cmd->tgt = ha->tgt;
3111
3112         if (IS_FWI2_CAPABLE(ha)) {
3113                 atio7_entry_t *a = (atio7_entry_t *)atio;
3114                 sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
3115                 if (unlikely(sess == NULL)) {
3116                         TRACE_MGMT_DBG("qla2x00tgt(%ld): Unable to find "
3117                                 "wwn login (s_id %x:%x:%x), trying to create "
3118                                 "it manually", ha->instance,
3119                                 a->fcp_hdr.s_id[0], a->fcp_hdr.s_id[1],
3120                                 a->fcp_hdr.s_id[2]);
3121                         goto out_sched;
3122                 }
3123         } else {
3124                 sess = q2t_find_sess_by_loop_id(tgt,
3125                         GET_TARGET_ID(ha, (atio_entry_t *)atio));
3126                 if (unlikely(sess == NULL)) {
3127                         TRACE_MGMT_DBG("qla2x00tgt(%ld): Unable to find "
3128                                 "wwn login (loop_id=%d), trying to create it "
3129                                 "manually", ha->instance,
3130                                 GET_TARGET_ID(ha, (atio_entry_t *)atio));
3131                         goto out_sched;
3132                 }
3133         }
3134
3135         res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
3136         if (unlikely(res != 0))
3137                 goto out_free_cmd;
3138
3139 out:
3140         TRACE_EXIT_RES(res);
3141         return res;
3142
3143 out_free_cmd:
3144         q2t_free_cmd(cmd);
3145         goto out;
3146
3147 out_sched:
3148         {
3149                 struct q2t_sess_work_param *prm;
3150                 unsigned long flags;
3151
3152                 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
3153                 if (prm == NULL) {
3154                         PRINT_ERROR("%s", "Unable to create session work, "
3155                                 "command will be refused");
3156                         res = -1;
3157                         goto out_free_cmd;
3158                 }
3159
3160                 TRACE_MGMT_DBG("Scheduling work to find session for cmd %p",
3161                         cmd);
3162
3163                 prm->cmd = cmd;
3164
3165                 spin_lock_irqsave(&tgt->sess_work_lock, flags);
3166                 if (list_empty(&tgt->sess_works_list))
3167                         tgt->tm_to_unknown = 0;
3168                 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
3169                 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
3170
3171                 schedule_work(&tgt->sess_work);
3172         }
3173         goto out;
3174 }
3175
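/*
 * Allocate a management command and translate the QLA task management
 * function code into the corresponding SCST function, then submit it via
 * scst_rx_mgmt_fn_lun().
 */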
3176 /* ha->hardware_lock supposed to be held on entry */
3177 static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
3178         int lun_size, int fn, void *iocb, int flags)
3179 {
3180         int res = 0, rc = -1;
3181         struct q2t_mgmt_cmd *mcmd;
3182
3183         TRACE_ENTRY();
3184
3185         mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
3186         if (mcmd == NULL) {
3187                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Allocation of management "
3188                         "command failed, some commands and their data could "
3189                         "leak", sess->tgt->ha->instance);
3190                 res = -ENOMEM;
3191                 goto out;
3192         }
3193         memset(mcmd, 0, sizeof(*mcmd));
3194
3195         mcmd->sess = sess;
3196         if (iocb) {
3197                 memcpy(&mcmd->orig_iocb.notify_entry, iocb,
3198                         sizeof(mcmd->orig_iocb.notify_entry));
3199         }
3200         mcmd->flags = flags;
3201
3202         switch (fn) {
3203         case Q2T_CLEAR_ACA:
3204                 TRACE(TRACE_MGMT, "%s", "CLEAR_ACA received");
3205                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_ACA,
3206                                          lun, lun_size, SCST_ATOMIC, mcmd);
3207                 break;
3208
3209         case Q2T_TARGET_RESET:
3210                 TRACE(TRACE_MGMT, "%s", "TARGET_RESET received");
3211                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
3212                                          lun, lun_size, SCST_ATOMIC, mcmd);
3213                 break;
3214
3215         case Q2T_LUN_RESET:
3216                 TRACE(TRACE_MGMT, "%s", "LUN_RESET received");
3217                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
3218                                          lun, lun_size, SCST_ATOMIC, mcmd);
3219                 break;
3220
3221         case Q2T_CLEAR_TS:
3222                 TRACE(TRACE_MGMT, "%s", "CLEAR_TS received");
3223                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_TASK_SET,
3224                                          lun, lun_size, SCST_ATOMIC, mcmd);
3225                 break;
3226
3227         case Q2T_ABORT_TS:
3228                 TRACE(TRACE_MGMT, "%s", "ABORT_TS received");
3229                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_ABORT_TASK_SET,
3230                                          lun, lun_size, SCST_ATOMIC, mcmd);
3231                 break;
3232
3233         case Q2T_ABORT_ALL:
3234                 TRACE(TRACE_MGMT, "%s", "Doing ABORT_ALL_TASKS");
3235                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
3236                                          SCST_ABORT_ALL_TASKS,
3237                                          lun, lun_size, SCST_ATOMIC, mcmd);
3238                 break;
3239
3240         case Q2T_ABORT_ALL_SESS:
3241                 TRACE(TRACE_MGMT, "%s", "Doing ABORT_ALL_TASKS_SESS");
3242                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
3243                                          SCST_ABORT_ALL_TASKS_SESS,
3244                                          lun, lun_size, SCST_ATOMIC, mcmd);
3245                 break;
3246
3247         case Q2T_NEXUS_LOSS_SESS:
3248                 TRACE(TRACE_MGMT, "%s", "Doing NEXUS_LOSS_SESS");
3249                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS_SESS,
3250                                          lun, lun_size, SCST_ATOMIC, mcmd);
3251                 break;
3252
3253         case Q2T_NEXUS_LOSS:
3254                 TRACE(TRACE_MGMT, "%s", "Doing NEXUS_LOSS");
3255                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS,
3256                                          lun, lun_size, SCST_ATOMIC, mcmd);
3257                 break;
3258
3259         default:
3260                 PRINT_ERROR("qla2x00tgt(%ld): Unknown task mgmt fn 0x%x",
3261                             sess->tgt->ha->instance, fn);
3262                 rc = -1;
3263                 break;
3264         }
3265
3266         if (rc != 0) {
3267                 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_lun() failed: %d",
3268                             sess->tgt->ha->instance, rc);
3269                 res = -EFAULT;
3270                 goto out_free;
3271         }
3272
3273 out:
3274         TRACE_EXIT_RES(res);
3275         return res;
3276
3277 out_free:
3278         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
3279         goto out;
3280 }
3281
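/*
 * Parse the LUN and task management flags from the incoming IOCB (FCP_CMND
 * on FWI2 chips, immediate notify otherwise), look up the session and issue
 * the requested task management function.
 */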
3282 /* ha->hardware_lock supposed to be held on entry */
3283 static int q2t_handle_task_mgmt(scsi_qla_host_t *ha, void *iocb)
3284 {
3285         int res = 0;
3286         struct q2t_tgt *tgt;
3287         struct q2t_sess *sess;
3288         uint8_t *lun;
3289         uint16_t lun_data;
3290         int lun_size;
3291         int fn;
3292
3293         TRACE_ENTRY();
3294
3295         tgt = ha->tgt;
3296         if (IS_FWI2_CAPABLE(ha)) {
3297                 atio7_entry_t *a = (atio7_entry_t *)iocb;
3298                 lun = (uint8_t *)&a->fcp_cmnd.lun;
3299                 lun_size = sizeof(a->fcp_cmnd.lun);
3300                 fn = a->fcp_cmnd.task_mgmt_flags;
3301                 sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
3302                 if (sess != NULL) {
3303                         sess->s_id.b.al_pa = a->fcp_hdr.s_id[2];
3304                         sess->s_id.b.area = a->fcp_hdr.s_id[1];
3305                         sess->s_id.b.domain = a->fcp_hdr.s_id[0];
3306                 }
3307         } else {
3308                 notify_entry_t *n = (notify_entry_t *)iocb;
3309                 /* convert the LUN to network byte order */
3310                 lun_data = swab16(le16_to_cpu(n->lun));
3311                 lun = (uint8_t *)&lun_data;
3312                 lun_size = sizeof(lun_data);
3313                 fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
3314                 sess = q2t_find_sess_by_loop_id(tgt, GET_TARGET_ID(ha, n));
3315         }
3316
3317         if (sess == NULL) {
3318                 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task mgmt fn 0x%x for "
3319                         "non-existent session", ha->instance, fn);
3320                 tgt->tm_to_unknown = 1;
3321                 res = -ESRCH;
3322                 goto out;
3323         }
3324
3325         res = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
3326
3327 out:
3328         TRACE_EXIT_RES(res);
3329         return res;
3330 }
3331
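/*
 * Handle an ABORT TASK immediate notify: find the session by loop ID and
 * pass an SCST_ABORT_TASK request for the aborted tag to SCST.
 */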
3332 /* ha->hardware_lock supposed to be held on entry */
3333 static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
3334 {
3335         int res = 0, rc;
3336         struct q2t_mgmt_cmd *mcmd;
3337         struct q2t_sess *sess;
3338         int loop_id;
3339         uint32_t tag;
3340
3341         TRACE_ENTRY();
3342
3343         loop_id = GET_TARGET_ID(ha, iocb);
3344         tag = le16_to_cpu(iocb->seq_id);
3345
3346         sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
3347         if (sess == NULL) {
3348                 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task abort for non-existent "
3349                         "session", ha->instance);
3350                 ha->tgt->tm_to_unknown = 1;
3351                 res = -EFAULT;
3352                 goto out;
3353         }
3354
3355         mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
3356         if (mcmd == NULL) {
3357                 PRINT_ERROR("%s: Allocation of ABORT cmd failed", __func__);
3358                 res = -ENOMEM;
3359                 goto out;
3360         }
3361         memset(mcmd, 0, sizeof(*mcmd));
3362
3363         mcmd->sess = sess;
3364         memcpy(&mcmd->orig_iocb.notify_entry, iocb,
3365                 sizeof(mcmd->orig_iocb.notify_entry));
3366
3367         rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
3368                 SCST_ATOMIC, mcmd);
3369         if (rc != 0) {
3370                 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_tag() failed: %d",
3371                             ha->instance, rc);
3372                 res = -EFAULT;
3373                 goto out_free;
3374         }
3375
3376 out:
3377         TRACE_EXIT_RES(res);
3378         return res;
3379
3380 out_free:
3381         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
3382         goto out;
3383 }
3384
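/*
 * Handle an incoming ELS notification on 24xx hardware: PLOGI, FLOGI, PRLI,
 * LOGO, PRLO and unknown ELS codes trigger a session nexus loss, while
 * PDISC/ADISC are only acknowledged with a notify ack.
 */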
3385 /*
3386  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3387  */
3388 static int q24_handle_els(scsi_qla_host_t *ha, notify24xx_entry_t *iocb)
3389 {
3390         int res = 0;
3391
3392         TRACE_ENTRY();
3393
3394         TRACE(TRACE_MGMT, "ELS opcode %x", iocb->status_subcode);
3395
3396         switch (iocb->status_subcode) {
3397         case ELS_PLOGI:
3398         case ELS_FLOGI:
3399         case ELS_PRLI:
3400         case ELS_LOGO:
3401         case ELS_PRLO:
3402                 res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
3403                 break;
3404
3405         case ELS_PDISC:
3406         case ELS_ADISC:
3407         {
3408                 struct q2t_tgt *tgt = ha->tgt;
3409                 if (tgt->link_reinit_iocb_pending) {
3410                         q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
3411                         tgt->link_reinit_iocb_pending = 0;
3412                 }
3413                 res = 1; /* send notify ack */
3414                 break;
3415         }
3416
3417         default:
3418                 PRINT_ERROR("qla2x00tgt(%ld): Unsupported ELS command %x "
3419                         "received", ha->instance, iocb->status_subcode);
3420                 res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
3421                 break;
3422         }
3423
3424         TRACE_EXIT_RES(res);
3425         return res;
3426 }
3427
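/*
 * Build a new scatter-gather list that skips the first 'offset' bytes of
 * the command's data buffer, so the data transfer effectively starts at the
 * given offset.
 */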
3428 static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset)
3429 {
3430         int res = 0;
3431         int cnt, first_sg, first_page = 0, first_page_offs = 0, i;
3432         unsigned int l;
3433         int cur_dst, cur_src;
3434         struct scatterlist *sg;
3435         size_t bufflen = 0;
3436
3437         TRACE_ENTRY();
3438
3439         first_sg = -1;
3440         cnt = 0;
3441         l = 0;
3442         for (i = 0; i < cmd->sg_cnt; i++) {
3443                 l += cmd->sg[i].length;
3444                 if (l > offset) {
3445                         int sg_offs = l - cmd->sg[i].length;
3446                         first_sg = i;
3447                         if (cmd->sg[i].offset == 0) {
3448                                 first_page_offs = offset % PAGE_SIZE;
3449                                 first_page = (offset - sg_offs) >> PAGE_SHIFT;
3450                         } else {
3451                                 TRACE_SG("i=%d, sg[i].offset=%d, "
3452                                         "sg_offs=%d", i, cmd->sg[i].offset, sg_offs);
3453                                 if ((cmd->sg[i].offset + sg_offs) > offset) {
3454                                         first_page_offs = offset - sg_offs;
3455                                         first_page = 0;
3456                                 } else {
3457                                         int sec_page_offs = sg_offs +
3458                                                 (PAGE_SIZE - cmd->sg[i].offset);
3459                                         first_page_offs = sec_page_offs % PAGE_SIZE;
3460                                         first_page = 1 +
3461                                                 ((offset - sec_page_offs) >>
3462                                                         PAGE_SHIFT);
3463                                 }
3464                         }
3465                         cnt = cmd->sg_cnt - i + (first_page_offs != 0);
3466                         break;
3467                 }
3468         }
3469         if (first_sg == -1) {
3470                 PRINT_ERROR("qla2x00tgt(%ld): Wrong offset %d, buf length %d",
3471                         cmd->tgt->ha->instance, offset, cmd->bufflen);
3472                 res = -EINVAL;
3473                 goto out;
3474         }
3475
3476         TRACE_SG("offset=%d, first_sg=%d, first_page=%d, "
3477                 "first_page_offs=%d, cmd->bufflen=%d, cmd->sg_cnt=%d", offset,
3478                 first_sg, first_page, first_page_offs, cmd->bufflen,
3479                 cmd->sg_cnt);
3480
3481         sg = kmalloc(cnt * sizeof(sg[0]), GFP_KERNEL);
3482         if (sg == NULL) {
3483                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Unable to allocate cut "
3484                         "SG (len %zd)", cmd->tgt->ha->instance,
3485                         cnt * sizeof(sg[0]));
3486                 res = -ENOMEM;
3487                 goto out;
3488         }
3489         sg_init_table(sg, cnt);
3490
3491         cur_dst = 0;
3492         cur_src = first_sg;
3493         if (first_page_offs != 0) {
3494                 int fpgs;
3495                 sg_set_page(&sg[cur_dst], &sg_page(&cmd->sg[cur_src])[first_page],
3496                         PAGE_SIZE - first_page_offs, first_page_offs);
3497                 bufflen += sg[cur_dst].length;
3498                 TRACE_SG("cur_dst=%d, cur_src=%d, sg[].page=%p, "
3499                         "sg[].offset=%d, sg[].length=%d, bufflen=%zu",
3500                         cur_dst, cur_src, sg_page(&sg[cur_dst]), sg[cur_dst].offset,
3501                         sg[cur_dst].length, bufflen);
3502                 cur_dst++;
3503
3504                 fpgs = (cmd->sg[cur_src].length >> PAGE_SHIFT) +
3505                         ((cmd->sg[cur_src].length & ~PAGE_MASK) != 0);
3506                 first_page++;
3507                 if (fpgs > first_page) {
3508                         sg_set_page(&sg[cur_dst],
3509                                 &sg_page(&cmd->sg[cur_src])[first_page],
3510                                 cmd->sg[cur_src].length - PAGE_SIZE*first_page,
3511                                 0);
3512                         TRACE_SG("fpgs=%d, cur_dst=%d, cur_src=%d, "
3513                                 "sg[]