1 /*
2  *  qla2x00t.c
3  *
4  *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
7  *  Copyright (C) 2006 - 2009 ID7 Ltd.
8  *
9  *  QLogic 22xx/23xx/24xx/25xx FC target driver.
10  *  
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License
13  *  as published by the Free Software Foundation, version 2
14  *  of the License.
15  * 
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  *  GNU General Public License for more details.
20  */
21
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/types.h>
25 #include <linux/version.h>
26 #include <linux/blkdev.h>
27 #include <linux/interrupt.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <linux/pci.h>
31 #include <linux/delay.h>
32 #include <linux/seq_file.h>
33 #include <linux/list.h>
34
35 #include <scst.h>
36
37 #include "qla2x00t.h"
38
39 #ifndef CONFIG_SCSI_QLA2XXX_TARGET
40 #error "CONFIG_SCSI_QLA2XXX_TARGET is NOT DEFINED"
41 #endif
42
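   /*
    * Default trace flags: a verbose set for CONFIG_SCST_DEBUG builds, a
    * reduced set when only CONFIG_SCST_TRACING is compiled in.
    */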
43 #ifdef CONFIG_SCST_DEBUG
44 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
45         TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_MINOR | \
46         TRACE_MGMT_DEBUG | TRACE_MINOR | TRACE_SPECIAL)
47 #else
48 # ifdef CONFIG_SCST_TRACING
49 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MINOR | \
50         TRACE_SPECIAL)
51 # endif
52 #endif
53
54 static int q2x_target_detect(struct scst_tgt_template *templ);
55 static int q24_target_detect(struct scst_tgt_template *templ);
56 static int q2t_target_release(struct scst_tgt *scst_tgt);
57 static int q2x_xmit_response(struct scst_cmd *scst_cmd);
58 static int q24_xmit_response(struct scst_cmd *scst_cmd);
59 static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd);
60 static void q2t_on_free_cmd(struct scst_cmd *scst_cmd);
61 static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
62
63 /* Prototypes for the callbacks handed to the qla2xxx initiator driver */
64 static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *pkt);
65 static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt);
66 static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
67         uint16_t *mailbox);
68 static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle);
69 static int q2t_host_action(scsi_qla_host_t *ha, 
70         qla2x_tgt_host_action_t action);
71 static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport);
72 static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport);
73 static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
74         int lun_size, int fn, void *iocb, int flags);
75 static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
76         atio_entry_t *atio, int ha_locked);
77 static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
78         atio7_entry_t *atio, int ha_locked);
79 static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
80         int ha_lock);
81 static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset);
82 static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only);
83 static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd);
84 static int q2t_unreg_sess(struct q2t_sess *sess);
85
86 /*
87  * Global Variables
88  */
89
90 static struct scst_tgt_template tgt2x_template = {
91         .name = "qla2x00tgt",
92         .sg_tablesize = 0,
93         .use_clustering = 1,
94 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
95         .xmit_response_atomic = 0,
96         .rdy_to_xfer_atomic = 0,
97 #else
98         .xmit_response_atomic = 1,
99         .rdy_to_xfer_atomic = 1,
100 #endif
101 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
102         .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
103 #endif
104         .detect = q2x_target_detect,
105         .release = q2t_target_release,
106         .xmit_response = q2x_xmit_response,
107         .rdy_to_xfer = q2t_rdy_to_xfer,
108         .on_free_cmd = q2t_on_free_cmd,
109         .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
110 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
111         .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
112 #endif
113 };
114
115 static struct scst_tgt_template tgt24_template = {
116         .name = "qla24xx-tgt",
117         .sg_tablesize = 0,
118         .use_clustering = 1,
119         .no_proc_entry = 1,
120 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
121         .xmit_response_atomic = 0,
122         .rdy_to_xfer_atomic = 0,
123 #else
124         .xmit_response_atomic = 1,
125         .rdy_to_xfer_atomic = 1,
126 #endif
127 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
128         .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
129 #endif
130         .detect = q24_target_detect,
131         .release = q2t_target_release,
132         .xmit_response = q24_xmit_response,
133         .rdy_to_xfer = q2t_rdy_to_xfer,
134         .on_free_cmd = q2t_on_free_cmd,
135         .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
136 #if SCST_VERSION_CODE >= SCST_VERSION(1, 0, 2, 0)
137         .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
138 #endif
139 };
140
141 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
142 #define trace_flag q2t_trace_flag
143 static unsigned long q2t_trace_flag = Q2T_DEFAULT_LOG_FLAGS;
144 #endif
145
146 static struct kmem_cache *q2t_cmd_cachep;
147 static struct qla_target tgt_data;
148 static struct kmem_cache *q2t_mgmt_cmd_cachep;
149 static mempool_t *q2t_mgmt_cmd_mempool;
150
151 static DECLARE_RWSEM(q2t_unreg_rwsem);
152
153 /* PPL offset reporting is not yet supported, so always return 0 */
154 static inline int scst_cmd_get_ppl_offset(struct scst_cmd *scst_cmd)
155 {
156         return 0;
157 }
158
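    /*
     * Session reference counting: sess_ref is protected by ha->hardware_lock;
     * dropping the last reference unregisters the session via q2t_unreg_sess().
     */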
159 /* ha->hardware_lock supposed to be held on entry */
160 static inline void q2t_sess_get(struct q2t_sess *sess)
161 {
162         sess->sess_ref++;
163         TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref);
164 }
165
166 /* ha->hardware_lock supposed to be held on entry */
167 static inline void q2t_sess_put(struct q2t_sess *sess)
168 {
169         TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref-1);
170         sBUG_ON(sess->sess_ref == 0);
171
172         sess->sess_ref--;
173         if (sess->sess_ref == 0)
174                 q2t_unreg_sess(sess);
175 }
176
177 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
178 static inline struct q2t_sess *q2t_find_sess_by_loop_id(struct q2t_tgt *tgt,
179         uint16_t lid) 
180 {
181         struct q2t_sess *sess;  
182         sBUG_ON(tgt == NULL);
183         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
184                 if (lid == (sess->loop_id))
185                         return sess;
186         }
187         return NULL;
188 }
189
190 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
191 static inline struct q2t_sess *q2t_find_sess_by_s_id(struct q2t_tgt *tgt, 
192         const uint8_t *s_id) 
193 {
194         struct q2t_sess *sess;  
195         sBUG_ON(tgt == NULL);
196         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
197                 if ((sess->s_id.b.al_pa == s_id[2]) && 
198                     (sess->s_id.b.area == s_id[1]) && 
199                     (sess->s_id.b.domain == s_id[0]))
200                         return sess;
201         }
202         return NULL;
203 }
204
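    /*
     * Same as q2t_find_sess_by_s_id(), but the S_ID is given in the byte order
     * used by the little-endian fcp_hdr_le of 24xx entries (AL_PA first,
     * domain last).
     */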
205 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
206 static inline struct q2t_sess *q2t_find_sess_by_s_id_le(struct q2t_tgt *tgt, 
207         const uint8_t *s_id) 
208 {
209         struct q2t_sess *sess;  
210         sBUG_ON(tgt == NULL);
211         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
212                 if ((sess->s_id.b.al_pa == s_id[0]) && 
213                     (sess->s_id.b.area == s_id[1]) && 
214                     (sess->s_id.b.domain == s_id[2]))
215                         return sess;
216         }
217         return NULL;
218 }
219
220
221 /* ha->hardware_lock supposed to be held on entry */
222 static inline void q2t_exec_queue(scsi_qla_host_t *ha)
223 {
224         tgt_data.isp_cmd(ha);
225 }
226
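    /*
     * ha->marker_needed is set by the initiator driver (e.g. after a reset);
     * a marker IOCB must then go out before any further request entries.
     */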
227 /* Might release the hw lock, then reacquire it */
228 static inline int q2t_issue_marker(scsi_qla_host_t *ha, int ha_locked)
229 {
230         /* Send marker if required */
231         if (unlikely(ha->marker_needed != 0)) {
232                 int rc = tgt_data.issue_marker(ha, ha_locked);
233                 if (rc != QLA_SUCCESS) {
234                         PRINT_ERROR("qla2x00tgt(%ld): issue_marker() "
235                                 "failed", ha->instance);
236                 }
237                 return rc;
238         }
239         return (QLA_SUCCESS);
240 }
241
242 /* 
243  * Registers with the initiator driver (but target mode isn't enabled until
244  * it's turned on via sysfs)
245  */
246 static int q2x_target_detect(struct scst_tgt_template *templ)
247 {
248         int res;
249         struct qla_tgt_initiator itd = {
250                 .magic = QLA2X_TARGET_MAGIC,
251                 .tgt24_atio_pkt = q24_atio_pkt,
252                 .tgt_response_pkt = q2t_response_pkt,
253                 .tgt2x_ctio_completion = q2x_ctio_completion,
254                 .tgt_async_event = q2t_async_event,
255                 .tgt_host_action = q2t_host_action,
256                 .tgt_fc_port_added = q2t_fc_port_added,
257                 .tgt_fc_port_deleted = q2t_fc_port_deleted,
258         };
259
260         TRACE_ENTRY();
261
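            /*
             * Hand the initiator driver our callback vector (itd) and receive
             * its helper function table back in tgt_data; the magic values
             * guard against a mismatch between the two modules.
             */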
262         res = qla2xxx_tgt_register_driver(&itd, &tgt_data);
263         if (res != 0) {
264                 PRINT_ERROR("Unable to register driver: %d", res);
265                 goto out;
266         }
267
268         if (tgt_data.magic != QLA2X_INITIATOR_MAGIC) {
269                 PRINT_ERROR("Wrong version of the initiator part: %d", 
270                             tgt_data.magic);
271                 res = -EINVAL;
                    goto out;
272         }
273
274         PRINT_INFO("%s", "Target mode driver for QLogic 2x00 controller "
275                 "registered successfully");
276
277 out:
278         TRACE_EXIT();
279         return res;
280 }
281
282 static int q24_target_detect(struct scst_tgt_template *templ)
283 {
284         /* Nothing to do */
285         return 0;
286 }
287
288 static void q2t_free_session_done(struct scst_session *scst_sess)
289 {
290         struct q2t_sess *sess;
291         struct q2t_tgt *tgt;
292         scsi_qla_host_t *ha;
293         unsigned long flags;
294
295         TRACE_ENTRY();
296
297         sBUG_ON(scst_sess == NULL);
298         sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
299         sBUG_ON(sess == NULL);
300         tgt = sess->tgt;
301
302         TRACE_MGMT_DBG("Unregistration of sess %p finished", sess);
303
304         kfree(sess);
305
306         if (tgt == NULL)
307                 goto out;
308
309         TRACE_DBG("empty(sess_list) %d sess_count %d",
310               list_empty(&tgt->sess_list), tgt->sess_count);
311
312         ha = tgt->ha;
313
314         /*
315          * We need to protect against the race where tgt is freed before or
316          * inside wake_up()
317          */
318         spin_lock_irqsave(&ha->hardware_lock, flags);
319         tgt->sess_count--;
320         if (tgt->sess_count == 0)
321                 wake_up_all(&tgt->waitQ);
322         spin_unlock_irqrestore(&ha->hardware_lock, flags);
323
324 out:
325         TRACE_EXIT();
326         return;
327 }
328
329 /* ha->hardware_lock supposed to be held on entry */
330 static int q2t_unreg_sess(struct q2t_sess *sess)
331 {
332         int res = 1;
333
334         TRACE_ENTRY();
335
336         sBUG_ON(sess == NULL);
337         sBUG_ON(sess->sess_ref != 0);
338
339         TRACE_MGMT_DBG("Deleting sess %p from tgt %p", sess, sess->tgt);
340         list_del(&sess->sess_list_entry);
341
342         if (sess->deleted)
343                 list_del(&sess->del_list_entry);
344
345         PRINT_INFO("qla2x00tgt(%ld): %ssession for loop_id %d deleted",
346                 sess->tgt->ha->instance, sess->local ? "local " : "",
347                 sess->loop_id);
348
349         scst_unregister_session(sess->scst_sess, 0, q2t_free_session_done);
350
351         TRACE_EXIT_RES(res);
352         return res;
353 }
354
355 /* ha->hardware_lock supposed to be held on entry */
356 static int q2t_reset(scsi_qla_host_t *ha, void *iocb, int mcmd)
357 {
358         struct q2t_sess *sess;
359         int loop_id;
360         uint16_t lun = 0;
361         int res = 0;
362
363         TRACE_ENTRY();
364
365         if (IS_FWI2_CAPABLE(ha)) {
366                 notify24xx_entry_t *n = (notify24xx_entry_t *)iocb;
367                 loop_id = le16_to_cpu(n->nport_handle);
368         } else
369                 loop_id = GET_TARGET_ID(ha, (notify_entry_t *)iocb);
370
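            /*
             * A loop_id of 0xFFFF marks a global event: drop the local
             * sessions and issue the (now global) task management function
             * through the first remaining session, if any.
             */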
371         if (loop_id == 0xFFFF) {
372                 /* Global event */
373                 q2t_clear_tgt_db(ha->tgt, 1);
374                 if (!list_empty(&ha->tgt->sess_list)) {
375                         sess = list_entry(ha->tgt->sess_list.next,
376                                 typeof(*sess), sess_list_entry);
377                         switch (mcmd) {
378                         case Q2T_NEXUS_LOSS_SESS:
379                                 mcmd = Q2T_NEXUS_LOSS;
380                                 break;
381
382                         case Q2T_ABORT_ALL_SESS:
383                                 mcmd = Q2T_ABORT_ALL;
384                                 break;
385
386                         case Q2T_NEXUS_LOSS:
387                         case Q2T_ABORT_ALL:
388                                 break;
389
390                         default:
391                                 PRINT_ERROR("qla2x00tgt(%ld): Not allowed "
392                                         "command %x in %s", ha->instance,
393                                         mcmd, __func__);
394                                 sess = NULL;
395                                 break;
396                         }
397                 } else
398                         sess = NULL;
399         } else
400                 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
401
402         if (sess == NULL) {
403                 res = -ESRCH;
404                 ha->tgt->tm_to_unknown = 1;
405                 goto out;
406         }
407
408         TRACE_MGMT_DBG("scsi(%ld): resetting (session %p, "
409                 "mcmd %x, loop_id %d)", ha->host_no, sess, mcmd, loop_id);
410
411         res = q2t_issue_task_mgmt(sess, (uint8_t *)&lun, sizeof(lun),
412                         mcmd, iocb, Q24_MGMT_SEND_NACK);
413
414 out:
415         TRACE_EXIT_RES(res);
416         return res;
417 }
418
419 /* ha->hardware_lock supposed to be held on entry */
420 static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only)
421 {
422         struct q2t_sess *sess, *sess_tmp;
423
424         TRACE_ENTRY();
425
426         TRACE(TRACE_MGMT, "Clearing target's DB %p", tgt);
427
428         list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
429                                         sess_list_entry) {
430                 if (local_only && !sess->local)
431                         continue;
432                 if (local_only && sess->local)
433                         TRACE_MGMT_DBG("Putting local session %p", sess);
434                 q2t_sess_put(sess);
435         }
436
437         /* At this point tgt could be already dead */
438
439         TRACE_MGMT_DBG("Finished clearing tgt %p DB", tgt);
440
441         TRACE_EXIT();
442         return;
443 }
444
445 /* Called in a thread context */
446 static void q2t_alloc_session_done(struct scst_session *scst_sess,
447                                    void *data, int result)
448 {
449         TRACE_ENTRY();
450
451         if (result != 0) {
452                 struct q2t_sess *sess = (struct q2t_sess *)data;
453                 struct q2t_tgt *tgt = sess->tgt;
454                 scsi_qla_host_t *ha = tgt->ha;
455                 unsigned long flags;
456
457                 PRINT_INFO("qla2x00tgt(%ld): Session initialization failed",
458                            ha->instance);
459
460                 spin_lock_irqsave(&ha->hardware_lock, flags);
461                 q2t_sess_put(sess);
462                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
463         }
464
465         TRACE_EXIT();
466         return;
467 }
468
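    /*
     * Delayed-deletion timer: del_sess_list is kept in expiry order (entries
     * are appended), so drop every session whose grace period has passed and
     * re-arm the timer for the first one that has not expired yet.
     */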
469 static void q2t_del_sess_timer_fn(unsigned long arg)
470 {
471         struct q2t_tgt *tgt = (struct q2t_tgt *)arg;
472         scsi_qla_host_t *ha = tgt->ha;
473         struct q2t_sess *sess;
474         unsigned long flags;
475
476         TRACE_ENTRY();
477
478         spin_lock_irqsave(&ha->hardware_lock, flags);
479         while (!list_empty(&tgt->del_sess_list)) {
480                 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
481                                 del_list_entry);
482                 if (time_after_eq(jiffies, sess->expires)) {
483                         /* 
484                          * sess will be deleted from del_sess_list in
485                          * q2t_unreg_sess()
486                          */
487                         TRACE_MGMT_DBG("Timeout: sess %p about to be deleted",
488                                 sess);
489                         q2t_sess_put(sess);
490                 } else {
491                         tgt->sess_del_timer.expires = sess->expires;
492                         add_timer(&tgt->sess_del_timer);
493                         break;
494                 }
495         }
496         spin_unlock_irqrestore(&ha->hardware_lock, flags);
497
498         TRACE_EXIT();
499         return;
500 }
501
502 /*
503  * Must be called under tgt_mutex.
504  *
505  * Adds an extra ref so the hw lock can be dropped after adding the sess to
506  * the list. The caller must put it.
507  */
508 static struct q2t_sess *q2t_create_sess(scsi_qla_host_t *ha, fc_port_t *fcport,
509         bool local)
510 {
511         char *wwn_str;
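            /*
             * WWN rendered as 8 two-digit hex bytes plus 7 colons and a NUL
             * needs 24 bytes; 3*WWN_SIZE+2 (26 with WWN_SIZE == 8) leaves a
             * little slack.
             */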
512         const int wwn_str_len = 3*WWN_SIZE+2;
513         struct q2t_tgt *tgt = ha->tgt;
514         struct q2t_sess *sess;
515
516         TRACE_ENTRY();
517
518         /* Check to avoid double sessions */
519         spin_lock_irq(&ha->hardware_lock);
520         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
521                 if ((sess->port_name[0] == fcport->port_name[0]) &&
522                     (sess->port_name[1] == fcport->port_name[1]) &&
523                     (sess->port_name[2] == fcport->port_name[2]) &&
524                     (sess->port_name[3] == fcport->port_name[3]) &&
525                     (sess->port_name[4] == fcport->port_name[4]) &&
526                     (sess->port_name[5] == fcport->port_name[5]) &&
527                     (sess->port_name[6] == fcport->port_name[6]) &&
528                     (sess->port_name[7] == fcport->port_name[7])) {
529                         TRACE_MGMT_DBG("Double sess %p found (s_id %x:%x:%x, "
530                                 "loop_id %d), updating to d_id %x:%x:%x, "
531                                 "loop_id %d", sess, sess->s_id.b.al_pa,
532                                 sess->s_id.b.area, sess->s_id.b.domain,
533                                 sess->loop_id, fcport->d_id.b.al_pa,
534                                 fcport->d_id.b.area, fcport->d_id.b.domain,
535                                 fcport->loop_id);
536
537                         if (sess->deleted) {
538                                 list_del(&sess->del_list_entry);
539                                 sess->deleted = 0;
540                         }
541
542                         sess->s_id = fcport->d_id;
543                         sess->loop_id = fcport->loop_id;
544                         sess->conf_compl_supported = fcport->conf_compl_supported;
545                         if (sess->local && !local)
546                                 sess->local = false;
547                         spin_unlock_irq(&ha->hardware_lock);
548                         goto out;
549                 }
550         }
551         spin_unlock_irq(&ha->hardware_lock);
552
553         /* We are under tgt_mutex, so a new sess can't be added behind us */
554
555         sess = kzalloc(sizeof(*sess), GFP_KERNEL);
556         if (sess == NULL) {
557                 PRINT_ERROR("qla2x00tgt(%ld): session allocation failed, "
558                         "all commands from port %02x:%02x:%02x:%02x:"
559                         "%02x:%02x:%02x:%02x will be refused", ha->instance, 
560                         fcport->port_name[0], fcport->port_name[1], 
561                         fcport->port_name[2], fcport->port_name[3], 
562                         fcport->port_name[4], fcport->port_name[5], 
563                         fcport->port_name[6], fcport->port_name[7]);
564                 goto out;
565         }
566
567         sess->sess_ref = 2; /* plus 1 extra ref, see above */
568         sess->tgt = ha->tgt;
569         sess->s_id = fcport->d_id;
570         sess->loop_id = fcport->loop_id;
571         sess->conf_compl_supported = fcport->conf_compl_supported;
572         sess->local = local;
573         BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
574         memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
575
576         wwn_str = kmalloc(wwn_str_len, GFP_KERNEL);
577         if (wwn_str == NULL) {
578                 PRINT_ERROR("qla2x00tgt(%ld): Allocation of wwn_str failed. "
579                         "All commands from port %02x:%02x:%02x:%02x:%02x:%02x:"
580                         "%02x:%02x will be refused", ha->instance, 
581                         fcport->port_name[0], fcport->port_name[1], 
582                         fcport->port_name[2], fcport->port_name[3], 
583                         fcport->port_name[4], fcport->port_name[5], 
584                         fcport->port_name[6], fcport->port_name[7]);
585                 goto out_free_sess;
586         }
587
588         sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
589                 fcport->port_name[0], fcport->port_name[1], 
590                 fcport->port_name[2], fcport->port_name[3], 
591                 fcport->port_name[4], fcport->port_name[5], 
592                 fcport->port_name[6], fcport->port_name[7]);
593
594         /* Do the session creation asynchronously */
595         sess->scst_sess = scst_register_session(tgt->scst_tgt, 1, wwn_str,
596                 sess, q2t_alloc_session_done);
597         
598         if (sess->scst_sess == NULL) {
599                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): scst_register_session() "
600                         "failed for host %ld (wwn %s, loop_id %d), all "
601                         "commands from it will be refused", ha->instance,
602                         ha->host_no, wwn_str, fcport->loop_id);
603                 goto out_free_sess_wwn;
604         }
605         scst_sess_set_tgt_priv(sess->scst_sess, sess);
606
607         spin_lock_irq(&ha->hardware_lock);
608         TRACE_MGMT_DBG("Adding sess %p to tgt %p", sess, tgt);
609         list_add_tail(&sess->sess_list_entry, &tgt->sess_list);
610         tgt->sess_count++;
611         spin_unlock_irq(&ha->hardware_lock);
612
613         PRINT_INFO("qla2x00tgt(%ld): %ssession for wwn %s (loop_id %d, "
614                 "s_id %x:%x:%x, confirmed completion %ssupported) added",
615                 ha->instance, local ? "local " : "", wwn_str, fcport->loop_id,
616                 sess->s_id.b.al_pa, sess->s_id.b.area, sess->s_id.b.domain,
617                 sess->conf_compl_supported ? "" : "not ");
618
619         kfree(wwn_str);
620
621 out:
622         TRACE_EXIT_HRES(sess);
623         return sess;
624
625 out_free_sess_wwn:
626         kfree(wwn_str);
627         /* fall through */
628
629 out_free_sess:
630         kfree(sess);
631         sess = NULL;
632         goto out;
633 }
634
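    /*
     * Initiator-driver callback for a newly discovered (or updated) remote
     * port: creates a session for a new initiator port, or cancels the
     * pending delayed deletion for a returning one.
     */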
635 static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport)
636 {
637         struct q2t_tgt *tgt;
638         struct q2t_sess *sess;
639
640         TRACE_ENTRY();
641
642         mutex_lock(&ha->tgt_mutex);
643
644         tgt = ha->tgt;
645
646         if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
647                 goto out_unlock;
648
649         if (tgt->tgt_shutdown)
650                 goto out_unlock;
651
652         spin_lock_irq(&ha->hardware_lock);
653
654         sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
655         if (sess == NULL) {
656                 spin_unlock_irq(&ha->hardware_lock);
657                 sess = q2t_create_sess(ha, fcport, false);
658                 spin_lock_irq(&ha->hardware_lock);
659                 if (sess != NULL)
660                         q2t_sess_put(sess); /* put the extra creation ref */
661         } else {
662                 if (sess->deleted) {
663                         list_del(&sess->del_list_entry);
664                         sess->deleted = 0;
665
666                         PRINT_INFO("qla2x00tgt(%ld): session for port %02x:"
667                                 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
668                                 "reappeared", ha->instance, fcport->port_name[0],
669                                 fcport->port_name[1], fcport->port_name[2],
670                                 fcport->port_name[3], fcport->port_name[4],
671                                 fcport->port_name[5], fcport->port_name[6],
672                                 fcport->port_name[7], sess->loop_id);
673                         TRACE_MGMT_DBG("Appeared sess %p", sess);
674                 } else if (sess->local) {
675                         TRACE(TRACE_MGMT, "qla2x00tgt(%ld): local session for "
676                                 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
677                                 "(loop ID %d) became global", ha->instance,
678                                 fcport->port_name[0], fcport->port_name[1],
679                                 fcport->port_name[2], fcport->port_name[3],
680                                 fcport->port_name[4], fcport->port_name[5],
681                                 fcport->port_name[6], fcport->port_name[7],
682                                 sess->loop_id);
683                 }
684                 sess->local = 0;
685         }
686
687         spin_unlock_irq(&ha->hardware_lock);
688
689 out_unlock:
690         mutex_unlock(&ha->tgt_mutex);
691
692         TRACE_EXIT();
693         return;
694 }
695
696 static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport)
697 {
698         struct q2t_tgt *tgt;
699         struct q2t_sess *sess;
700         uint32_t dev_loss_tmo;
701
702         TRACE_ENTRY();
703
704         mutex_lock(&ha->tgt_mutex);
705
706         tgt = ha->tgt;
707
708         if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
709                 goto out_unlock;
710
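            /*
             * Give the initiator a grace period (roughly its port-down retry
             * window plus a few seconds) before the session is actually
             * dropped; if it re-logins in time, q2t_fc_port_added() cancels
             * the deletion.
             */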
711         dev_loss_tmo = ha->port_down_retry_count + 5;
712
713         if (tgt->tgt_shutdown)
714                 goto out_unlock;
715
716         spin_lock_irq(&ha->hardware_lock);
717
718         sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
719         if (sess == NULL)
720                 goto out_unlock_ha;
721
722         if (!sess->deleted) {
723                 int add_tmr;
724
725                 add_tmr = list_empty(&tgt->del_sess_list);
726
727                 TRACE_MGMT_DBG("Scheduling sess %p to deletion", sess);
728                 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
729                 sess->deleted = 1;
730
731                 PRINT_INFO("qla2x00tgt(%ld): %ssession for port %02x:%02x:%02x:"
732                         "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
733                         "deletion in %d secs", ha->instance,
734                         sess->local ? "local " : "",
735                         fcport->port_name[0], fcport->port_name[1],
736                         fcport->port_name[2], fcport->port_name[3],
737                         fcport->port_name[4], fcport->port_name[5],
738                         fcport->port_name[6], fcport->port_name[7],
739                         sess->loop_id, dev_loss_tmo);
740
741                 sess->expires = jiffies + dev_loss_tmo * HZ;
742                 if (add_tmr)
743                         mod_timer(&tgt->sess_del_timer, sess->expires);
744         }
745
746 out_unlock_ha:
747         spin_unlock_irq(&ha->hardware_lock);
748
749 out_unlock:
750         mutex_unlock(&ha->tgt_mutex);
751
752         TRACE_EXIT();
753         return;
754 }
755
756 static inline int test_tgt_sess_count(struct q2t_tgt *tgt)
757 {
758         unsigned long flags;
759         int res;
760
761         /*
762          * We need to protect against the race where tgt is freed before or
763          * inside wake_up()
764          */
765         spin_lock_irqsave(&tgt->ha->hardware_lock, flags);
766         TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
767               tgt, list_empty(&tgt->sess_list), tgt->sess_count);
768         res = (tgt->sess_count == 0);
769         spin_unlock_irqrestore(&tgt->ha->hardware_lock, flags);
770
771         return res;
772 }
773
774 /* Must be called under read locked q2t_unreg_rwsem */
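    /*
     * Teardown order: mark the target as shutting down and drop all sessions
     * under the locks, flush the session works, wait for sess_count to reach
     * zero, disable target mode, then wait for in-flight IRQ commands before
     * freeing tgt.
     */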
775 static int q2t_target_release(struct scst_tgt *scst_tgt)
776 {
777         int res = 0;
778         struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
779         scsi_qla_host_t *ha = tgt->ha;
780
781         TRACE_ENTRY();
782
783         /* 
784          * The mutex is needed to sync with q2t_fc_port_[added,deleted]; the
785          * hardware lock because we can still get an incoming packet.
786          */
787
788         mutex_lock(&ha->tgt_mutex);
789         spin_lock_irq(&ha->hardware_lock);
790         tgt->tgt_shutdown = 1;
791         q2t_clear_tgt_db(tgt, false);
792         spin_unlock_irq(&ha->hardware_lock);
793         mutex_unlock(&ha->tgt_mutex);
794
795         del_timer_sync(&tgt->sess_del_timer);
796
797         TRACE_MGMT_DBG("Waiting for sess works (tgt %p)", tgt);
798         spin_lock_irq(&tgt->sess_work_lock);
799         while (!list_empty(&tgt->sess_works_list)) {
800                 spin_unlock_irq(&tgt->sess_work_lock);
801                 flush_scheduled_work();
802                 spin_lock_irq(&tgt->sess_work_lock);
803         }
804         spin_unlock_irq(&tgt->sess_work_lock);
805
806         TRACE_MGMT_DBG("Waiting for tgt %p: list_empty(sess_list)=%d "
807                 "sess_count=%d", tgt, list_empty(&tgt->sess_list),
808                 tgt->sess_count);
809
810         wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
811
812         /* Big hammer */
813         if (!ha->host_shutting_down) 
814                 tgt_data.disable_tgt_mode(ha);
815
816         /* Wait for sessions to clear out (just in case) */
817         wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); 
818
819         TRACE_MGMT_DBG("Waiting for %d IRQ commands to complete (tgt %p)",
820                 tgt->irq_cmd_count, tgt);
821
822         mutex_lock(&ha->tgt_mutex);
823         spin_lock_irq(&ha->hardware_lock);
824         while (tgt->irq_cmd_count != 0) {
825                 spin_unlock_irq(&ha->hardware_lock);
826                 udelay(2);
827                 spin_lock_irq(&ha->hardware_lock);
828         }
829         scst_tgt_set_tgt_priv(scst_tgt, NULL);
830         ha->tgt = NULL;
831         spin_unlock_irq(&ha->hardware_lock);
832         mutex_unlock(&ha->tgt_mutex);
833
834         TRACE_MGMT_DBG("Release of tgt %p finished", tgt);
835
836         kfree(tgt);
837
838         TRACE_EXIT_RES(res);
839         return res;
840 }
841
842 /*
843  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
844  */
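    /*
     * MODIFY_LUN tells the 2x00 firmware how many command and immediate-notify
     * resources to add to (or subtract from) those reserved for target mode.
     */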
845 static void q2x_modify_command_count(scsi_qla_host_t *ha, int cmd_count,
846         int imm_count)
847 {
848         modify_lun_entry_t *pkt;
849
850         TRACE_ENTRY();
851
852         TRACE_DBG("Sending MODIFY_LUN (ha=%p, cmd=%d, imm=%d)", 
853                   ha, cmd_count, imm_count);
854
855         /* Sending a marker isn't necessary, since we're called from the ISR */
856
857         pkt = (modify_lun_entry_t *)tgt_data.req_pkt(ha);
858         if (pkt == NULL) {
859                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
860                         "request packet", ha->instance, __func__);
861                 goto out;
862         }
863
864         ha->tgt->modify_lun_expected++;
865
866         pkt->entry_type = MODIFY_LUN_TYPE;
867         pkt->entry_count = 1;
868         if (cmd_count < 0) {
869                 pkt->operators = MODIFY_LUN_CMD_SUB;    /* Subtract from command count */
870                 pkt->command_count = -cmd_count;
871         } else if (cmd_count > 0) {
872                 pkt->operators = MODIFY_LUN_CMD_ADD;    /* Add to command count */
873                 pkt->command_count = cmd_count;
874         }
875
876         if (imm_count < 0) {
877                 pkt->operators |= MODIFY_LUN_IMM_SUB;
878                 pkt->immed_notify_count = -imm_count;
879         } else if (imm_count > 0) {
880                 pkt->operators |= MODIFY_LUN_IMM_ADD;
881                 pkt->immed_notify_count = imm_count;
882         }
883
884         pkt->timeout = 0;       /* Use default */
885
886         TRACE_BUFFER("MODIFY LUN packet data", pkt, REQUEST_ENTRY_SIZE);
887
888         q2t_exec_queue(ha);
889
890 out:
891         TRACE_EXIT();
892         return;
893 }
894
895 /*
896  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
897  */
898 static void q2x_send_notify_ack(scsi_qla_host_t *ha, notify_entry_t *iocb,
899         uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
900         uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
901 {
902         nack_entry_t *ntfy;
903         
904         TRACE_ENTRY();
905
906         TRACE_DBG("Sending NOTIFY_ACK (ha=%p)", ha);
907
908         /* Send marker if required */
909         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
910                 goto out;
911
912         ntfy = (nack_entry_t *)tgt_data.req_pkt(ha);
913         if (ntfy == NULL) {
914                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
915                         "request packet", ha->instance, __func__);
916                 goto out;
917         }
918
919         if (ha->tgt != NULL)
920                 ha->tgt->notify_ack_expected++;
921
922         ntfy->entry_type = NOTIFY_ACK_TYPE;
923         ntfy->entry_count = 1;
924         SET_TARGET_ID(ha, ntfy->target, GET_TARGET_ID(ha, iocb));
925         ntfy->status = iocb->status;
926         ntfy->task_flags = iocb->task_flags;
927         ntfy->seq_id = iocb->seq_id;
928         /* Do not increment here, the chip isn't decrementing */
929         /* ntfy->flags = __constant_cpu_to_le16(NOTIFY_ACK_RES_COUNT); */
930         ntfy->flags |= cpu_to_le16(add_flags);
931         ntfy->srr_rx_id = iocb->srr_rx_id;
932         ntfy->srr_rel_offs = iocb->srr_rel_offs;
933         ntfy->srr_ui = iocb->srr_ui;
934         ntfy->srr_flags = cpu_to_le16(srr_flags);
935         ntfy->srr_reject_code = cpu_to_le16(srr_reject_code);
936         ntfy->srr_reject_code_expl = srr_explan;
937         ntfy->ox_id = iocb->ox_id;
938
939         if (resp_code_valid) {
940                 ntfy->resp_code = cpu_to_le16(resp_code);
941                 ntfy->flags |= __constant_cpu_to_le16(
942                         NOTIFY_ACK_TM_RESP_CODE_VALID);
943         }
944
945         TRACE(TRACE_SCSI, "Sending Notify Ack Seq %#x -> I %#x St %#x RC %#x",
946               le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
947               le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
948         TRACE_BUFFER("Notify Ack packet data", ntfy, REQUEST_ENTRY_SIZE);       
949
950         q2t_exec_queue(ha);
951
952 out:
953         TRACE_EXIT();
954         return;
955 }
956
957 /*
958  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
959  */
960 static void q24_send_abts_resp(scsi_qla_host_t *ha,
961         const abts24_recv_entry_t *abts, uint32_t status, bool ids_reversed)
962 {
963         abts24_resp_entry_t *resp;
964         uint32_t f_ctl;
965         uint8_t *p;
966
967         TRACE_ENTRY();
968
969         TRACE_DBG("Sending task mgmt ABTS response (ha=%p, abts=%p, "
970                 "status=%x)", ha, abts, status);
971
972         /* Send marker if required */
973         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
974                 goto out;
975
976         resp = (abts24_resp_entry_t *)tgt_data.req_pkt(ha);
977         if (resp == NULL) {
978                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
979                         "request packet", ha->instance, __func__);
980                 goto out;
981         }
982
983         resp->entry_type = ABTS_RESP_24XX;
984         resp->entry_count = 1;
985         resp->nport_handle = abts->nport_handle;
986         resp->sof_type = abts->sof_type;
987         resp->exchange_address = abts->exchange_address;
988         resp->fcp_hdr_le = abts->fcp_hdr_le;
989         f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
990                         F_CTL_LAST_SEQ | F_CTL_END_SEQ |
991                         F_CTL_SEQ_INITIATIVE);
992         p = (uint8_t *)&f_ctl;
993         resp->fcp_hdr_le.f_ctl[0] = *p++;
994         resp->fcp_hdr_le.f_ctl[1] = *p++;
995         resp->fcp_hdr_le.f_ctl[2] = *p;
996         if (ids_reversed) {
997                 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
998                 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
999                 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1000                 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1001                 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1002                 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1003         } else {
1004                 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1005                 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1006                 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1007                 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1008                 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1009                 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1010         }
1011         resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
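             /*
              * Success -> BA_ACC accepting the whole aborted exchange;
              * anything else -> BA_RJT.
              */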
1012         if (status == SCST_MGMT_STATUS_SUCCESS) {
1013                 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1014                 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1015                 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1016                 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1017                 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1018                 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1019         } else {
1020                 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1021                 resp->payload.ba_rjt.reason_code = 
1022                         BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1023                 /* Other bytes are zero */
1024         }
1025
1026         TRACE_BUFFER("ABTS RESP packet data", resp, REQUEST_ENTRY_SIZE);
1027
1028         ha->tgt->abts_resp_expected++;
1029
1030         q2t_exec_queue(ha);
1031
1032 out:
1033         TRACE_EXIT();
1034         return;
1035 }
1036
1037 /*
1038  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
1039  */
1040 static void q24_retry_term_exchange(scsi_qla_host_t *ha,
1041         abts24_resp_fw_entry_t *entry)
1042 {
1043         ctio7_status1_entry_t *ctio;
1044
1045         TRACE_ENTRY();
1046
1047         TRACE_DBG("Sending retry TERM EXCH CTIO7 (ha=%p)", ha);
1048
1049         /* Send marker if required */
1050         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1051                 goto out;
1052
1053         ctio = (ctio7_status1_entry_t *)tgt_data.req_pkt(ha);
1054         if (ctio == NULL) {
1055                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1056                         "request packet", ha->instance, __func__);
1057                 goto out;
1058         }
1059
1060         /*
1061          * On entry we have the firmware's response to the ABTS response we
1062          * generated earlier, so its ID fields are reversed.
1063          */
1064
1065         ctio->common.entry_type = CTIO_TYPE7;
1066         ctio->common.entry_count = 1;
1067         ctio->common.nport_handle = entry->nport_handle;
1068         ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1069         ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1070         ctio->common.initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1071         ctio->common.initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1072         ctio->common.initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1073         ctio->common.exchange_addr = entry->exchange_addr_to_abort;
1074         ctio->flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1075         ctio->ox_id = entry->fcp_hdr_le.ox_id;
1076
1077         TRACE_BUFFER("CTIO7 retry TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
1078
1079         q2t_exec_queue(ha);
1080
1081         q24_send_abts_resp(ha, (abts24_recv_entry_t *)entry,
1082                 SCST_MGMT_STATUS_SUCCESS, true);
1083
1084 out:
1085         TRACE_EXIT();
1086         return;
1087 }
1088
1089 /*
1090  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
1091  */
1092 static void q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts)
1093 {
1094         uint32_t tag;
1095         int rc;
1096         struct q2t_mgmt_cmd *mcmd;
1097         struct q2t_sess *sess;
1098
1099         TRACE_ENTRY();
1100
1101         if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1102                 PRINT_ERROR("qla2x00tgt(%ld): ABTS: Abort Sequence not "
1103                         "supported", ha->instance);
1104                 goto out_err;
1105         }
1106
1107         tag = abts->exchange_addr_to_abort;
1108
1109         if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1110                 TRACE_MGMT_DBG("qla2x00tgt(%ld): ABTS: Unknown Exchange "
1111                         "Address received", ha->instance);
1112                 goto out_err;
1113         }
1114
1115         TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): task abort (s_id=%x:%x:%x, "
1116                 "tag=%d, param=%x)", ha->instance, abts->fcp_hdr_le.s_id[0],
1117                 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[2], tag, 
1118                 le32_to_cpu(abts->fcp_hdr_le.parameter));
1119
1120         sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
1121         if (sess == NULL) {
1122                 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task abort for non-existent "
1123                         "session", ha->instance);
1124                 ha->tgt->tm_to_unknown = 1;
1125                 goto out_err;
1126         }
1127
1128         mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
1129         if (mcmd == NULL) {
1130                 PRINT_ERROR("%s: Allocation of ABORT cmd failed", __func__);
1131                 goto out_err;
1132         }
1133         memset(mcmd, 0, sizeof(*mcmd));
1134
1135         mcmd->sess = sess;
1136         memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1137
1138         rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag, 
1139                 SCST_ATOMIC, mcmd);
1140         if (rc != 0) {
1141                 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_tag() failed: %d",
1142                             ha->instance, rc);
1143                 goto out_err_free;
1144         }
1145
1146 out:
1147         TRACE_EXIT();
1148         return;
1149
1150 out_err_free:
1151         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
1152
1153 out_err:
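             /* Reject the ABTS so the initiator doesn't wait forever for a response */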
1154         q24_send_abts_resp(ha, abts, SCST_MGMT_STATUS_REJECTED, false);
1155         goto out;
1156 }
1157
1158 /*
1159  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
1160  */
1161 static void q24_send_task_mgmt_ctio(scsi_qla_host_t *ha,
1162         struct q2t_mgmt_cmd *mcmd, uint32_t resp_code)
1163 {
1164         const atio7_entry_t *atio = &mcmd->orig_iocb.atio7;
1165         ctio7_status1_entry_t *ctio;
1166
1167         TRACE_ENTRY();
1168
1169         TRACE_DBG("Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)",
1170                   ha, atio, resp_code);
1171
1172         /* Send marker if required */
1173         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1174                 goto out;
1175
1176         ctio = (ctio7_status1_entry_t *)tgt_data.req_pkt(ha);
1177         if (ctio == NULL) {
1178                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1179                         "request packet", ha->instance, __func__);
1180                 goto out;
1181         }
1182
1183         ctio->common.entry_type = CTIO_TYPE7;
1184         ctio->common.entry_count = 1;
1185         ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1186         ctio->common.nport_handle = mcmd->sess->loop_id;
1187         ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
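             /*
              * The ATIO carries the S_ID in wire (big-endian) order; the CTIO7
              * initiator_id field expects the bytes reversed.
              */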
1188         ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
1189         ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
1190         ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
1191         ctio->common.exchange_addr = atio->exchange_addr;
1192         ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
1193                 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
1194         ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
1195         ctio->scsi_status = cpu_to_le16(resp_code);
1196
1197         TRACE_BUFFER("CTIO7 TASK MGMT packet data", ctio, REQUEST_ENTRY_SIZE);
1198
1199         q2t_exec_queue(ha);
1200
1201 out:
1202         TRACE_EXIT();
1203         return;
1204 }
1205
1206 /*
1207  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
1208  */
1209 static void q24_send_notify_ack(scsi_qla_host_t *ha,
1210         notify24xx_entry_t *iocb, uint16_t srr_flags,
1211         uint8_t srr_reject_code, uint8_t srr_explan)
1212 {
1213         nack24xx_entry_t *nack;
1214         
1215         TRACE_ENTRY();
1216
1217         TRACE_DBG("Sending NOTIFY_ACK24 (ha=%p)", ha);
1218
1219         /* Send marker if required */
1220         if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1221                 goto out;
1222
1223         if (ha->tgt != NULL)
1224                 ha->tgt->notify_ack_expected++;
1225
1226         nack = (nack24xx_entry_t *)tgt_data.req_pkt(ha);
1227         if (nack == NULL) {
1228                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1229                         "request packet", ha->instance, __func__);
1230                 goto out;
1231         }
1232
1233         nack->entry_type = NOTIFY_ACK_TYPE;
1234         nack->entry_count = 1;
1235         nack->nport_handle = iocb->nport_handle;
1236         if (le16_to_cpu(iocb->status) == IMM_NTFY_ELS) {
1237                 nack->flags = iocb->flags & 
1238                         __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1239         }
1240         nack->srr_rx_id = iocb->srr_rx_id;
1241         nack->status = iocb->status;
1242         nack->status_subcode = iocb->status_subcode;
1243         nack->exchange_address = iocb->exchange_address;
1244         nack->srr_rel_offs = iocb->srr_rel_offs;
1245         nack->srr_ui = iocb->srr_ui;
1246         nack->srr_flags = cpu_to_le16(srr_flags);
1247         nack->srr_reject_code = srr_reject_code;
1248         nack->srr_reject_code_expl = srr_explan;
1249         nack->ox_id = iocb->ox_id;
1250
1251         TRACE(TRACE_SCSI, "Sending 24xx Notify Ack %d", nack->status);
1252         TRACE_BUFFER("24xx Notify Ack packet data", nack, sizeof(*nack));
1253
1254         q2t_exec_queue(ha);
1255
1256 out:
1257         TRACE_EXIT();
1258         return;
1259 }
1260
1261 int q2t_convert_to_fc_tm_status(int scst_mstatus)
1262 {
1263         int res;
1264
1265         switch (scst_mstatus) {
1266         case SCST_MGMT_STATUS_SUCCESS:
1267                 res = FC_TM_SUCCESS;
1268                 break;
1269         case SCST_MGMT_STATUS_TASK_NOT_EXIST:
1270                 res = FC_TM_BAD_CMD;
1271                 break;
1272         case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
1273         case SCST_MGMT_STATUS_REJECTED:
1274                 res = FC_TM_REJECT;
1275                 break;
1276         case SCST_MGMT_STATUS_LUN_NOT_EXIST:
1277         case SCST_MGMT_STATUS_FAILED:
1278         default:
1279                 res = FC_TM_FAILED;
1280                 break;
1281         }
1282
1283         TRACE_EXIT_RES(res);
1284         return res;
1285 }
1286
1287 /* SCST Callback */
1288 static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
1289 {
1290         struct q2t_mgmt_cmd *mcmd;
1291         unsigned long flags;
1292         scsi_qla_host_t *ha;
1293
1294         TRACE_ENTRY();
1295
1296         TRACE_MGMT_DBG("scst_mcmd (%p) status %#x state %#x", scst_mcmd,
1297                 scst_mcmd->status, scst_mcmd->state);
1298
1299         mcmd = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
1300         if (unlikely(mcmd == NULL)) {
1301                 PRINT_ERROR("scst_mcmd %p tgt_spec is NULL", mcmd);
1302                 goto out;
1303         }
1304
1305         ha = mcmd->sess->tgt->ha;
1306
1307         spin_lock_irqsave(&ha->hardware_lock, flags);
1308         if (IS_FWI2_CAPABLE(ha)) {
1309                 if (mcmd->flags == Q24_MGMT_SEND_NACK) {
1310                         q24_send_notify_ack(ha, 
1311                                 &mcmd->orig_iocb.notify_entry24, 0, 0, 0);
1312                 } else {
1313                         if (scst_mcmd->fn == SCST_ABORT_TASK)
1314                                 q24_send_abts_resp(ha, &mcmd->orig_iocb.abts,
1315                                         scst_mgmt_cmd_get_status(scst_mcmd),
1316                                         false);
1317                         else
1318                                 q24_send_task_mgmt_ctio(ha, mcmd, 
1319                                         q2t_convert_to_fc_tm_status(
1320                                                 scst_mgmt_cmd_get_status(scst_mcmd)));
1321                 }
1322         } else {
1323                 int resp_code = q2t_convert_to_fc_tm_status(
1324                                         scst_mgmt_cmd_get_status(scst_mcmd));
1325                 q2x_send_notify_ack(ha, &mcmd->orig_iocb.notify_entry, 0,
1326                         resp_code, 1, 0, 0, 0);
1327         }
1328         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1329
1330         scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
1331         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
1332
1333 out:
1334         TRACE_EXIT();
1335         return;
1336 }
1337
1338 /* No locks */
1339 static int q2t_pci_map_calc_cnt(struct q2t_prm *prm)
1340 {
1341         int res = 0;
1342
1343         sBUG_ON(prm->cmd->sg_cnt == 0);
1344
1345         prm->sg = (struct scatterlist *)prm->cmd->sg;
1346         prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, prm->cmd->sg,
1347                 prm->cmd->sg_cnt,
1348                 scst_to_tgt_dma_dir(prm->cmd->data_direction));
1349         if (unlikely(prm->seg_cnt == 0))
1350                 goto out_err;
1351         /*
1352          * If there are more sg entries than the command IOCB can hold, we
1353          * need to allocate continuation entries
1354          */
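             /*
              * E.g., assuming 10 segments with room for 3 in the command IOCB
              * and 5 per continuation entry: (10 - 3) / 5 = 1 plus a remainder
              * of 2, so two extra request entries are needed.
              */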
1355         if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) {
1356                 prm->req_cnt += (uint16_t)(prm->seg_cnt -
1357                                 prm->tgt->datasegs_per_cmd) /
1358                                 prm->tgt->datasegs_per_cont;
1359                 if (((uint16_t)(prm->seg_cnt - prm->tgt->datasegs_per_cmd)) %
1360                                         prm->tgt->datasegs_per_cont) {
1362                         prm->req_cnt++;
1363                 }
1364         }
1365
1366 out:
1367         TRACE_DBG("seg_cnt=%d, req_cnt=%d, res=%d", prm->seg_cnt, 
1368                 prm->req_cnt, res);
1369         return res;
1370
1371 out_err:
1372         PRINT_ERROR("qla2x00tgt(%ld): PCI mapping failed: sg_cnt=%d", 
1373                 prm->tgt->ha->instance, prm->cmd->sg_cnt);
1374         res = -1;
1375         goto out;
1376 }
1377
1378 static int q2t_check_reserve_free_req(scsi_qla_host_t *ha, uint32_t req_cnt)
1379 {
1380         int res = SCST_TGT_RES_SUCCESS;
1381         device_reg_t __iomem *reg = ha->iobase;
1382         uint32_t cnt;
1383
1384         TRACE_ENTRY();
1385
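             /*
              * req_q_cnt caches the number of free request ring entries;
              * re-read the firmware's out pointer only when the cache says we
              * might not fit, and always keep a couple of entries in reserve.
              */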
1386         if (ha->req_q_cnt < (req_cnt + 2)) {
1387                 if (IS_FWI2_CAPABLE(ha))
1388                         cnt = (uint16_t)RD_REG_DWORD(
1389                                     &reg->isp24.req_q_out);
1390                 else
1391                         cnt = qla2x00_debounce_register(
1392                                     ISP_REQ_Q_OUT(ha, &reg->isp));
1393                 TRACE_DBG("Request ring circled: cnt=%d, "
1394                         "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
1395                         cnt, ha->req_ring_index, ha->req_q_cnt, req_cnt);
1396                 if (ha->req_ring_index < cnt)
1397                         ha->req_q_cnt = cnt - ha->req_ring_index;
1398                 else
1399                         ha->req_q_cnt = ha->request_q_length -
1400                             (ha->req_ring_index - cnt);
1401         }
1402
1403         if (unlikely(ha->req_q_cnt < (req_cnt + 2))) {
1404                 TRACE(TRACE_OUT_OF_MEM, "There is no room in the request ring: "
1405                         "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
1406                         ha->req_ring_index, ha->req_q_cnt, req_cnt);
1407                 res = SCST_TGT_RES_QUEUE_FULL;
1408                 goto out;
1409         }
1410
1411         ha->req_q_cnt -= req_cnt;
1412
1413 out:
1414         TRACE_EXIT_RES(res);
1415         return res;
1416 }
1417
1418 /*
1419  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
1420  */
1421 static inline void *q2t_get_req_pkt(scsi_qla_host_t *ha)
1422 {
1423         /* Adjust ring index. */
1424         ha->req_ring_index++;
1425         if (ha->req_ring_index == ha->request_q_length) {
1426                 ha->req_ring_index = 0;
1427                 ha->request_ring_ptr = ha->request_ring;
1428         } else {
1429                 ha->request_ring_ptr++;
1430         }
1431         return (cont_entry_t *)ha->request_ring_ptr;
1432 }
1433
1434 /* ha->hardware_lock supposed to be held on entry */
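     /*
      * Pick the next free command handle: handles are 1-based indexes into
      * ha->cmds[] (0 is Q2T_NULL_HANDLE); Q2T_NULL_HANDLE is returned when
      * every slot is in use.
      */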
1435 static inline uint32_t q2t_make_handle(scsi_qla_host_t *ha)
1436 {
1437         uint32_t h;
1438
1439         h = ha->current_handle;
1440         /* always increment cmd handle */
1441         do {
1442                 ++h;
1443                 if (h > MAX_OUTSTANDING_COMMANDS) {
1444                         h = 1; /* 0 is Q2T_NULL_HANDLE */
1445                 }
1446                 if (h == ha->current_handle) {
1447                         TRACE(TRACE_OUT_OF_MEM, 
1448                               "Ran out of empty cmd slots in ha %p", ha);
1449                         h = Q2T_NULL_HANDLE;
1450                         break;
1451                 }
1452         } while ((h == Q2T_NULL_HANDLE) ||
1453                  (h == Q2T_SKIP_HANDLE) || 
1454                  (ha->cmds[h-1] != NULL));
1455
1456         if (h != Q2T_NULL_HANDLE)
1457                 ha->current_handle = h;
1458
1459         return h;
1460 }
1461
1462 /* ha->hardware_lock supposed to be held on entry */
1463 static void q2x_build_ctio_pkt(struct q2t_prm *prm)
1464 {
1465         uint32_t h;
1466         ctio_entry_t *pkt;
1467         scsi_qla_host_t *ha = prm->tgt->ha;
1468
1469         pkt = (ctio_entry_t *)ha->request_ring_ptr;
1470         prm->pkt = pkt;
1471         memset(pkt, 0, sizeof(*pkt));
1472
1473         if (prm->tgt->tgt_enable_64bit_addr)
1474                 pkt->common.entry_type = CTIO_A64_TYPE;
1475         else
1476                 pkt->common.entry_type = CONTINUE_TGT_IO_TYPE;
1477
1478         pkt->common.entry_count = (uint8_t)prm->req_cnt;
1479
1480         h = q2t_make_handle(ha);
1481         if (h != Q2T_NULL_HANDLE)
1482                 ha->cmds[h-1] = prm->cmd;
1483
1484         pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
1485         pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1486
1487         /* Set initiator ID */
1488         h = GET_TARGET_ID(ha, &prm->cmd->atio.atio2x);
1489         SET_TARGET_ID(ha, pkt->common.target, h);
1490
1491         pkt->common.rx_id = prm->cmd->atio.atio2x.rx_id;
1492         pkt->common.relative_offset = cpu_to_le32(prm->cmd->offset);
1493
1494         TRACE(TRACE_DEBUG|TRACE_SCSI, 
1495               "handle(scst_cmd) -> %08x, timeout %d L %#x -> I %#x E %#x",
1496               pkt->common.handle, Q2T_TIMEOUT,
1497               le16_to_cpu(prm->cmd->atio.atio2x.lun),
1498               GET_TARGET_ID(ha, &pkt->common), pkt->common.rx_id);
1499 }
1500
1501 /* ha->hardware_lock supposed to be held on entry */
1502 static int q24_build_ctio_pkt(struct q2t_prm *prm)
1503 {
1504         uint32_t h;
1505         ctio7_status0_entry_t *pkt;
1506         scsi_qla_host_t *ha = prm->tgt->ha;
1507         atio7_entry_t *atio = &prm->cmd->atio.atio7;
1508         int res = SCST_TGT_RES_SUCCESS;
1509
1510         TRACE_ENTRY();
1511
1512         pkt = (ctio7_status0_entry_t *)ha->request_ring_ptr;
1513         prm->pkt = pkt;
1514         memset(pkt, 0, sizeof(*pkt));
1515
1516         pkt->common.entry_type = CTIO_TYPE7;
1517         pkt->common.entry_count = (uint8_t)prm->req_cnt;
1518
1519         h = q2t_make_handle(ha);
1520         if (unlikely(h == Q2T_NULL_HANDLE)) {
1521                 /*
1522                  * A CTIO type 7 from the firmware doesn't provide a way to
1523                  * know the initiator's LOOP ID, hence we can't find
1524                  * the session and, therefore, the command.
1525                  */
1526                 res = SCST_TGT_RES_QUEUE_FULL;
1527                 goto out;
1528         } else
1529                 ha->cmds[h-1] = prm->cmd;
1530
1531         pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
1532         pkt->common.nport_handle = prm->cmd->loop_id;
1533         pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1534         pkt->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
1535         pkt->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
1536         pkt->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
1537         pkt->common.exchange_addr = atio->exchange_addr;
1538         pkt->flags |= (atio->attr << 9);
1539         pkt->ox_id = swab16(atio->fcp_hdr.ox_id);
1540         pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
1541
1542 out:
1543         TRACE(TRACE_DEBUG|TRACE_SCSI, "handle(scst_cmd) -> %08x, timeout %d "
1544                 "ox_id %#x", pkt->common.handle, Q2T_TIMEOUT,
1545                 le16_to_cpu(pkt->ox_id));
1546         TRACE_EXIT_RES(res);
1547         return res;
1548 }
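
/*
 * Note on the initiator_id copy above (an assumption drawn from the code,
 * not from firmware documentation): the FCP header stores the S_ID with
 * s_id[0] as the most significant byte, while the CTIO7 initiator_id
 * field apparently expects the bytes in the opposite order, hence the
 * reversed assignment of the three address bytes here and in
 * q24_send_term_exchange().
 */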
1549
1550 /*
1551  * ha->hardware_lock is supposed to be held on entry. We have already made sure
1552  * that there are enough request entries available, so we don't have to drop it.
1553  */
1554 static void q2t_load_cont_data_segments(struct q2t_prm *prm)
1555 {
1556         int cnt;
1557         uint32_t *dword_ptr;
1558         int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1559
1560         TRACE_ENTRY();
1561
1562         /* Build continuation packets */
1563         while (prm->seg_cnt > 0) {
1564                 cont_a64_entry_t *cont_pkt64 =
1565                         (cont_a64_entry_t *)q2t_get_req_pkt(prm->tgt->ha);
1566
1567                 /*
1568                  * Make sure that none of cont_pkt64's 64-bit-specific
1569                  * fields are used for 32-bit addressing; cast to
1570                  * (cont_entry_t *) for that.
1571                  */
1573
1574                 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
1575
1576                 cont_pkt64->entry_count = 1;
1577                 cont_pkt64->sys_define = 0;
1578
1579                 if (enable_64bit_addressing) {
1580                         cont_pkt64->entry_type = CONTINUE_A64_TYPE;
1581                         dword_ptr =
1582                             (uint32_t *)&cont_pkt64->dseg_0_address;
1583                 } else {
1584                         cont_pkt64->entry_type = CONTINUE_TYPE;
1585                         dword_ptr =
1586                             (uint32_t *)&((cont_entry_t *)
1587                                             cont_pkt64)->dseg_0_address;
1588                 }
1589
1590                 /* Load continuation entry data segments */
1591                 for (cnt = 0;
1592                      cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
1593                      cnt++, prm->seg_cnt--) 
1594                 {
1595                         *dword_ptr++ =
1596                             cpu_to_le32(pci_dma_lo32
1597                                         (sg_dma_address(prm->sg)));
1598                         if (enable_64bit_addressing) {
1599                                 *dword_ptr++ =
1600                                     cpu_to_le32(pci_dma_hi32
1601                                                 (sg_dma_address
1602                                                  (prm->sg)));
1603                         }
1604                         *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1605
1606                         TRACE_SG("S/G Segment Cont. phys_addr=%llx:%llx, len=%d",
1607                               (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
1608                               (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
1609                               (int)sg_dma_len(prm->sg));
1610
1611                         prm->sg++;
1612                 }
1613
1614                 TRACE_BUFFER("Continuation packet data",
1615                              cont_pkt64, REQUEST_ENTRY_SIZE);
1616         }
1617
1618         TRACE_EXIT();
1619         return;
1620 }
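
/*
 * Data segment layout, as reconstructed from the loops above (not from
 * the firmware manuals): each segment is written as consecutive
 * little-endian 32-bit words, { addr_lo, len } for 32-bit addressing or
 * { addr_lo, addr_hi, len } for 64-bit addressing.  The first
 * datasegs_per_cmd segments live in the CTIO itself; the remainder spill
 * into continuation entries, datasegs_per_cont per entry, taken from the
 * request ring with q2t_get_req_pkt().
 */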
1621
1622 /*
1623  * ha->hardware_lock is supposed to be held on entry. We have already made sure
1624  * that there are enough request entries available, so we don't have to drop it.
1625  */
1626 static void q2x_load_data_segments(struct q2t_prm *prm)
1627 {
1628         int cnt;
1629         uint32_t *dword_ptr;
1630         int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1631         ctio_common_entry_t *pkt = (ctio_common_entry_t *)prm->pkt;
1632
1633         TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
1634               le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
1635
1636         pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
1637
1638         /* Setup packet address segment pointer */
1639         dword_ptr = pkt->dseg_0_address;
1640
1641         if (prm->seg_cnt == 0) {
1642                 /* No data transfer */
1643                 *dword_ptr++ = 0;
1644                 *dword_ptr = 0;
1645
1646                 TRACE_BUFFER("No data, CTIO packet data", pkt,
1647                         REQUEST_ENTRY_SIZE);
1648                 goto out;
1649         }
1650
1651         /* Set total data segment count */
1652         pkt->dseg_count = cpu_to_le16(prm->seg_cnt);
1653
1654         /* If scatter gather */
1655         TRACE_SG("%s", "Building S/G data segments...");
1656         /* Load command entry data segments */
1657         for (cnt = 0;
1658              (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1659              cnt++, prm->seg_cnt--) 
1660         {
1661                 *dword_ptr++ =
1662                     cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1663                 if (enable_64bit_addressing) {
1664                         *dword_ptr++ =
1665                             cpu_to_le32(pci_dma_hi32
1666                                         (sg_dma_address(prm->sg)));
1667                 }
1668                 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1669
1670                 TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
1671                       (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
1672                       (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
1673                       (int)sg_dma_len(prm->sg));
1674
1675                 prm->sg++;
1676         }
1677
1678         TRACE_BUFFER("Scatter/gather, CTIO packet data", pkt,
1679                 REQUEST_ENTRY_SIZE);
1680
1681         q2t_load_cont_data_segments(prm);
1682
1683 out:
1684         return;
1685 }
1686
1687 /*
1688  * ha->hardware_lock is supposed to be held on entry. We have already made sure
1689  * that there are enough request entries available, so we don't have to drop it.
1690  */
1691 static void q24_load_data_segments(struct q2t_prm *prm)
1692 {
1693         int cnt;
1694         uint32_t *dword_ptr;
1695         int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1696         ctio7_status0_entry_t *pkt = (ctio7_status0_entry_t *)prm->pkt;
1697
1698         TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
1699               le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
1700
1701         pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
1702
1703         /* Setup packet address segment pointer */
1704         dword_ptr = pkt->dseg_0_address;
1705
1706         if (prm->seg_cnt == 0) {
1707                 /* No data transfer */
1708                 *dword_ptr++ = 0;
1709                 *dword_ptr = 0;
1710
1711                 TRACE_BUFFER("No data, CTIO7 packet data", pkt,
1712                         REQUEST_ENTRY_SIZE);
1713                 goto out;
1714         }
1715
1716         /* Set total data segment count */
1717         pkt->common.dseg_count = cpu_to_le16(prm->seg_cnt);
1718
1719         /* If scatter gather */
1720         TRACE_SG("%s", "Building S/G data segments...");
1721         /* Load command entry data segments */
1722         for (cnt = 0;
1723              (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1724              cnt++, prm->seg_cnt--) 
1725         {
1726                 *dword_ptr++ =
1727                     cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1728                 if (enable_64bit_addressing) {
1729                         *dword_ptr++ =
1730                             cpu_to_le32(pci_dma_hi32(
1731                                         sg_dma_address(prm->sg)));
1732                 }
1733                 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1734
1735                 TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
1736                       (long long unsigned int)pci_dma_hi32(sg_dma_address(
1737                                                                 prm->sg)),
1738                       (long long unsigned int)pci_dma_lo32(sg_dma_address(
1739                                                                 prm->sg)),
1740                       (int)sg_dma_len(prm->sg));
1741
1742                 prm->sg++;
1743         }
1744
1745         q2t_load_cont_data_segments(prm);
1746
1747 out:
1748         return;
1749 }
1750
1751 static inline int q2t_has_data(struct q2t_cmd *cmd)
1752 {
1753         return cmd->bufflen > 0;
1754 }
1755
1756 static int q2t_pre_xmit_response(struct q2t_cmd *cmd,
1757         struct q2t_prm *prm, int xmit_type, unsigned long *flags)
1758 {
1759         int res;
1760         struct q2t_tgt *tgt = cmd->tgt;
1761         scsi_qla_host_t *ha;
1762         uint16_t full_req_cnt;
1763         struct scst_cmd *scst_cmd = cmd->scst_cmd;
1764
1765         TRACE_ENTRY();
1766
1767         if (unlikely(cmd->aborted)) {
1768                 scsi_qla_host_t *ha = tgt->ha;
1769
1770                 TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): terminating exchange "
1771                         "for aborted cmd=%p (scst_cmd=%p, tag=%d)",
1772                         ha->instance, cmd, scst_cmd, cmd->tag);
1773
1774                 cmd->state = Q2T_STATE_ABORTED;
1775                 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
1776
1777                 if (IS_FWI2_CAPABLE(ha))
1778                         q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 0);
1779                 else
1780                         q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 0);
1781                 /* !! At this point cmd could be already freed !! */
1782                 res = Q2T_PRE_XMIT_RESP_CMD_ABORTED;
1783                 goto out;
1784         }
1785
1786         TRACE(TRACE_SCSI, "tag=%Ld", scst_cmd_get_tag(scst_cmd));
1787
1788         prm->cmd = cmd;
1789         prm->tgt = tgt;
1790         prm->rq_result = scst_cmd_get_status(scst_cmd);
1791         prm->sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
1792         prm->sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
1793         prm->sg = NULL;
1794         prm->seg_cnt = -1;
1795         prm->req_cnt = 1;
1796         prm->add_status_pkt = 0;
1797         ha = tgt->ha;
1798
1799         TRACE_DBG("rq_result=%x, xmit_type=%x", prm->rq_result, xmit_type);
1800         if (prm->rq_result != 0)
1801                 TRACE_BUFFER("Sense", prm->sense_buffer, prm->sense_buffer_len);
1802
1803         /* Send marker if required */
1804         if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
1805                 res = SCST_TGT_RES_FATAL_ERROR;
1806                 goto out;
1807         }
1808
1809         TRACE_DBG("CTIO start: ha(%d)", (int)ha->instance);
1810
1811         if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
1812                 if (q2t_pci_map_calc_cnt(prm) != 0) {
1813                         res = SCST_TGT_RES_QUEUE_FULL;
1814                         goto out;
1815                 }
1816         }
1817
1818         full_req_cnt = prm->req_cnt;
1819
1820         if (xmit_type & Q2T_XMIT_STATUS) {
1821                 if (cmd->data_direction != SCST_DATA_WRITE) {
1822                         int expected;
1823                         if (IS_FWI2_CAPABLE(ha))
1824                                 expected = be32_to_cpu(cmd->
1825                                              atio.atio7.fcp_cmnd.data_length);
1826                         else
1827                                 expected = le32_to_cpu(cmd->
1828                                                 atio.atio2x.data_length);
1829                         prm->residual = expected -
1830                                 scst_cmd_get_resp_data_len(scst_cmd);
1831                         if (prm->residual > 0) {
1832                                 TRACE_DBG("Residual underflow: %d (tag %Ld, "
1833                                         "op %x, expected %d, resp_data_len "
1834                                         "%d, bufflen %d, rq_result %x)",
1835                                         prm->residual, scst_cmd->tag,
1836                                         scst_cmd->cdb[0], expected,
1837                                         scst_cmd_get_resp_data_len(scst_cmd),
1838                                         cmd->bufflen, prm->rq_result);
1839                                 prm->rq_result |= SS_RESIDUAL_UNDER;
1840                         } else if (prm->residual < 0) {
1841                                 TRACE_DBG("Residual overflow: %d (tag %Ld, "
1842                                         "op %x, expected %d, resp_data_len "
1843                                         "%d, bufflen %d, rq_result %x)",
1844                                         prm->residual, scst_cmd->tag,
1845                                         scst_cmd->cdb[0], expected,
1846                                         scst_cmd_get_resp_data_len(scst_cmd),
1847                                         cmd->bufflen, prm->rq_result);
1848                                 prm->rq_result |= SS_RESIDUAL_OVER;
1849                                 prm->residual = -prm->residual;
1850                         }
1851                 }
1852
1853                 /* 
1854                  * If Q2T_XMIT_DATA is not set, add_status_pkt will be ignored
1855                  * in *xmit_response() below
1856                  */
1857                 if (q2t_has_data(cmd)) {
1858                         if (SCST_SENSE_VALID(prm->sense_buffer) ||
1859                             (IS_FWI2_CAPABLE(ha) &&
1860                              (prm->rq_result != 0))) {
1861                                 prm->add_status_pkt = 1;
1862                                 full_req_cnt++;
1863                         }
1864                 }
1865         }
1866
1867         TRACE_DBG("req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d", 
1868                 prm->req_cnt, full_req_cnt, prm->add_status_pkt);
1869
1870         /* Acquire ring specific lock */
1871         spin_lock_irqsave(&ha->hardware_lock, *flags);
1872
1873         /* Does the firmware have enough IOCB slots for this request? */
1874         res = q2t_check_reserve_free_req(ha, full_req_cnt);
1875         if (unlikely(res != SCST_TGT_RES_SUCCESS))
1876                 goto out_unlock_free_unmap;
1877
1878 out:
1879         TRACE_EXIT_RES(res);
1880         return res;
1881
1882 out_unlock_free_unmap:
1883         /* Unmap only what was actually mapped above */
1884         if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
1885                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
1886                      scst_to_dma_dir(cmd->data_direction));
1887         }
1888         /* Release ring specific lock */
1889         spin_unlock_irqrestore(&ha->hardware_lock, *flags);
1890         goto out;
1891 }
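
/*
 * Worked example for the residual handling above (the numbers are
 * illustrative, not taken from a trace): if the initiator requested 4096
 * bytes in FCP_DL but SCST returns only 512, residual = 4096 - 512 = 3584
 * and SS_RESIDUAL_UNDER is set; if SCST returned more than FCP_DL, the
 * difference would be negated and reported with SS_RESIDUAL_OVER instead.
 */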
1892
1893 static inline int q2t_need_explicit_conf(scsi_qla_host_t *ha,
1894         struct q2t_cmd *cmd, int sending_sense)
1895 {
1896         if (ha->enable_class_2)
1897                 return 0;
1898
1899         if (sending_sense)
1900                 return cmd->conf_compl_supported;
1901         else
1902                 return ha->enable_explicit_conf && cmd->conf_compl_supported;
1903 }
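
/*
 * Summary of the confirmation policy above: explicit confirmation is never
 * requested on Class 2 connections (the class itself acknowledges
 * delivery); otherwise it is requested for sense data whenever the
 * initiator advertised confirmed completion support, and for plain status
 * only if enable_explicit_conf is set as well.
 */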
1904
1905 static void q2x_init_ctio_ret_entry(ctio_ret_entry_t *ctio_m1,
1906         struct q2t_prm *prm)
1907 {
1908         TRACE_ENTRY();
1909
1910         prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len, 
1911                                     (uint32_t)sizeof(ctio_m1->sense_data));
1912
1913         ctio_m1->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
1914                                      OF_NO_DATA | OF_SS_MODE_1);
1915         ctio_m1->flags |= __constant_cpu_to_le16(OF_INC_RC);
1916         if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
1917                 ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
1918                                         OF_CONF_REQ);
1919         }
1920         ctio_m1->scsi_status = cpu_to_le16(prm->rq_result);
1921         ctio_m1->residual = cpu_to_le32(prm->residual);
1922         if (SCST_SENSE_VALID(prm->sense_buffer)) {
1923                 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1924                         ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
1925                                                 OF_CONF_REQ);
1926                 }
1927                 ctio_m1->scsi_status |= __constant_cpu_to_le16(
1928                                                 SS_SENSE_LEN_VALID);
1929                 ctio_m1->sense_length = cpu_to_le16(prm->sense_buffer_len);
1930                 memcpy(ctio_m1->sense_data, prm->sense_buffer,
1931                        prm->sense_buffer_len);
1932         } else {
1933                 memset(ctio_m1->sense_data, 0, sizeof(ctio_m1->sense_data));
1934                 ctio_m1->sense_length = 0;
1935         }
1936
1937         /* Can sense data longer than 26 bytes occur here? */
1938
1939         TRACE_EXIT();
1940         return;
1941 }
1942
1943 static int __q2x_xmit_response(struct q2t_cmd *cmd, int xmit_type)
1944 {
1945         int res;
1946         unsigned long flags;
1947         scsi_qla_host_t *ha;
1948         struct q2t_prm prm;
1949         ctio_common_entry_t *pkt;
1950
1951         TRACE_ENTRY();
1952
1953         memset(&prm, 0, sizeof(prm));
1954
1955         res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
1956         if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
1957                 if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
1958                         res = SCST_TGT_RES_SUCCESS;
1959                 goto out;
1960         }
1961
1962         /* Here ha->hardware_lock already locked */
1963
1964         ha = prm.tgt->ha;
1965
1966         q2x_build_ctio_pkt(&prm);
1967         pkt = (ctio_common_entry_t *)prm.pkt;
1968
1969         if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
1970                 pkt->flags |= __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_IN);
1971                 pkt->flags |= __constant_cpu_to_le16(OF_INC_RC);
1972
1973                 q2x_load_data_segments(&prm);
1974
1975                 if (prm.add_status_pkt == 0) {
1976                         if (xmit_type & Q2T_XMIT_STATUS) {
1977                                 pkt->scsi_status = cpu_to_le16(prm.rq_result);
1978                                 pkt->residual = cpu_to_le32(prm.residual);
1979                                 pkt->flags |= __constant_cpu_to_le16(OF_SSTS);
1980                                 if (q2t_need_explicit_conf(ha, cmd, 0)) {
1981                                         pkt->flags |= __constant_cpu_to_le16(
1982                                                         OF_EXPL_CONF |
1983                                                         OF_CONF_REQ);
1984                                 }
1985                         }
1986                 } else {
1987                         /*
1988                          * We have already made sure that there are enough
1989                          * request entries, so the HW lock won't be dropped
1990                          * in req_pkt().
1991                          */
1992                         ctio_ret_entry_t *ctio_m1 = 
1993                                 (ctio_ret_entry_t *)q2t_get_req_pkt(ha);
1994
1995                         TRACE_DBG("%s", "Building additional status packet");
1996
1997                         memcpy(ctio_m1, pkt, sizeof(*ctio_m1));
1998                         ctio_m1->entry_count = 1;
1999                         ctio_m1->dseg_count = 0;
2000
2001                         /* Real finish is ctio_m1's finish */
2002                         pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2003                         pkt->flags &= ~__constant_cpu_to_le16(OF_INC_RC);
2004
2005                         q2x_init_ctio_ret_entry(ctio_m1, &prm);
2006                         TRACE_BUFFER("Status CTIO packet data", ctio_m1,
2007                                 REQUEST_ENTRY_SIZE);
2008                 }
2009         } else
2010                 q2x_init_ctio_ret_entry((ctio_ret_entry_t *)pkt, &prm);
2011
2012         cmd->state = Q2T_STATE_PROCESSED;       /* Mid-level is done processing */
2013
2014         TRACE_BUFFER("Xmitting", pkt, REQUEST_ENTRY_SIZE);
2015
2016         q2t_exec_queue(ha);
2017
2018         /* Release ring specific lock */
2019         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2020
2021 out:
2022         TRACE_EXIT_RES(res);
2023         return res;
2024 }
2025
2026 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
2027 static void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type)
2028 {
2029 #if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
2030         if ((*xmit_type & Q2T_XMIT_STATUS) && (scst_random() % 200) == 50) {
2031                 *xmit_type &= ~Q2T_XMIT_STATUS;
2032                 TRACE_MGMT_DBG("Dropping cmd %p (tag %d) status", cmd,
2033                         cmd->tag);
2034         }
2035 #endif
2036
2037         if (q2t_has_data(cmd) && (cmd->sg_cnt > 1) &&
2038             ((scst_random() % 100) == 20)) {
2039                 int i, leave = 0;
2040                 unsigned int tot_len = 0;
2041
2042                 while (leave == 0)
2043                         leave = scst_random() % cmd->sg_cnt;
2044
2045                 for (i = 0; i < leave; i++)
2046                         tot_len += cmd->sg[i].length;
2047
2048                 TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer tail to len %d, "
2049                         "sg_cnt %d (cmd->bufflen %d, cmd->sg_cnt %d)", cmd,
2050                         cmd->tag, tot_len, leave, cmd->bufflen, cmd->sg_cnt);
2051
2052                 cmd->bufflen = tot_len;
2053                 cmd->sg_cnt = leave;
2054         }
2055
2056         if (q2t_has_data(cmd) && ((scst_random() % 100) == 70)) {
2057                 unsigned int offset = scst_random() % cmd->bufflen;
2058                 
2059                 TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer head "
2060                         "to offset %d (cmd->bufflen %d)", cmd, cmd->tag,
2061                         offset, cmd->bufflen);
2062                 if (offset == 0)
2063                         *xmit_type &= ~Q2T_XMIT_DATA;
2064                 else if (q2t_cut_cmd_data_head(cmd, offset)) {
2065                         TRACE_MGMT_DBG("q2t_cut_cmd_data_head() failed (tag %d)",
2066                                 cmd->tag);
2067                 }
2068         }
2069 }
2070 #else
2071 static inline void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type) {}
2072 #endif
2073
2074 static int q2x_xmit_response(struct scst_cmd *scst_cmd)
2075 {
2076         int xmit_type = Q2T_XMIT_DATA;
2077         int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2078         struct q2t_cmd *cmd = (struct q2t_cmd*)scst_cmd_get_tgt_priv(scst_cmd);
2079
2080 #ifdef CONFIG_SCST_EXTRACHECKS
2081         sBUG_ON(!q2t_has_data(cmd) && !is_send_status);
2082 #endif
2083
2084 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2085         if (scst_cmd_atomic(scst_cmd))
2086                 return SCST_TGT_RES_NEED_THREAD_CTX;
2087 #endif
2088
2089         if (is_send_status)
2090                 xmit_type |= Q2T_XMIT_STATUS;
2091
2092         cmd->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2093         cmd->sg = scst_cmd_get_sg(scst_cmd);
2094         cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2095         cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2096         cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
2097         cmd->aborted = scst_cmd_aborted(scst_cmd);
2098
2099         q2t_check_srr_debug(cmd, &xmit_type);
2100
2101         TRACE_DBG("is_send_status=%x, cmd->bufflen=%d, cmd->sg_cnt=%d, "
2102                 "cmd->data_direction=%d", is_send_status, cmd->bufflen,
2103                 cmd->sg_cnt, cmd->data_direction);
2104
2105         return __q2x_xmit_response(cmd, xmit_type);
2106 }
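
/*
 * Hypothetical call sketch (a code fragment, not driver code, hence the
 * #if 0) showing the xmit_type combinations __q2x_xmit_response() is
 * written to accept:
 */
#if 0	/* example only, not compiled */
        __q2x_xmit_response(cmd, Q2T_XMIT_DATA);                   /* data only, status later */
        __q2x_xmit_response(cmd, Q2T_XMIT_DATA | Q2T_XMIT_STATUS); /* data and status together */
        __q2x_xmit_response(cmd, Q2T_XMIT_STATUS);                 /* status only */
#endif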
2107
2108 static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio,
2109         struct q2t_prm *prm)
2110 {
2111         ctio7_status1_entry_t *ctio1;
2112
2113         TRACE_ENTRY();
2114
2115         prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len, 
2116                                     (uint32_t)sizeof(ctio1->sense_data));
2117         ctio->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2118         if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2119                 ctio->flags |= __constant_cpu_to_le16(
2120                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
2121                                 CTIO7_FLAGS_CONFORM_REQ);
2122         }
2123         ctio->residual = cpu_to_le32(prm->residual);
2124         ctio->scsi_status = cpu_to_le16(prm->rq_result);
2125         if (SCST_SENSE_VALID(prm->sense_buffer)) {
2126                 int i;
2127                 ctio1 = (ctio7_status1_entry_t *)ctio;
2128                 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2129                         ctio1->flags |= __constant_cpu_to_le16(
2130                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
2131                                 CTIO7_FLAGS_CONFORM_REQ);
2132                 }
2133                 ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2134                 ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2135                 ctio1->scsi_status |= __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
2136                 ctio1->sense_length = cpu_to_le16(prm->sense_buffer_len);
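                /*
                 * Each 4-byte group of sense data is stored via
                 * cpu_to_be32() of a host-order load: on big-endian hosts
                 * the bytes are left untouched, on little-endian hosts
                 * each group is byte-reversed, presumably because the
                 * 24xx firmware treats this area as 32-bit words (this
                 * reading is inferred from the code, not from firmware
                 * documentation).
                 */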
2137                 for (i = 0; i < prm->sense_buffer_len / 4; i++)
2138                         ((uint32_t *)ctio1->sense_data)[i] = 
2139                                 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2140 #if 0
2141                 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2142                         static int q;
2143                         if (q < 10) {
2144                                 PRINT_INFO("qla2x00tgt(%ld): %d bytes of sense "
2145                                         "lost", prm->tgt->ha->instance,
2146                                         prm->sense_buffer_len % 4);
2147                                 q++;
2148                         }
2149                 }
2150 #endif
2151         } else {
2152                 ctio1 = (ctio7_status1_entry_t *)ctio;
2153                 ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2154                 ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2155                 ctio1->sense_length = 0;
2156                 memset(ctio1->sense_data, 0, sizeof(ctio1->sense_data));
2157         }
2158
2159         /* Can sense data longer than 24 bytes occur here? */
2160
2161         TRACE_EXIT();
2162         return;
2163 }
2164
2165 static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type)
2166 {
2167         int res;
2168         unsigned long flags;
2169         scsi_qla_host_t *ha;
2170         struct q2t_prm prm;
2171         ctio7_status0_entry_t *pkt;
2172
2173         TRACE_ENTRY();
2174
2175         memset(&prm, 0, sizeof(prm));
2176
2177         res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
2178         if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
2179                 if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
2180                         res = SCST_TGT_RES_SUCCESS;
2181                 goto out;
2182         }
2183
2184         /* Here ha->hardware_lock already locked */
2185
2186         ha = prm.tgt->ha;
2187
2188         res = q24_build_ctio_pkt(&prm);
2189         if (unlikely(res != SCST_TGT_RES_SUCCESS))
2190                 goto out_unmap_unlock;
2191
2192         pkt = (ctio7_status0_entry_t *)prm.pkt;
2193
2194         if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
2195                 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2196                                 CTIO7_FLAGS_STATUS_MODE_0);
2197
2198                 q24_load_data_segments(&prm);
2199
2200                 if (prm.add_status_pkt == 0) {
2201                         if (xmit_type & Q2T_XMIT_STATUS) {
2202                                 pkt->scsi_status = cpu_to_le16(prm.rq_result);
2203                                 pkt->residual = cpu_to_le32(prm.residual);
2204                                 pkt->flags |= __constant_cpu_to_le16(
2205                                                 CTIO7_FLAGS_SEND_STATUS);
2206                                 if (q2t_need_explicit_conf(ha, cmd, 0)) {
2207                                         pkt->flags |= __constant_cpu_to_le16(
2208                                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
2209                                                 CTIO7_FLAGS_CONFORM_REQ);
2210                                 }
2211                         }
2212                 } else {
2213                         /*
2214                          * We have already made sure that there are enough
2215                          * request entries, so the HW lock won't be dropped
2216                          * in req_pkt().
2217                          */
2218                         ctio7_status1_entry_t *ctio = 
2219                                 (ctio7_status1_entry_t *)q2t_get_req_pkt(ha);
2220
2221                         TRACE_DBG("%s", "Building additional status packet");
2222
2223                         memcpy(ctio, pkt, sizeof(*ctio));
2224                         ctio->common.entry_count = 1;
2225                         ctio->common.dseg_count = 0;
2226                         ctio->flags &= ~__constant_cpu_to_le16(
2227                                                 CTIO7_FLAGS_DATA_IN);
2228
2229                         /* Real finish is ctio's finish */
2230                         pkt->common.handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2231                         pkt->flags |= __constant_cpu_to_le16(
2232                                         CTIO7_FLAGS_DONT_RET_CTIO);
2233                         q24_init_ctio_ret_entry((ctio7_status0_entry_t *)ctio,
2234                                                         &prm);
2235                         TRACE_BUFFER("Status CTIO7", ctio, REQUEST_ENTRY_SIZE);
2236                 }
2237         } else
2238                 q24_init_ctio_ret_entry(pkt, &prm);
2239
2240         cmd->state = Q2T_STATE_PROCESSED;       /* Mid-level is done processing */
2241
2242         TRACE_BUFFER("Xmitting CTIO7", pkt, REQUEST_ENTRY_SIZE);
2243
2244         q2t_exec_queue(ha);
2245
2246 out_unlock:
2247         /* Release ring specific lock */
2248         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2249
2250 out:
2251         TRACE_EXIT_RES(res);
2252         return res;
2253
2254 out_unmap_unlock:
2255         /* Unmap only if the data was actually mapped for this response */
2256         if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
2256                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2257                      scst_to_dma_dir(cmd->data_direction));
2258         }
2259         goto out_unlock;
2260 }
2261
2262 static int q24_xmit_response(struct scst_cmd *scst_cmd)
2263 {
2264         int xmit_type = Q2T_XMIT_DATA;
2265         int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2266         struct q2t_cmd *cmd = (struct q2t_cmd*)scst_cmd_get_tgt_priv(scst_cmd);
2267
2268 #ifdef CONFIG_SCST_EXTRACHECKS
2269         sBUG_ON(!q2t_has_data(cmd) && !is_send_status);
2270 #endif
2271
2272 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2273         if (scst_cmd_atomic(scst_cmd))
2274                 return SCST_TGT_RES_NEED_THREAD_CTX;
2275 #endif
2276
2277         if (is_send_status)
2278                 xmit_type |= Q2T_XMIT_STATUS;
2279
2280         cmd->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2281         cmd->sg = scst_cmd_get_sg(scst_cmd);
2282         cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2283         cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2284         cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
2285         cmd->aborted = scst_cmd_aborted(scst_cmd);
2286
2287         q2t_check_srr_debug(cmd, &xmit_type);
2288
2289         TRACE_DBG("is_send_status=%x, bufflen=%d, sg_cnt=%d, "
2290                 "data_direction=%d, offset=%d", is_send_status, cmd->bufflen,
2291                 cmd->sg_cnt, cmd->data_direction, cmd->offset);
2292
2293         return __q24_xmit_response(cmd, xmit_type);
2294 }
2295
2296 static int __q2t_rdy_to_xfer(struct q2t_cmd *cmd)
2297 {
2298         int res = SCST_TGT_RES_SUCCESS;
2299         unsigned long flags;
2300         scsi_qla_host_t *ha;
2301         struct q2t_tgt *tgt = cmd->tgt;
2302         struct q2t_prm prm;
2303         void *p;
2304
2305         TRACE_ENTRY();
2306
2307         memset(&prm, 0, sizeof(prm));
2308         prm.cmd = cmd;
2309         prm.tgt = tgt;
2310         prm.sg = NULL;
2311         prm.req_cnt = 1;
2312         ha = tgt->ha;
2313
2314         /* Send marker if required */
2315         if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
2316                 res = SCST_TGT_RES_FATAL_ERROR;
2317                 goto out;
2318         }
2319
2320         TRACE_DBG("CTIO_start: ha(%d)", (int)ha->instance);
2321
2322         /* Calculate number of entries and segments required */
2323         if (q2t_pci_map_calc_cnt(&prm) != 0) {
2324                 res = SCST_TGT_RES_QUEUE_FULL;
2325                 goto out;
2326         }
2327
2328         /* Acquire ring specific lock */
2329         spin_lock_irqsave(&ha->hardware_lock, flags);
2330
2331         /* Does the firmware have enough IOCB slots for this request? */
2332         res = q2t_check_reserve_free_req(ha, prm.req_cnt);
2333         if (res != SCST_TGT_RES_SUCCESS)
2334                 goto out_unlock_free_unmap;
2335
2336         if (IS_FWI2_CAPABLE(ha)) {
2337                 ctio7_status0_entry_t *pkt;
2338                 res = q24_build_ctio_pkt(&prm);
2339                 if (unlikely(res != SCST_TGT_RES_SUCCESS))
2340                         goto out_unlock_free_unmap;
2341                 pkt = (ctio7_status0_entry_t *)prm.pkt;
2342                 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2343                                 CTIO7_FLAGS_STATUS_MODE_0);
2344                 q24_load_data_segments(&prm);
2345                 p = pkt;
2346         } else {
2347                 ctio_common_entry_t *pkt;
2348                 q2x_build_ctio_pkt(&prm);
2349                 pkt = (ctio_common_entry_t *)prm.pkt;
2350                 pkt->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_OUT);
2351                 q2x_load_data_segments(&prm);
2352                 p = pkt;
2353         }
2354
2355         cmd->state = Q2T_STATE_NEED_DATA;
2356
2357         TRACE_BUFFER("Xfering", p, REQUEST_ENTRY_SIZE);
2358
2359         q2t_exec_queue(ha);
2360
2361 out_unlock:
2362         /* Release ring specific lock */
2363         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2364
2365 out:
2366         TRACE_EXIT_RES(res);
2367         return res;
2368
2369 out_unlock_free_unmap:
2370         if (q2t_has_data(cmd)) {
2371                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2372                      scst_to_dma_dir(cmd->data_direction));
2373         }
2374         goto out_unlock;
2375 }
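
/*
 * Flow note (derived from this file): for a WRITE, SCST calls rdy_to_xfer
 * once buffers are allocated; the CTIO built above carries the data-out
 * flags, the command is parked in Q2T_STATE_NEED_DATA, and the arrival of
 * the data is reported back through q2t_do_ctio_completion(), which then
 * calls scst_rx_data().
 */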
2376
2377 static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd)
2378 {
2379         int res;
2380         struct q2t_cmd *cmd;
2381
2382         TRACE_ENTRY();
2383
2384         TRACE(TRACE_SCSI, "tag=%Ld", scst_cmd_get_tag(scst_cmd));
2385
2386         cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2387         cmd->bufflen = scst_cmd_get_bufflen(scst_cmd);
2388         cmd->sg = scst_cmd_get_sg(scst_cmd);
2389         cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2390         cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2391
2392         res = __q2t_rdy_to_xfer(cmd);
2393
2394         TRACE_EXIT();
2395         return res;
2396 }
2397
2398 /* If hardware_lock is held on entry, it might be dropped, then reacquired */
2399 static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2400         atio_entry_t *atio, int ha_locked)
2401 {
2402         ctio_ret_entry_t *ctio;
2403         unsigned long flags = 0; /* to stop compiler's warning */
2404         int do_tgt_cmd_done = 0;
2405
2406         TRACE_ENTRY();
2407
2408         TRACE_DBG("Sending TERM EXCH CTIO (ha=%p)", ha);
2409
2410         /* Send marker if required */
2411         if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
2412                 goto out;
2413
2414         if (!ha_locked)
2415                 spin_lock_irqsave(&ha->hardware_lock, flags);
2416
2417         ctio = (ctio_ret_entry_t *)tgt_data.req_pkt(ha);
2418         if (ctio == NULL) {
2419                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
2420                         "request packet", ha->instance, __func__);
2421                 goto out_unlock;
2422         }
2423
2424         ctio->entry_type = CTIO_RET_TYPE;
2425         ctio->entry_count = 1;
2426         if (cmd != NULL) {
2427                 if (cmd->state < Q2T_STATE_PROCESSED) {
2428                         PRINT_ERROR("qla2x00tgt(%ld): Terminating cmd %p with "
2429                                 "incorrect state %d", ha->instance, cmd,
2430                                 cmd->state);
2431                 } else
2432                         do_tgt_cmd_done = 1;
2433         }
2434         ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2435
2436         /* Set IDs */
2437         SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
2438         ctio->rx_id = atio->rx_id;
2439
2440         /* Most likely, it isn't needed */
2441         ctio->residual = atio->data_length;
2442         if (ctio->residual != 0)
2443                 ctio->scsi_status |= SS_RESIDUAL_UNDER;
2444
2445         ctio->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_TERM_EXCH |
2446                         OF_NO_DATA | OF_SS_MODE_1);
2447         ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
2448
2449         TRACE_BUFFER("CTIO TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
2450
2451         q2t_exec_queue(ha);
2452
2453 out_unlock:
2454         if (!ha_locked)
2455                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2456
2457         if (do_tgt_cmd_done) {
2458                 if (!ha_locked && !in_interrupt()) {
2459                         msleep(250); /* just in case */
2460                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
2461                 } else
2462                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
2463                 /* !! At this point cmd could be already freed !! */
2464         }
2465
2466 out:
2467         TRACE_EXIT();
2468         return;
2469 }
2470
2471 /* If hardware_lock is held on entry, it might be dropped, then reacquired */
2472 static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2473         atio7_entry_t *atio, int ha_locked)
2474 {
2475         ctio7_status1_entry_t *ctio;
2476         unsigned long flags = 0; /* to stop compiler's warning */
2477         int do_tgt_cmd_done = 0;
2478
2479         TRACE_ENTRY();
2480
2481         TRACE_DBG("Sending TERM EXCH CTIO7 (ha=%p)", ha);
2482
2483         /* Send marker if required */
2484         if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
2485                 goto out;
2486
2487         if (!ha_locked)
2488                 spin_lock_irqsave(&ha->hardware_lock, flags);
2489
2490         ctio = (ctio7_status1_entry_t *)tgt_data.req_pkt(ha);
2491         if (ctio == NULL) {
2492                 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
2493                         "request packet", ha->instance, __func__);
2494                 goto out_unlock;
2495         }
2496
2497         ctio->common.entry_type = CTIO_TYPE7;
2498         ctio->common.entry_count = 1;
2499         if (cmd != NULL) {
2500                 ctio->common.nport_handle = cmd->loop_id;
2501                 if (cmd->state < Q2T_STATE_PROCESSED) {
2502                         PRINT_ERROR("qla2x00tgt(%ld): Terminating cmd %p with "
2503                                 "incorrect state %d", ha->instance, cmd,
2504                                  cmd->state);
2505                 } else
2506                         do_tgt_cmd_done = 1;
2507         } else
2508                 ctio->common.nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
2509         ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2510         ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
2511         ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
2512         ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
2513         ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
2514         ctio->common.exchange_addr = atio->exchange_addr;
2515         ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
2516                 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
2517         ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
2518
2519         /* Most likely, it isn't needed */
2520         ctio->residual = atio->fcp_cmnd.data_length;
2521         if (ctio->residual != 0)
2522                 ctio->scsi_status |= SS_RESIDUAL_UNDER;
2523
2524         TRACE_BUFFER("CTIO7 TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
2525
2526         q2t_exec_queue(ha);
2527
2528 out_unlock:
2529         if (!ha_locked)
2530                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2531
2532         if (do_tgt_cmd_done) {
2533                 if (!ha_locked && !in_interrupt()) {
2534                         msleep(250); /* just in case */
2535                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
2536                 } else
2537                         scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
2538                 /* !! At this point cmd could be already freed !! */
2539         }
2540
2541 out:
2542         TRACE_EXIT();
2543         return;
2544 }
2545
2546 static inline void q2t_free_cmd(struct q2t_cmd *cmd)
2547 {
2548         if (unlikely(cmd->free_sg))
2549                 kfree(cmd->sg);
2550         kmem_cache_free(q2t_cmd_cachep, cmd);
2551 }
2552
2553 static void q2t_on_free_cmd(struct scst_cmd *scst_cmd)
2554 {
2555         struct q2t_cmd *cmd;
2556
2557         TRACE_ENTRY();
2558
2559         TRACE(TRACE_SCSI, "Freeing command %p, tag %Ld", scst_cmd,
2560                 scst_cmd_get_tag(scst_cmd));
2561
2562         cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2563         scst_cmd_set_tgt_priv(scst_cmd, NULL);
2564
2565         q2t_free_cmd(cmd);
2566
2567         TRACE_EXIT();
2568         return;
2569 }
2570
2571 /* ha->hardware_lock supposed to be held on entry */
2572 static int q2t_prepare_srr_ctio(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2573         void *ctio)
2574 {
2575         struct srr_ctio *sc;
2576         struct q2t_tgt *tgt = ha->tgt;
2577         int res = 0;
2578         struct srr_imm *imm;
2579
2580         tgt->ctio_srr_id++;
2581
2582         TRACE_MGMT_DBG("qla2x00tgt(%ld): CTIO with SRR "
2583                 "status received", ha->instance);
2584
2585         if (ctio == NULL) {
2586                 PRINT_ERROR("qla2x00tgt(%ld): SRR CTIO, "
2587                         "but ctio is NULL", ha->instance);
2588                 res = -EINVAL;
2589                 goto out;
2590         }
2591
2592         sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2593         if (sc != NULL) {
2594                 sc->cmd = cmd;
2595                 /* IRQ is already OFF */
2596                 spin_lock(&tgt->srr_lock);
2597                 sc->srr_id = tgt->ctio_srr_id;
2598                 list_add_tail(&sc->srr_list_entry, 
2599                         &tgt->srr_ctio_list);
2600                 TRACE_MGMT_DBG("CTIO SRR %p added (id %d)",
2601                         sc, sc->srr_id);
2602                 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2603                         int found = 0;
2604                         list_for_each_entry(imm, &tgt->srr_imm_list,
2605                                         srr_list_entry) {
2606                                 if (imm->srr_id == sc->srr_id) {
2607                                         found = 1;
2608                                         break;
2609                                 }
2610                         }
2611                         if (found) {
2612                                 TRACE_MGMT_DBG("%s", "Scheduling srr work");
2613                                 schedule_work(&tgt->srr_work);
2614                         } else {
2615                                 PRINT_ERROR("qla2x00tgt(%ld): imm_srr_id "
2616                                         "== ctio_srr_id (%d), but there is no "
2617                                         "corresponding SRR IMM, deleting CTIO "
2618                                         "SRR %p", ha->instance, tgt->ctio_srr_id,
2619                                         sc);
2620                                 list_del(&sc->srr_list_entry);
2621                                 spin_unlock(&tgt->srr_lock);
2622
2623                                 kfree(sc);
2624                                 res = -EINVAL;
2625                                 goto out;
2626                         }
2627                 }
2628                 spin_unlock(&tgt->srr_lock);
2629         } else {
2630                 struct srr_imm *ti;
2631                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Unable to "
2632                     "allocate SRR CTIO entry", ha->instance);
2633                 spin_lock(&tgt->srr_lock);
2634                 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2635                                         srr_list_entry) {
2636                         if (imm->srr_id == tgt->ctio_srr_id) {
2637                                 TRACE_MGMT_DBG("IMM SRR %p deleted "
2638                                         "(id %d)", imm, imm->srr_id);
2639                                 list_del(&imm->srr_list_entry);
2640                                 q2t_reject_free_srr_imm(ha, imm, 1);
2641                         }
2642                 }
2643                 spin_unlock(&tgt->srr_lock);
2644                 res = -ENOMEM;
2645                 goto out;
2646         }
2647
2648 out:
2649         TRACE_EXIT_RES(res);
2650         return res;
2651 }
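
/*
 * Pairing note (inferred from the code above and from the SRR immediate
 * notify handling elsewhere in this file): the firmware reports an SRR
 * both as an immediate notify and as a CTIO completion status.  The two
 * halves are matched through the monotonically increasing imm_srr_id and
 * ctio_srr_id counters under srr_lock; only when both halves with the
 * same id are present is srr_work scheduled to process the
 * retransmission request.
 */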
2652
2653 /*
2654  * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2655  */
2656 static int q2t_term_ctio_exchange(scsi_qla_host_t *ha, void *ctio,
2657         struct q2t_cmd *cmd, uint32_t status)
2658 {
2659         int term = 0;
2660
2661         if (IS_FWI2_CAPABLE(ha)) {
2662                 if (ctio != NULL) {
2663                         ctio7_fw_entry_t *c = (ctio7_fw_entry_t *)ctio;
2664                         term = !(c->flags & 
2665                                 __constant_cpu_to_le16(OF_TERM_EXCH));
2666                 } else
2667                         term = 1;
2668                 if (term) {
2669                         q24_send_term_exchange(ha, cmd,
2670                                 &cmd->atio.atio7, 1);
2671                 }
2672         } else {
2673                 if (status != CTIO_SUCCESS)
2674                         q2x_modify_command_count(ha, 1, 0);
2675 #if 0 /* it seems this isn't needed */
2676                 if (ctio != NULL) {
2677                         ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
2678                         term = !(c->flags & 
2679                                 __constant_cpu_to_le16(
2680                                         CTIO7_FLAGS_TERMINATE));
2681                 } else
2682                         term = 1;
2683                 if (term) {
2684                         q2x_send_term_exchange(ha, cmd,
2685                                 &cmd->atio.atio2x, 1);
2686                 }
2687 #endif
2688         }
2689         return term;
2690 }
2691
2692 /* ha->hardware_lock supposed to be held on entry */
2693 static inline struct q2t_cmd *q2t_get_cmd(scsi_qla_host_t *ha, uint32_t handle)
2694 {
2695         handle--;
2696         if (ha->cmds[handle] != NULL) {
2697                 struct q2t_cmd *cmd = ha->cmds[handle];
2698                 ha->cmds[handle] = NULL;
2699                 return cmd;
2700         } else
2701                 return NULL;
2702 }
2703
2704 /* ha->hardware_lock supposed to be held on entry */
2705 static struct q2t_cmd *q2t_ctio_to_cmd(scsi_qla_host_t *ha, uint32_t handle,
2706         void *ctio)
2707 {
2708         struct q2t_cmd *cmd = NULL;
2709
2710         /* Clear out internal marks */
2711         handle &= ~(CTIO_COMPLETION_HANDLE_MARK | CTIO_INTERMEDIATE_HANDLE_MARK);
2712
2713         if (handle != Q2T_NULL_HANDLE) {
2714                 if (unlikely(handle == Q2T_SKIP_HANDLE)) {
2715                         TRACE_DBG("%s", "SKIP_HANDLE CTIO");
2716                         goto out;
2717                 }
2718                 /* handle-1 is actually used */
2719                 if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
2720                         PRINT_ERROR("qla2x00tgt(%ld): Wrong handle %x "
2721                                 "received", ha->instance, handle);
2722                         goto out;
2723                 }
2724                 cmd = q2t_get_cmd(ha, handle);
2725                 if (unlikely(cmd == NULL)) {
2726                         PRINT_WARNING("qla2x00tgt(%ld): Suspicious: unable to "
2727                                    "find the command with handle %x", 
2728                                    ha->instance, handle);
2729                         goto out;
2730                 }
2731         } else if (ctio != NULL) {
2732                 uint16_t loop_id;
2733                 int tag;
2734                 struct q2t_sess *sess;
2735                 struct scst_cmd *scst_cmd;
2736
2737                 if (IS_FWI2_CAPABLE(ha)) {
2738                         /* We can't get loop ID from CTIO7 */
2739                         PRINT_ERROR("qla2x00tgt(%ld): Wrong CTIO received: "
2740                                 "QLA24xx doesn't support NULL handles",
2741                                 ha->instance);
2742                         goto out;
2743                 } else {
2744                         ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
2745                         loop_id = GET_TARGET_ID(ha, c);
2746                         tag = c->rx_id;
2747                 }
2748
2749                 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
2750                 if (sess == NULL) {
2751                         PRINT_WARNING("qla2x00tgt(%ld): Suspicious: "
2752                                    "ctio_completion for non-existing session "
2753                                    "(loop_id %d, tag %d)", 
2754                                    ha->instance, loop_id, tag);
2755                         goto out;
2756                 }
2757
2758                 scst_cmd = scst_find_cmd_by_tag(sess->scst_sess, tag);
2759                 if (scst_cmd == NULL) {
2760                         PRINT_WARNING("qla2x00tgt(%ld): Suspicious: unable to "
2761                              "find the command with tag %d (loop_id %d)", 
2762                              ha->instance, tag, loop_id);
2763                         goto out;
2764                 }
2765
2766                 cmd = (struct q2t_cmd*)scst_cmd_get_tgt_priv(scst_cmd);
2767                 TRACE_DBG("Found q2t_cmd %p (tag %d)", cmd, tag);
2768         }
2769
2770 out:
2771         return cmd;
2772 }
2773
2774 /*
2775  * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2776  */
2777 static void q2t_do_ctio_completion(scsi_qla_host_t *ha, uint32_t handle,
2778         uint32_t status, void *ctio)
2779 {
2780         struct scst_cmd *scst_cmd;
2781         struct q2t_cmd *cmd;
2782         enum scst_exec_context context;
2783
2784         TRACE_ENTRY();
2785
2786 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2787         context = SCST_CONTEXT_THREAD;
2788 #else
2789         context = SCST_CONTEXT_TASKLET;
2790 #endif
2791
2792         TRACE(TRACE_DEBUG|TRACE_SCSI, "handle(ctio %p status %#x) <- %08x", 
2793               ctio, status, handle);
2794
2795         if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2796                 /* That could happen only in case of an error/reset/abort */
2797                 if (status != CTIO_SUCCESS) {
2798                         TRACE_MGMT_DBG("Intermediate CTIO received (status %x)",
2799                                 status);
2800                 }
2801                 goto out;
2802         }
2803
2804         cmd = q2t_ctio_to_cmd(ha, handle, ctio);
2805         if (cmd == NULL) {
2806                 if (status != CTIO_SUCCESS)
2807                         q2t_term_ctio_exchange(ha, ctio, NULL, status);
2808                 goto out;
2809         }
2810
2811         scst_cmd = cmd->scst_cmd;
2812
2813         if (unlikely(status != CTIO_SUCCESS)) {
2814                 switch (status & 0xFFFF) {
2815                 case CTIO_LIP_RESET:
2816                 case CTIO_TARGET_RESET:
2817                 case CTIO_ABORTED:
2818                 case CTIO_TIMEOUT:
2819                 case CTIO_INVALID_RX_ID:
2820                         /* they are OK */
2821                         TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): CTIO with "
2822                                 "status %#x received, state %x, scst_cmd %p, "
2823                                 "op %x (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2824                                 "TIMEOUT=b, INVALID_RX_ID=8)", ha->instance,
2825                                 status, cmd->state, scst_cmd, scst_cmd->cdb[0]);
2826                         break;
2827
2828                 case CTIO_PORT_LOGGED_OUT:
2829                 case CTIO_PORT_UNAVAILABLE:
2830                         PRINT_INFO("qla2x00tgt(%ld): CTIO with PORT LOGGED "
2831                                 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2832                                 "received (state %x, scst_cmd %p, op %x)",
2833                                 ha->instance, status, cmd->state, scst_cmd,
2834                                 scst_cmd->cdb[0]);
2835                         break;
2836
2837                 case CTIO_SRR_RECEIVED:
2838                         if (q2t_prepare_srr_ctio(ha, cmd, ctio) != 0)
2839                                 break;
2840                         else
2841                                 goto out;
2842
2843                 default:
2844                         PRINT_ERROR("qla2x00tgt(%ld): CTIO with error status "
2845                                 "0x%x received (state %x, scst_cmd %p, op %x)",
2846                                 ha->instance, status, cmd->state, scst_cmd,
2847                                 scst_cmd->cdb[0]);
2848                         break;
2849                 }
2850
2851                 if (cmd->state != Q2T_STATE_NEED_DATA)
2852                         if (q2t_term_ctio_exchange(ha, ctio, cmd, status))
2853                                 goto out;
2854         }
2855
2856         if (cmd->state == Q2T_STATE_PROCESSED) {
2857                 TRACE_DBG("Command %p finished", cmd);
2858                 if (q2t_has_data(cmd)) {
2859                         pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2860                                 scst_to_dma_dir(cmd->data_direction));
2861                 }
2862         } else if (cmd->state == Q2T_STATE_NEED_DATA) {
2863                 int rx_status = SCST_RX_STATUS_SUCCESS;
2864
2865                 cmd->state = Q2T_STATE_DATA_IN;
2866
2867                 if (unlikely(status != CTIO_SUCCESS))
2868                         rx_status = SCST_RX_STATUS_ERROR;
2869
2870                 TRACE_DBG("Data received, context %x, rx_status %d",
2871                       context, rx_status);
2872
2873                 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2874                                 scst_to_dma_dir(cmd->data_direction));
2875
2876                 scst_rx_data(scst_cmd, rx_status, context);
2877                 goto out;
2878         } else if (cmd->state == Q2T_STATE_ABORTED) {
2879                 TRACE_MGMT_DBG("Aborted command %p (tag %d) finished", cmd,
2880                         cmd->tag);
2881         } else {
2882                 PRINT_ERROR("qla2x00tgt(%ld): A command in state %d should "
2883                         "not have returned a CTIO completion", ha->instance, cmd->state);
2884         }
2885
2886         if (unlikely(status != CTIO_SUCCESS)) {
2887                 TRACE_MGMT_DBG("%s", "Finishing failed CTIO");
2888                 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
2889         }
2890
2891         scst_tgt_cmd_done(scst_cmd, context);
2892
2893 out:
2894         TRACE_EXIT();
2895         return;
2896 }
2897
2898 /* ha->hardware_lock supposed to be held on entry */
2899 /* called via callback from qla2xxx */
2900 static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
2901 {
2902         struct q2t_tgt *tgt = ha->tgt;
2903
2904         TRACE_ENTRY();
2905
2906         if (likely(tgt != NULL)) {
2907                 tgt->irq_cmd_count++;
2908                 q2t_do_ctio_completion(ha, handle, CTIO_SUCCESS, NULL);
2909                 tgt->irq_cmd_count--;
2910         } else {
2911                 TRACE_DBG("CTIO, but target mode not enabled (ha %p handle "
2912                         "%#x)", ha, handle);
2913         }
2914
2915         TRACE_EXIT();
2916         return;
2917 }
2918
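/*
 * Mapping of a 2xxx ATIO to an SCST command, as implemented below
 * (informal summary of the code):
 *
 *   atio->lun (LE)         -> swab16(le16_to_cpu()) -> 2-byte LUN for SCST
 *   atio->cdb              -> CDB (up to Q2T_MAX_CDB_LEN bytes)
 *   atio->rx_id            -> cmd->tag / SCST tag
 *   atio->execution_codes  -> expected data direction (READ/WRITE/NONE)
 *   atio->data_length (LE) -> expected transfer length
 *   atio->task_codes       -> SCST queue type (ORDERED if unknown)
 *
 * scst_rx_cmd() and scst_cmd_init_done() are called with SCST_ATOMIC,
 * since the hardware lock is held by the caller.
 */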
2919 /* ha->hardware_lock is supposed to be held on entry */
2920 static int q2x_do_send_cmd_to_scst(struct q2t_cmd *cmd)
2921 {
2922         int res = 0;
2923         struct q2t_sess *sess = cmd->sess;
2924         uint16_t lun;
2925         atio_entry_t *atio = &cmd->atio.atio2x;
2926         scst_data_direction dir;
2927         int context;
2928
2929         TRACE_ENTRY();
2930
2931         /* Convert the LUN to network byte order */
2932         lun = swab16(le16_to_cpu(atio->lun));
2933         cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, (uint8_t *)&lun,
2934                                     sizeof(lun), atio->cdb, Q2T_MAX_CDB_LEN,
2935                                     SCST_ATOMIC);
2936
2937         if (cmd->scst_cmd == NULL) {
2938                 PRINT_ERROR("%s", "qla2x00tgt: scst_rx_cmd() failed");
2939                 res = -EFAULT;
2940                 goto out;
2941         }
2942
2943         cmd->tag = atio->rx_id;
2944         scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
2945         scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
2946
2947         if (atio->execution_codes & ATIO_EXEC_READ)
2948                 dir = SCST_DATA_READ;
2949         else if (atio->execution_codes & ATIO_EXEC_WRITE)
2950                 dir = SCST_DATA_WRITE;
2951         else
2952                 dir = SCST_DATA_NONE;
2953         scst_cmd_set_expected(cmd->scst_cmd, dir,
2954                 le32_to_cpu(atio->data_length));
2955
2956         switch (atio->task_codes) {
2957         case ATIO_SIMPLE_QUEUE:
2958                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2959                 break;
2960         case ATIO_HEAD_OF_QUEUE:
2961                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
2962                 break;
2963         case ATIO_ORDERED_QUEUE:
2964                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2965                 break;
2966         case ATIO_ACA_QUEUE:
2967                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
2968                 break;
2969         case ATIO_UNTAGGED:
2970                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
2971                 break;
2972         default:
2973                 PRINT_ERROR("qla2x00tgt: unknown task code %x, using "
2974                         "ORDERED instead", atio->task_codes);
2975                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2976                 break;
2977         }
2978
2979 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2980         context = SCST_CONTEXT_THREAD;
2981 #else
2982         context = SCST_CONTEXT_TASKLET;
2983 #endif
2984
2985         TRACE_DBG("Context %x", context);
2986         TRACE(TRACE_SCSI, "START Command (tag %d, queue_type %d)",
2987                 cmd->tag, cmd->scst_cmd->queue_type);
2988         scst_cmd_init_done(cmd->scst_cmd, context);
2989
2990 out:
2991         TRACE_EXIT_RES(res);
2992         return res;
2993 }
2994
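/*
 * The 24xx path below does the same job from a type 7 ATIO (atio7_entry_t):
 * the LUN and CDB come straight from the embedded FCP_CMND IU, the exchange
 * address is used as the tag, rddata/wrdata select the expected data
 * direction and task_attr selects the SCST queue type (again falling back
 * to ORDERED for unknown values).
 */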
2995 /* ha->hardware_lock is supposed to be held on entry */
2996 static int q24_do_send_cmd_to_scst(struct q2t_cmd *cmd)
2997 {
2998         int res = 0;
2999         struct q2t_sess *sess = cmd->sess;
3000         atio7_entry_t *atio = &cmd->atio.atio7;
3001         scst_data_direction dir;
3002         int context;
3003
3004         TRACE_ENTRY();
3005
3006         cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, 
3007                 (uint8_t *)&atio->fcp_cmnd.lun, sizeof(atio->fcp_cmnd.lun),
3008                 atio->fcp_cmnd.cdb, Q2T_MAX_CDB_LEN, SCST_ATOMIC);
3009
3010         if (cmd->scst_cmd == NULL) {
3011                 PRINT_ERROR("%s", "qla2x00tgt: scst_rx_cmd() failed");
3012                 res = -EFAULT;
3013                 goto out;
3014         }
3015
3016         cmd->tag = atio->exchange_addr;
3017         scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
3018         scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
3019
3020         if (atio->fcp_cmnd.rddata)
3021                 dir = SCST_DATA_READ;
3022         else if (atio->fcp_cmnd.wrdata)
3023                 dir = SCST_DATA_WRITE;
3024         else
3025                 dir = SCST_DATA_NONE;
3026         scst_cmd_set_expected(cmd->scst_cmd, dir, 
3027                 be32_to_cpu(atio->fcp_cmnd.data_length));
3028
3029         switch (atio->fcp_cmnd.task_attr) {
3030         case ATIO_SIMPLE_QUEUE:
3031                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3032                 break;
3033         case ATIO_HEAD_OF_QUEUE:
3034                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3035                 break;
3036         case ATIO_ORDERED_QUEUE:
3037                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3038                 break;
3039         case ATIO_ACA_QUEUE:
3040                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
3041                 break;
3042         case ATIO_UNTAGGED:
3043                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
3044                 break;
3045         default:
3046                 PRINT_ERROR("qla2x00tgt: unknown task code %x, using "
3047                         "ORDERED instead", atio->fcp_cmnd.task_attr);
3048                 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3049                 break;
3050         }
3051
3052 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
3053         context = SCST_CONTEXT_THREAD;
3054 #else
3055         context = SCST_CONTEXT_TASKLET;
3056 #endif
3057
3058         TRACE_DBG("Context %x", context);
3059         TRACE(TRACE_SCSI, "START Command %p (tag %d, queue type %x)", cmd,
3060                 cmd->tag, cmd->scst_cmd->queue_type);
3061         scst_cmd_init_done(cmd->scst_cmd, context);
3062
3063 out:
3064         TRACE_EXIT_RES(res);
3065         return res;
3066 }
3067
3068 /* ha->hardware_lock supposed to be held on entry */
3069 static int q2t_do_send_cmd_to_scst(scsi_qla_host_t *ha,
3070         struct q2t_cmd *cmd, struct q2t_sess *sess)
3071 {
3072         int res;
3073
3074         TRACE_ENTRY();
3075
3076         cmd->sess = sess;
3077         cmd->loop_id = sess->loop_id;
3078         cmd->conf_compl_supported = sess->conf_compl_supported;
3079
3080         if (IS_FWI2_CAPABLE(ha))
3081                 res = q24_do_send_cmd_to_scst(cmd);
3082         else
3083                 res = q2x_do_send_cmd_to_scst(cmd);
3084
3085         TRACE_EXIT_RES(res);
3086         return res;
3087 }
3088
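/*
 * Command reception path: allocate a struct q2t_cmd from q2t_cmd_cachep,
 * copy the ATIO into it and look the initiator session up by S_ID (24xx)
 * or loop ID (2xxx).  If no session exists yet, the command is not dropped:
 * a q2t_sess_work_param is queued on tgt->sess_works_list and tgt->sess_work
 * is scheduled, so the session can be created outside of interrupt context
 * and the command processed afterwards.
 */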
3089 /* ha->hardware_lock supposed to be held on entry */
3090 static int q2t_send_cmd_to_scst(scsi_qla_host_t *ha, atio_t *atio)
3091 {
3092         int res = 0;
3093         struct q2t_tgt *tgt = ha->tgt;
3094         struct q2t_sess *sess;
3095         struct q2t_cmd *cmd;
3096
3097         TRACE_ENTRY();
3098
3099         if (unlikely(tgt->tgt_shutdown)) {
3100                 TRACE_MGMT_DBG("New command while target %p is shutting "
3101                         "down", tgt);
3102                 res = -EFAULT;
3103                 goto out;
3104         }
3105
3106         cmd = kmem_cache_zalloc(q2t_cmd_cachep, GFP_ATOMIC);
3107         if (cmd == NULL) {
3108                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of cmd failed");
3109                 res = -ENOMEM;
3110                 goto out;
3111         }
3112
3113         memcpy(&cmd->atio.atio2x, atio, sizeof(*atio));
3114         cmd->state = Q2T_STATE_NEW;
3115         cmd->tgt = ha->tgt;
3116
3117         if (IS_FWI2_CAPABLE(ha)) {
3118                 atio7_entry_t *a = (atio7_entry_t *)atio;
3119                 sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
3120                 if (unlikely(sess == NULL)) {
3121                         TRACE_MGMT_DBG("qla2x00tgt(%ld): Unable to find "
3122                                 "wwn login (s_id %x:%x:%x), trying to create "
3123                                 "it manually", ha->instance, 
3124                                 a->fcp_hdr.s_id[0], a->fcp_hdr.s_id[1],
3125                                 a->fcp_hdr.s_id[2]);
3126                         goto out_sched;
3127                 }
3128         } else {
3129                 sess = q2t_find_sess_by_loop_id(tgt, 
3130                         GET_TARGET_ID(ha, (atio_entry_t *)atio));
3131                 if (unlikely(sess == NULL)) {
3132                         TRACE_MGMT_DBG("qla2x00tgt(%ld): Unable to find "
3133                                 "wwn login (loop_id=%d), trying to create it "
3134                                 "manually", ha->instance,
3135                                 GET_TARGET_ID(ha, (atio_entry_t *)atio));
3136                         goto out_sched;
3137                 }
3138         }
3139
3140         res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
3141         if (unlikely(res != 0))
3142                 goto out_free_cmd;
3143
3144 out:
3145         TRACE_EXIT_RES(res);
3146         return res;
3147
3148 out_free_cmd:
3149         q2t_free_cmd(cmd);
3150         goto out;
3151
3152 out_sched:
3153         {
3154                 struct q2t_sess_work_param *prm;
3155                 unsigned long flags;
3156
3157                 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
3158                 if (prm == NULL) {
3159                         PRINT_ERROR("%s", "Unable to create session work, "
3160                                 "command will be refused");
3161                         res = -1;
3162                         goto out_free_cmd;
3163                 }
3164
3165                 TRACE_MGMT_DBG("Scheduling work to find session for cmd %p",
3166                         cmd);
3167
3168                 prm->cmd = cmd;
3169
3170                 spin_lock_irqsave(&tgt->sess_work_lock, flags);
3171                 if (list_empty(&tgt->sess_works_list))
3172                         tgt->tm_to_unknown = 0;
3173                 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
3174                 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
3175
3176                 schedule_work(&tgt->sess_work);
3177         }
3178         goto out;
3179 }
3180
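/*
 * Translation of the driver's task management codes to SCST functions,
 * as handled by the switch below:
 *
 *   Q2T_CLEAR_ACA       -> SCST_CLEAR_ACA
 *   Q2T_TARGET_RESET    -> SCST_TARGET_RESET
 *   Q2T_LUN_RESET       -> SCST_LUN_RESET
 *   Q2T_CLEAR_TS        -> SCST_CLEAR_TASK_SET
 *   Q2T_ABORT_TS        -> SCST_ABORT_TASK_SET
 *   Q2T_ABORT_ALL       -> SCST_ABORT_ALL_TASKS
 *   Q2T_ABORT_ALL_SESS  -> SCST_ABORT_ALL_TASKS_SESS
 *   Q2T_NEXUS_LOSS_SESS -> SCST_NEXUS_LOSS_SESS
 *   Q2T_NEXUS_LOSS      -> SCST_NEXUS_LOSS
 *
 * All of them go through scst_rx_mgmt_fn_lun() in atomic context; the
 * original IOCB is saved in mcmd->orig_iocb for the completion callback.
 */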
3181 /* ha->hardware_lock supposed to be held on entry */
3182 static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
3183         int lun_size, int fn, void *iocb, int flags)
3184 {
3185         int res = 0, rc = -1;
3186         struct q2t_mgmt_cmd *mcmd;
3187
3188         TRACE_ENTRY();
3189
3190         mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
3191         if (mcmd == NULL) {
3192                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Allocation of management "
3193                         "command failed, some commands and their data could "
3194                         "leak", sess->tgt->ha->instance);
3195                 res = -ENOMEM;
3196                 goto out;
3197         }
3198         memset(mcmd, 0, sizeof(*mcmd));
3199
3200         mcmd->sess = sess;
3201         if (iocb) {
3202                 memcpy(&mcmd->orig_iocb.notify_entry, iocb, 
3203                         sizeof(mcmd->orig_iocb.notify_entry));
3204         }
3205         mcmd->flags = flags;
3206
3207         switch (fn) {
3208         case Q2T_CLEAR_ACA:
3209                 TRACE(TRACE_MGMT, "%s", "CLEAR_ACA received");
3210                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_ACA,
3211                                          lun, lun_size, SCST_ATOMIC, mcmd);
3212                 break;
3213
3214         case Q2T_TARGET_RESET:
3215                 TRACE(TRACE_MGMT, "%s", "TARGET_RESET received");
3216                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
3217                                          lun, lun_size, SCST_ATOMIC, mcmd);
3218                 break;
3219
3220         case Q2T_LUN_RESET:
3221                 TRACE(TRACE_MGMT, "%s", "LUN_RESET received");
3222                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
3223                                          lun, lun_size, SCST_ATOMIC, mcmd);
3224                 break;
3225
3226         case Q2T_CLEAR_TS:
3227                 TRACE(TRACE_MGMT, "%s", "CLEAR_TS received");
3228                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_TASK_SET,
3229                                          lun, lun_size, SCST_ATOMIC, mcmd);
3230                 break;
3231
3232         case Q2T_ABORT_TS:
3233                 TRACE(TRACE_MGMT, "%s", "ABORT_TS received");
3234                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_ABORT_TASK_SET,
3235                                          lun, lun_size, SCST_ATOMIC, mcmd);
3236                 break;
3237
3238         case Q2T_ABORT_ALL:
3239                 TRACE(TRACE_MGMT, "%s", "Doing ABORT_ALL_TASKS");
3240                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
3241                                          SCST_ABORT_ALL_TASKS,
3242                                          lun, lun_size, SCST_ATOMIC, mcmd);
3243                 break;
3244
3245         case Q2T_ABORT_ALL_SESS:
3246                 TRACE(TRACE_MGMT, "%s", "Doing ABORT_ALL_TASKS_SESS");
3247                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
3248                                          SCST_ABORT_ALL_TASKS_SESS,
3249                                          lun, lun_size, SCST_ATOMIC, mcmd);
3250                 break;
3251
3252         case Q2T_NEXUS_LOSS_SESS:
3253                 TRACE(TRACE_MGMT, "%s", "Doing NEXUS_LOSS_SESS");
3254                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS_SESS,
3255                                          lun, lun_size, SCST_ATOMIC, mcmd);
3256                 break;
3257
3258         case Q2T_NEXUS_LOSS:
3259                 TRACE(TRACE_MGMT, "%s", "Doing NEXUS_LOSS");
3260                 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS,
3261                                          lun, lun_size, SCST_ATOMIC, mcmd);
3262                 break;
3263
3264         default:
3265                 PRINT_ERROR("qla2x00tgt(%ld): Unknown task mgmt fn 0x%x",
3266                             sess->tgt->ha->instance, fn);
3267                 rc = -1;
3268                 break;
3269         }
3270
3271         if (rc != 0) {
3272                 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_lun() failed: %d",
3273                             sess->tgt->ha->instance, rc);
3274                 res = -EFAULT;
3275                 goto out_free;
3276         }
3277
3278 out:
3279         TRACE_EXIT_RES(res);
3280         return res;
3281
3282 out_free:
3283         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
3284         goto out;
3285 }
3286
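/*
 * For 24xx HBAs the LUN and TM flags below are taken from the FCP_CMND IU
 * of the ATIO and the session is found by S_ID; for older ISPs they come
 * from the immediate notify IOCB (LUN byte-swapped to big endian) and the
 * session is found by loop ID.  An unknown session only sets
 * tgt->tm_to_unknown and the request is rejected with -ESRCH.
 */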
3287 /* ha->hardware_lock supposed to be held on entry */
3288 static int q2t_handle_task_mgmt(scsi_qla_host_t *ha, void *iocb)
3289 {
3290         int res = 0;
3291         struct q2t_tgt *tgt;
3292         struct q2t_sess *sess;
3293         uint8_t *lun;
3294         uint16_t lun_data;
3295         int lun_size;
3296         int fn;
3297
3298         TRACE_ENTRY();
3299
3300         tgt = ha->tgt;
3301         if (IS_FWI2_CAPABLE(ha)) {
3302                 atio7_entry_t *a = (atio7_entry_t *)iocb;
3303                 lun = (uint8_t *)&a->fcp_cmnd.lun;
3304                 lun_size = sizeof(a->fcp_cmnd.lun);
3305                 fn = a->fcp_cmnd.task_mgmt_flags;
3306                 sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
3307                 if (sess != NULL) {
3308                         sess->s_id.b.al_pa = a->fcp_hdr.s_id[2];
3309                         sess->s_id.b.area = a->fcp_hdr.s_id[1];
3310                         sess->s_id.b.domain = a->fcp_hdr.s_id[0];
3311                 }
3312         } else {
3313                 notify_entry_t *n = (notify_entry_t *)iocb;
3314                 /* Convert the LUN to network byte order */
3315                 lun_data = swab16(le16_to_cpu(n->lun));
3316                 lun = (uint8_t *)&lun_data;
3317                 lun_size = sizeof(lun_data);
3318                 fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
3319                 sess = q2t_find_sess_by_loop_id(tgt, GET_TARGET_ID(ha, n));
3320         }
3321
3322         if (sess == NULL) {
3323                 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task mgmt fn 0x%x for "
3324                         "non-existent session", ha->instance, fn);
3325                 tgt->tm_to_unknown = 1;
3326                 res = -ESRCH;
3327                 goto out;
3328         }
3329
3330         res = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
3331
3332 out:
3333         TRACE_EXIT_RES(res);
3334         return res;
3335 }
3336
3337 /* ha->hardware_lock supposed to be held on entry */
3338 static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
3339 {
3340         int res = 0, rc;
3341         struct q2t_mgmt_cmd *mcmd;
3342         struct q2t_sess *sess;
3343         int loop_id;
3344         uint32_t tag;
3345
3346         TRACE_ENTRY();
3347
3348         loop_id = GET_TARGET_ID(ha, iocb);
3349         tag = le16_to_cpu(iocb->seq_id);
3350
3351         sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
3352         if (sess == NULL) {
3353                 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task abort for non-existent "
3354                         "session", ha->instance);
3355                 ha->tgt->tm_to_unknown = 1;
3356                 res = -EFAULT;
3357                 goto out;
3358         }
3359         
3360         mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
3361         if (mcmd == NULL) {
3362                 PRINT_ERROR("%s: Allocation of ABORT cmd failed", __func__);
3363                 res = -ENOMEM;
3364                 goto out;
3365         }
3366         memset(mcmd, 0, sizeof(*mcmd));
3367
3368         mcmd->sess = sess;
3369         memcpy(&mcmd->orig_iocb.notify_entry, iocb, 
3370                 sizeof(mcmd->orig_iocb.notify_entry));
3371
3372         rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag, 
3373                 SCST_ATOMIC, mcmd);
3374         if (rc != 0) {
3375                 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_tag() failed: %d",
3376                             ha->instance, rc);
3377                 res = -EFAULT;
3378                 goto out_free;
3379         }
3380
3381 out:
3382         TRACE_EXIT_RES(res);
3383         return res;
3384
3385 out_free:
3386         mempool_free(mcmd, q2t_mgmt_cmd_mempool);
3387         goto out;
3388 }
3389
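/*
 * ELS handling policy implemented below: PLOGI, FLOGI, PRLI, LOGO and PRLO
 * drop the corresponding I_T nexus via q2t_reset(..., Q2T_NEXUS_LOSS_SESS);
 * PDISC and ADISC are only acknowledged (completing a pending LINK REINIT
 * notify first, if one is outstanding).  Unsupported ELS codes are treated
 * the same way as a logout.
 */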
3390 /*
3391  * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire it.
3392  */
3393 static int q24_handle_els(scsi_qla_host_t *ha, notify24xx_entry_t *iocb)
3394 {
3395         int res = 0;
3396
3397         TRACE_ENTRY();
3398
3399         TRACE(TRACE_MGMT, "ELS opcode %x", iocb->status_subcode);
3400
3401         switch (iocb->status_subcode) {
3402         case ELS_PLOGI:
3403         case ELS_FLOGI:
3404         case ELS_PRLI:
3405         case ELS_LOGO:
3406         case ELS_PRLO:
3407                 res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
3408                 break;
3409
3410         case ELS_PDISC:
3411         case ELS_ADISC:
3412         {
3413                 struct q2t_tgt *tgt = ha->tgt;
3414                 if (tgt->link_reinit_iocb_pending) {
3415                         q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
3416                         tgt->link_reinit_iocb_pending = 0;
3417                 }
3418                 res = 1; /* send notify ack */
3419                 break;
3420         }
3421
3422         default:
3423                 PRINT_ERROR("qla2x00tgt(%ld): Unsupported ELS command %x "
3424                         "received", ha->instance, iocb->status_subcode);
3425                 res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
3426                 break;
3427         }
3428
3429         TRACE_EXIT_RES(res);
3430         return res;
3431 }
3432
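/*
 * q2t_cut_cmd_data_head() is used by the SRR handling code to skip the first
 * "offset" bytes of the command's data: it locates the SG element and page
 * where the requested offset falls and then builds a new, shorter SG list
 * starting from that point.  A rough sketch of the first step:
 *
 *   l = 0;
 *   for (i = 0; i < cmd->sg_cnt; i++) {
 *           l += cmd->sg[i].length;
 *           if (l > offset)
 *                   break;          the offset lies inside cmd->sg[i]
 *   }
 *
 * SG entries with a non-zero page offset need the extra first_page /
 * first_page_offs arithmetic handled below.
 */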
3433 static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset)
3434 {
3435         int res = 0;
3436         int cnt, first_sg, first_page = 0, first_page_offs = 0, i;
3437         unsigned int l;
3438         int cur_dst, cur_src;
3439         struct scatterlist *sg;
3440         size_t bufflen = 0;
3441
3442         TRACE_ENTRY();
3443
3444         first_sg = -1;
3445         cnt = 0;
3446         l = 0;
3447         for (i = 0; i < cmd->sg_cnt; i++) {
3448                 l += cmd->sg[i].length;
3449                 if (l > offset) {
3450                         int sg_offs = l - cmd->sg[i].length;
3451                         first_sg = i;
3452                         if (cmd->sg[i].offset == 0) {
3453                                 first_page_offs = offset % PAGE_SIZE;
3454                                 first_page = (offset - sg_offs) >> PAGE_SHIFT;
3455                         } else {
3456                                 TRACE_SG("i=%d, sg[i].offset=%d, "
3457                                         "sg_offs=%d", i, cmd->sg[i].offset, sg_offs);
3458                                 if ((cmd->sg[i].offset + sg_offs) > offset) {
3459                                         first_page_offs = offset - sg_offs;
3460                                         first_page = 0;
3461                                 } else {
3462                                         int sec_page_offs = sg_offs + 
3463                                                 (PAGE_SIZE - cmd->sg[i].offset);
3464                                         first_page_offs = sec_page_offs % PAGE_SIZE;
3465                                         first_page = 1 + 
3466                                                 ((offset - sec_page_offs) >> 
3467                                                         PAGE_SHIFT);
3468                                 }
3469                         }
3470                         cnt = cmd->sg_cnt - i + (first_page_offs != 0);
3471                         break;
3472                 }
3473         }
3474         if (first_sg == -1) {
3475                 PRINT_ERROR("qla2x00tgt(%ld): Wrong offset %d, buf length %d",
3476                         cmd->tgt->ha->instance, offset, cmd->bufflen);
3477                 res = -EINVAL;
3478                 goto out;
3479         }
3480
3481         TRACE_SG("offset=%d, first_sg=%d, first_page=%d, "
3482                 "first_page_offs=%d, cmd->bufflen=%d, cmd->sg_cnt=%d", offset,
3483                 first_sg, first_page, first_page_offs, cmd->bufflen,
3484                 cmd->sg_cnt);
3485
3486         sg = kmalloc(cnt * sizeof(sg[0]), GFP_KERNEL);
3487         if (sg == NULL) {
3488                 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Unable to allocate cut "
3489                         "SG (len %zd)", cmd->tgt->ha->instance,
3490                         cnt * sizeof(sg[0]));
3491                 res = -ENOMEM;
3492                 goto out;
3493         }
3494         sg_init_table(sg, cnt);
3495
3496         cur_dst = 0;
3497         cur_src = first_sg;
3498         if (first_page_offs != 0) {
3499                 int fpgs;
3500                 sg_set_page(&sg[cur_dst], &sg_page(&cmd->sg[cur_src])[first_page],
3501                         PAGE_SIZE - first_page_offs, first_page_offs);
3502                 bufflen += sg[cur_dst].length;
3503                 TRACE_SG("cur_dst=%d, cur_src=%d, sg[].page=%p, "
3504                         "sg[].offset=%d, sg[].length=%d, bufflen=%zu",
3505                         cur_dst, cur_src, sg_page(&sg[cur_dst]), sg[cur_dst].offset,
3506               &