4 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
7 * Copyright (C) 2006 - 2009 ID7 Ltd.
9 * QLogic 22xx/23xx/24xx/25xx FC target driver.
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, version 2
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/types.h>
25 #include <linux/version.h>
26 #include <linux/blkdev.h>
27 #include <linux/interrupt.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <linux/pci.h>
31 #include <linux/delay.h>
32 #include <linux/list.h>
39 * This driver calls qla2x00_req_pkt() and qla2x00_issue_marker(), which
40 * must be called under HW lock and could unlock/lock it inside.
41 * It isn't an issue, since in the current implementation, at the time when
42 * those functions are called:
44 * - Either context is IRQ and only IRQ handler can modify HW data,
45 * including rings related fields,
47 * - Or access to target mode variables from struct q2t_tgt doesn't
48 * cross those functions boundaries, except tgt_stop, which
49 * additionally protected by irq_cmd_count.
52 #ifndef CONFIG_SCSI_QLA2XXX_TARGET
53 #error "CONFIG_SCSI_QLA2XXX_TARGET is NOT DEFINED"
56 #ifdef CONFIG_SCST_DEBUG
57 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
58 TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_MINOR | \
59 TRACE_MGMT_DEBUG | TRACE_MINOR | TRACE_SPECIAL)
61 # ifdef CONFIG_SCST_TRACING
62 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MINOR | \
67 static int q2t_target_detect(struct scst_tgt_template *templ);
68 static int q2t_target_release(struct scst_tgt *scst_tgt);
69 static int q2x_xmit_response(struct scst_cmd *scst_cmd);
70 static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type);
71 static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd);
72 static void q2t_on_free_cmd(struct scst_cmd *scst_cmd);
73 static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
75 /* Predefs for callbacks handed to qla2xxx(target) */
76 static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *pkt);
77 static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt);
78 static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
80 static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle);
81 static int q2t_host_action(scsi_qla_host_t *ha,
82 qla2x_tgt_host_action_t action);
83 static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport);
84 static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport);
85 static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
86 int lun_size, int fn, void *iocb, int flags);
87 static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
88 atio_entry_t *atio, int ha_locked);
89 static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
90 atio7_entry_t *atio, int ha_locked);
91 static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
93 static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset);
94 static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only);
95 static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd);
96 static int q2t_unreg_sess(struct q2t_sess *sess);
98 #ifndef CONFIG_SCST_PROC
102 static ssize_t q2t_version_show(struct kobject *kobj,
103 struct kobj_attribute *attr, char *buf);
105 struct kobj_attribute q2t_version_attr =
106 __ATTR(version, S_IRUGO, q2t_version_show, NULL);
108 static const struct attribute *q2t_attrs[] = {
109 &q2t_version_attr.attr,
113 static ssize_t q2t_show_expl_conf_enabled(struct kobject *kobj,
114 struct kobj_attribute *attr, char *buffer);
115 static ssize_t q2t_store_expl_conf_enabled(struct kobject *kobj,
116 struct kobj_attribute *attr, const char *buffer, size_t size);
118 struct kobj_attribute q2t_expl_conf_attr =
119 __ATTR(explicit_confirmation, S_IRUGO|S_IWUSR,
120 q2t_show_expl_conf_enabled, q2t_store_expl_conf_enabled);
122 static const struct attribute *q2t_tgt_attrs[] = {
123 &q2t_expl_conf_attr.attr,
127 #endif /* CONFIG_SCST_PROC */
129 static ssize_t q2t_enable_tgt(struct scst_tgt *tgt, const char *buf,
131 static bool q2t_is_tgt_enabled(struct scst_tgt *tgt);
137 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
138 #define trace_flag q2t_trace_flag
139 static unsigned long q2t_trace_flag = Q2T_DEFAULT_LOG_FLAGS;
142 static struct scst_tgt_template tgt2x_template = {
143 .name = "qla2x00tgt",
146 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
147 .xmit_response_atomic = 0,
148 .rdy_to_xfer_atomic = 0,
150 .xmit_response_atomic = 1,
151 .rdy_to_xfer_atomic = 1,
153 .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
154 .detect = q2t_target_detect,
155 .release = q2t_target_release,
156 .xmit_response = q2x_xmit_response,
157 .rdy_to_xfer = q2t_rdy_to_xfer,
158 .on_free_cmd = q2t_on_free_cmd,
159 .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
160 .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
161 .enable_tgt = q2t_enable_tgt,
162 .is_tgt_enabled = q2t_is_tgt_enabled,
163 #ifndef CONFIG_SCST_PROC
164 .tgtt_attrs = q2t_attrs,
165 .tgt_attrs = q2t_tgt_attrs,
167 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
168 .default_trace_flags = Q2T_DEFAULT_LOG_FLAGS,
169 .trace_flags = &trace_flag,
173 static struct kmem_cache *q2t_cmd_cachep;
174 static struct kmem_cache *q2t_mgmt_cmd_cachep;
175 static mempool_t *q2t_mgmt_cmd_mempool;
177 static DECLARE_RWSEM(q2t_unreg_rwsem);
179 /* It's not yet supported */
180 static inline int scst_cmd_get_ppl_offset(struct scst_cmd *scst_cmd)
185 /* ha->hardware_lock supposed to be held on entry */
186 static inline void q2t_sess_get(struct q2t_sess *sess)
189 TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref);
192 /* ha->hardware_lock supposed to be held on entry */
193 static inline void q2t_sess_put(struct q2t_sess *sess)
195 TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref-1);
196 sBUG_ON(sess->sess_ref == 0);
199 if (sess->sess_ref == 0)
200 q2t_unreg_sess(sess);
203 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
204 static inline struct q2t_sess *q2t_find_sess_by_loop_id(struct q2t_tgt *tgt,
207 struct q2t_sess *sess;
208 sBUG_ON(tgt == NULL);
209 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
210 if (lid == (sess->loop_id))
216 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
217 static inline struct q2t_sess *q2t_find_sess_by_s_id(struct q2t_tgt *tgt,
220 struct q2t_sess *sess;
221 sBUG_ON(tgt == NULL);
222 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
223 if ((sess->s_id.b.al_pa == s_id[2]) &&
224 (sess->s_id.b.area == s_id[1]) &&
225 (sess->s_id.b.domain == s_id[0]))
231 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
232 static inline struct q2t_sess *q2t_find_sess_by_s_id_le(struct q2t_tgt *tgt,
235 struct q2t_sess *sess;
236 sBUG_ON(tgt == NULL);
237 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
238 if ((sess->s_id.b.al_pa == s_id[0]) &&
239 (sess->s_id.b.area == s_id[1]) &&
240 (sess->s_id.b.domain == s_id[2]))
246 /* ha->hardware_lock supposed to be held on entry */
247 static inline void q2t_exec_queue(scsi_qla_host_t *ha)
252 /* Might release hw lock, then reacquire!! */
253 static inline int q2t_issue_marker(scsi_qla_host_t *ha, int ha_locked)
255 /* Send marker if required */
256 if (unlikely(ha->marker_needed != 0)) {
257 int rc = qla2x00_issue_marker(ha, ha_locked);
258 if (rc != QLA_SUCCESS) {
259 PRINT_ERROR("qla2x00tgt(%ld): issue_marker() "
260 "failed", ha->instance);
268 * Registers with initiator driver (but target mode isn't enabled till
269 * it's turned on via sysfs)
271 static int q2t_target_detect(struct scst_tgt_template *tgtt)
274 struct qla_tgt_data t = {
275 .magic = QLA2X_TARGET_MAGIC,
276 .tgt24_atio_pkt = q24_atio_pkt,
277 .tgt_response_pkt = q2t_response_pkt,
278 .tgt2x_ctio_completion = q2x_ctio_completion,
279 .tgt_async_event = q2t_async_event,
280 .tgt_host_action = q2t_host_action,
281 .tgt_fc_port_added = q2t_fc_port_added,
282 .tgt_fc_port_deleted = q2t_fc_port_deleted,
287 res = qla2xxx_tgt_register_driver(&t);
289 PRINT_ERROR("Unable to register driver: %d", res);
293 if (res != QLA2X_INITIATOR_MAGIC) {
294 PRINT_ERROR("Wrong version of the initiator part: %d",
300 qla2xxx_add_targets();
302 PRINT_INFO("%s", "Target mode driver for QLogic 2x00 controller "
303 "registered successfully");
310 static void q2t_free_session_done(struct scst_session *scst_sess)
312 struct q2t_sess *sess;
319 sBUG_ON(scst_sess == NULL);
320 sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
321 sBUG_ON(sess == NULL);
324 TRACE_MGMT_DBG("Unregistration of sess %p finished", sess);
331 TRACE_DBG("empty(sess_list) %d sess_count %d",
332 list_empty(&tgt->sess_list), tgt->sess_count);
337 * We need to protect against race, when tgt is freed before or
340 spin_lock_irqsave(&ha->hardware_lock, flags);
342 if (tgt->sess_count == 0)
343 wake_up_all(&tgt->waitQ);
344 spin_unlock_irqrestore(&ha->hardware_lock, flags);
351 /* ha->hardware_lock supposed to be held on entry */
352 static int q2t_unreg_sess(struct q2t_sess *sess)
358 sBUG_ON(sess == NULL);
359 sBUG_ON(sess->sess_ref != 0);
361 TRACE_MGMT_DBG("Deleting sess %p from tgt %p", sess, sess->tgt);
362 list_del(&sess->sess_list_entry);
365 list_del(&sess->del_list_entry);
367 PRINT_INFO("qla2x00tgt(%ld): %ssession for loop_id %d deleted",
368 sess->tgt->ha->instance, sess->local ? "local " : "",
371 scst_unregister_session(sess->scst_sess, 0, q2t_free_session_done);
377 /* ha->hardware_lock supposed to be held on entry */
378 static int q2t_reset(scsi_qla_host_t *ha, void *iocb, int mcmd)
380 struct q2t_sess *sess;
387 if (IS_FWI2_CAPABLE(ha)) {
388 notify24xx_entry_t *n = (notify24xx_entry_t *)iocb;
389 loop_id = le16_to_cpu(n->nport_handle);
391 loop_id = GET_TARGET_ID(ha, (notify_entry_t *)iocb);
393 if (loop_id == 0xFFFF) {
395 q2t_clear_tgt_db(ha->tgt, 1);
396 if (!list_empty(&ha->tgt->sess_list)) {
397 sess = list_entry(ha->tgt->sess_list.next,
398 typeof(*sess), sess_list_entry);
400 case Q2T_NEXUS_LOSS_SESS:
401 mcmd = Q2T_NEXUS_LOSS;
404 case Q2T_ABORT_ALL_SESS:
405 mcmd = Q2T_ABORT_ALL;
413 PRINT_ERROR("qla2x00tgt(%ld): Not allowed "
414 "command %x in %s", ha->instance,
422 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
426 ha->tgt->tm_to_unknown = 1;
430 TRACE_MGMT_DBG("scsi(%ld): resetting (session %p, "
431 "mcmd %x, loop_id %d)", ha->host_no, sess, mcmd, loop_id);
433 res = q2t_issue_task_mgmt(sess, (uint8_t *)&lun, sizeof(lun),
434 mcmd, iocb, Q24_MGMT_SEND_NACK);
441 /* ha->hardware_lock supposed to be held on entry */
442 static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only)
444 struct q2t_sess *sess, *sess_tmp;
448 TRACE(TRACE_MGMT, "Clearing targets DB %p", tgt);
450 list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
452 if (local_only && !sess->local)
454 if (local_only && sess->local)
455 TRACE_MGMT_DBG("Putting local session %p", sess);
459 /* At this point tgt could be already dead */
461 TRACE_MGMT_DBG("Finished clearing tgt %p DB", tgt);
467 /* Called in a thread context */
468 static void q2t_alloc_session_done(struct scst_session *scst_sess,
469 void *data, int result)
474 struct q2t_sess *sess = (struct q2t_sess *)data;
475 struct q2t_tgt *tgt = sess->tgt;
476 scsi_qla_host_t *ha = tgt->ha;
479 PRINT_INFO("qla2x00tgt(%ld): Session initialization failed",
482 spin_lock_irqsave(&ha->hardware_lock, flags);
484 spin_unlock_irqrestore(&ha->hardware_lock, flags);
491 static void q2t_del_sess_timer_fn(unsigned long arg)
493 struct q2t_tgt *tgt = (struct q2t_tgt *)arg;
494 scsi_qla_host_t *ha = tgt->ha;
495 struct q2t_sess *sess;
500 spin_lock_irqsave(&ha->hardware_lock, flags);
501 while (!list_empty(&tgt->del_sess_list)) {
502 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
504 if (time_after_eq(jiffies, sess->expires)) {
506 * sess will be deleted from del_sess_list in
509 TRACE_MGMT_DBG("Timeout: sess %p about to be deleted",
513 tgt->sess_del_timer.expires = sess->expires;
514 add_timer(&tgt->sess_del_timer);
518 spin_unlock_irqrestore(&ha->hardware_lock, flags);
525 * Must be called under tgt_mutex.
527 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
528 * Caller must put it.
530 static struct q2t_sess *q2t_create_sess(scsi_qla_host_t *ha, fc_port_t *fcport,
534 const int wwn_str_len = 3*WWN_SIZE+2;
535 struct q2t_tgt *tgt = ha->tgt;
536 struct q2t_sess *sess;
540 /* Check to avoid double sessions */
541 spin_lock_irq(&ha->hardware_lock);
542 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
543 if ((sess->port_name[0] == fcport->port_name[0]) &&
544 (sess->port_name[1] == fcport->port_name[1]) &&
545 (sess->port_name[2] == fcport->port_name[2]) &&
546 (sess->port_name[3] == fcport->port_name[3]) &&
547 (sess->port_name[4] == fcport->port_name[4]) &&
548 (sess->port_name[5] == fcport->port_name[5]) &&
549 (sess->port_name[6] == fcport->port_name[6]) &&
550 (sess->port_name[7] == fcport->port_name[7])) {
551 TRACE_MGMT_DBG("Double sess %p found (s_id %x:%x:%x, "
552 "loop_id %d), updating to d_id %x:%x:%x, "
553 "loop_id %d", sess, sess->s_id.b.al_pa,
554 sess->s_id.b.area, sess->s_id.b.domain,
555 sess->loop_id, fcport->d_id.b.al_pa,
556 fcport->d_id.b.area, fcport->d_id.b.domain,
560 list_del(&sess->del_list_entry);
564 sess->s_id = fcport->d_id;
565 sess->loop_id = fcport->loop_id;
566 sess->conf_compl_supported = fcport->conf_compl_supported;
567 if (sess->local && !local)
569 spin_unlock_irq(&ha->hardware_lock);
573 spin_unlock_irq(&ha->hardware_lock);
575 /* We are under tgt_mutex, so a new sess can't be added behind us */
577 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
579 PRINT_ERROR("qla2x00tgt(%ld): session allocation failed, "
580 "all commands from port %02x:%02x:%02x:%02x:"
581 "%02x:%02x:%02x:%02x will be refused", ha->instance,
582 fcport->port_name[0], fcport->port_name[1],
583 fcport->port_name[2], fcport->port_name[3],
584 fcport->port_name[4], fcport->port_name[5],
585 fcport->port_name[6], fcport->port_name[7]);
589 sess->sess_ref = 2; /* plus 1 extra ref, see above */
591 sess->s_id = fcport->d_id;
592 sess->loop_id = fcport->loop_id;
593 sess->conf_compl_supported = fcport->conf_compl_supported;
595 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
596 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
598 wwn_str = kmalloc(wwn_str_len, GFP_KERNEL);
599 if (wwn_str == NULL) {
600 PRINT_ERROR("qla2x00tgt(%ld): Allocation of wwn_str failed. "
601 "All commands from port %02x:%02x:%02x:%02x:%02x:%02x:"
602 "%02x:%02x will be refused", ha->instance,
603 fcport->port_name[0], fcport->port_name[1],
604 fcport->port_name[2], fcport->port_name[3],
605 fcport->port_name[4], fcport->port_name[5],
606 fcport->port_name[6], fcport->port_name[7]);
610 sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
611 fcport->port_name[0], fcport->port_name[1],
612 fcport->port_name[2], fcport->port_name[3],
613 fcport->port_name[4], fcport->port_name[5],
614 fcport->port_name[6], fcport->port_name[7]);
616 /* Let's do the session creation async'ly */
617 sess->scst_sess = scst_register_session(tgt->scst_tgt, 1, wwn_str,
618 sess, q2t_alloc_session_done);
620 if (sess->scst_sess == NULL) {
621 PRINT_CRIT_ERROR("qla2x00tgt(%ld): scst_register_session() "
622 "failed for host %ld (wwn %s, loop_id %d), all "
623 "commands from it will be refused", ha->instance,
624 ha->host_no, wwn_str, fcport->loop_id);
625 goto out_free_sess_wwn;
627 scst_sess_set_tgt_priv(sess->scst_sess, sess);
629 spin_lock_irq(&ha->hardware_lock);
630 TRACE_MGMT_DBG("Adding sess %p to tgt %p", sess, tgt);
631 list_add_tail(&sess->sess_list_entry, &tgt->sess_list);
633 spin_unlock_irq(&ha->hardware_lock);
635 PRINT_INFO("qla2x00tgt(%ld): %ssession for wwn %s (loop_id %d, "
636 "s_id %x:%x:%x, confirmed completion %ssupported) added",
637 ha->instance, local ? "local " : "", wwn_str, fcport->loop_id,
638 sess->s_id.b.al_pa, sess->s_id.b.area, sess->s_id.b.domain,
639 sess->conf_compl_supported ? "" : "not ");
644 TRACE_EXIT_HRES(sess);
657 static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport)
660 struct q2t_sess *sess;
664 mutex_lock(&ha->tgt_mutex);
668 if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
674 spin_lock_irq(&ha->hardware_lock);
676 sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
678 spin_unlock_irq(&ha->hardware_lock);
679 sess = q2t_create_sess(ha, fcport, false);
680 spin_lock_irq(&ha->hardware_lock);
682 q2t_sess_put(sess); /* put the extra creation ref */
685 list_del(&sess->del_list_entry);
688 PRINT_INFO("qla2x00tgt(%ld): session for port %02x:"
689 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
690 "reappeared", ha->instance, fcport->port_name[0],
691 fcport->port_name[1], fcport->port_name[2],
692 fcport->port_name[3], fcport->port_name[4],
693 fcport->port_name[5], fcport->port_name[6],
694 fcport->port_name[7], sess->loop_id);
695 TRACE_MGMT_DBG("Appeared sess %p", sess);
696 } else if (sess->local) {
697 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): local session for "
698 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
699 "(loop ID %d) became global", ha->instance,
700 fcport->port_name[0], fcport->port_name[1],
701 fcport->port_name[2], fcport->port_name[3],
702 fcport->port_name[4], fcport->port_name[5],
703 fcport->port_name[6], fcport->port_name[7],
709 spin_unlock_irq(&ha->hardware_lock);
712 mutex_unlock(&ha->tgt_mutex);
718 static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport)
721 struct q2t_sess *sess;
722 uint32_t dev_loss_tmo;
726 mutex_lock(&ha->tgt_mutex);
730 if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
733 dev_loss_tmo = ha->port_down_retry_count + 5;
738 spin_lock_irq(&ha->hardware_lock);
740 sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
744 if (!sess->deleted) {
747 add_tmr = list_empty(&tgt->del_sess_list);
749 TRACE_MGMT_DBG("Scheduling sess %p to deletion", sess);
750 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
753 PRINT_INFO("qla2x00tgt(%ld): %ssession for port %02x:%02x:%02x:"
754 "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
755 "deletion in %d secs", ha->instance,
756 sess->local ? "local " : "",
757 fcport->port_name[0], fcport->port_name[1],
758 fcport->port_name[2], fcport->port_name[3],
759 fcport->port_name[4], fcport->port_name[5],
760 fcport->port_name[6], fcport->port_name[7],
761 sess->loop_id, dev_loss_tmo);
763 sess->expires = jiffies + dev_loss_tmo * HZ;
765 mod_timer(&tgt->sess_del_timer, sess->expires);
769 spin_unlock_irq(&ha->hardware_lock);
772 mutex_unlock(&ha->tgt_mutex);
778 static inline int test_tgt_sess_count(struct q2t_tgt *tgt)
784 * We need to protect against race, when tgt is freed before or
787 spin_lock_irqsave(&tgt->ha->hardware_lock, flags);
788 TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
789 tgt, list_empty(&tgt->sess_list), tgt->sess_count);
790 res = (tgt->sess_count == 0);
791 spin_unlock_irqrestore(&tgt->ha->hardware_lock, flags);
796 /* Must be called under tgt_host_action_mutex */
797 static void q2t_target_stop(struct scst_tgt *scst_tgt)
799 struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
800 scsi_qla_host_t *ha = tgt->ha;
804 TRACE_DBG("Stopping target for host %ld(%p)", ha->host_no, ha);
807 * Mutex needed to sync with q2t_fc_port_[added,deleted].
808 * Lock is needed, because we still can get an incoming packet.
811 mutex_lock(&ha->tgt_mutex);
812 spin_lock_irq(&ha->hardware_lock);
814 q2t_clear_tgt_db(tgt, false);
815 spin_unlock_irq(&ha->hardware_lock);
816 mutex_unlock(&ha->tgt_mutex);
818 del_timer_sync(&tgt->sess_del_timer);
820 TRACE_MGMT_DBG("Waiting for sess works (tgt %p)", tgt);
821 spin_lock_irq(&tgt->sess_work_lock);
822 while (!list_empty(&tgt->sess_works_list)) {
823 spin_unlock_irq(&tgt->sess_work_lock);
824 flush_scheduled_work();
825 spin_lock_irq(&tgt->sess_work_lock);
827 spin_unlock_irq(&tgt->sess_work_lock);
829 TRACE_MGMT_DBG("Waiting for tgt %p: list_empty(sess_list)=%d "
830 "sess_count=%d", tgt, list_empty(&tgt->sess_list),
833 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
836 if (!ha->host_shutting_down && qla_tgt_mode_enabled(ha))
837 qla2x00_disable_tgt_mode(ha);
839 /* Wait for sessions to clear out (just in case) */
840 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
842 TRACE_MGMT_DBG("Waiting for %d IRQ commands to complete (tgt %p)",
843 tgt->irq_cmd_count, tgt);
845 mutex_lock(&ha->tgt_mutex);
846 spin_lock_irq(&ha->hardware_lock);
847 while (tgt->irq_cmd_count != 0) {
848 spin_unlock_irq(&ha->hardware_lock);
850 spin_lock_irq(&ha->hardware_lock);
853 spin_unlock_irq(&ha->hardware_lock);
854 mutex_unlock(&ha->tgt_mutex);
856 TRACE_MGMT_DBG("Stop of tgt %p finished", tgt);
862 /* Must be called under tgt_host_action_mutex */
863 static int q2t_target_release(struct scst_tgt *scst_tgt)
865 struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
866 scsi_qla_host_t *ha = tgt->ha;
870 q2t_target_stop(scst_tgt);
873 scst_tgt_set_tgt_priv(scst_tgt, NULL);
875 TRACE_MGMT_DBG("Release of tgt %p finished", tgt);
884 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
886 static void q2x_modify_command_count(scsi_qla_host_t *ha, int cmd_count,
889 modify_lun_entry_t *pkt;
893 TRACE_DBG("Sending MODIFY_LUN (ha=%p, cmd=%d, imm=%d)",
894 ha, cmd_count, imm_count);
896 /* Sending marker isn't necessary, since we called from ISR */
898 pkt = (modify_lun_entry_t *)qla2x00_req_pkt(ha);
900 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
901 "request packet", ha->instance, __func__);
905 ha->tgt->modify_lun_expected++;
907 pkt->entry_type = MODIFY_LUN_TYPE;
908 pkt->entry_count = 1;
910 pkt->operators = MODIFY_LUN_CMD_SUB; /* Subtract from command count */
911 pkt->command_count = -cmd_count;
912 } else if (cmd_count > 0) {
913 pkt->operators = MODIFY_LUN_CMD_ADD; /* Add to command count */
914 pkt->command_count = cmd_count;
918 pkt->operators |= MODIFY_LUN_IMM_SUB;
919 pkt->immed_notify_count = -imm_count;
920 } else if (imm_count > 0) {
921 pkt->operators |= MODIFY_LUN_IMM_ADD;
922 pkt->immed_notify_count = imm_count;
925 pkt->timeout = 0; /* Use default */
927 TRACE_BUFFER("MODIFY LUN packet data", pkt, REQUEST_ENTRY_SIZE);
937 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
939 static void q2x_send_notify_ack(scsi_qla_host_t *ha, notify_entry_t *iocb,
940 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
941 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
947 TRACE_DBG("Sending NOTIFY_ACK (ha=%p)", ha);
949 /* Send marker if required */
950 if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
953 ntfy = (nack_entry_t *)qla2x00_req_pkt(ha);
955 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
956 "request packet", ha->instance, __func__);
961 ha->tgt->notify_ack_expected++;
963 ntfy->entry_type = NOTIFY_ACK_TYPE;
964 ntfy->entry_count = 1;
965 SET_TARGET_ID(ha, ntfy->target, GET_TARGET_ID(ha, iocb));
966 ntfy->status = iocb->status;
967 ntfy->task_flags = iocb->task_flags;
968 ntfy->seq_id = iocb->seq_id;
969 /* Do not increment here, the chip isn't decrementing */
970 /* ntfy->flags = __constant_cpu_to_le16(NOTIFY_ACK_RES_COUNT); */
971 ntfy->flags |= cpu_to_le16(add_flags);
972 ntfy->srr_rx_id = iocb->srr_rx_id;
973 ntfy->srr_rel_offs = iocb->srr_rel_offs;
974 ntfy->srr_ui = iocb->srr_ui;
975 ntfy->srr_flags = cpu_to_le16(srr_flags);
976 ntfy->srr_reject_code = cpu_to_le16(srr_reject_code);
977 ntfy->srr_reject_code_expl = srr_explan;
978 ntfy->ox_id = iocb->ox_id;
980 if (resp_code_valid) {
981 ntfy->resp_code = cpu_to_le16(resp_code);
982 ntfy->flags |= __constant_cpu_to_le16(
983 NOTIFY_ACK_TM_RESP_CODE_VALID);
986 TRACE(TRACE_SCSI, "Sending Notify Ack Seq %#x -> I %#x St %#x RC %#x",
987 le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
988 le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
989 TRACE_BUFFER("Notify Ack packet data", ntfy, REQUEST_ENTRY_SIZE);
999 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1001 static void q24_send_abts_resp(scsi_qla_host_t *ha,
1002 const abts24_recv_entry_t *abts, uint32_t status, bool ids_reversed)
1004 abts24_resp_entry_t *resp;
1010 TRACE_DBG("Sending task mgmt ABTS response (ha=%p, atio=%p, "
1011 "status=%x", ha, abts, status);
1013 /* Send marker if required */
1014 if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1017 resp = (abts24_resp_entry_t *)qla2x00_req_pkt(ha);
1019 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1020 "request packet", ha->instance, __func__);
1024 resp->entry_type = ABTS_RESP_24XX;
1025 resp->entry_count = 1;
1026 resp->nport_handle = abts->nport_handle;
1027 resp->sof_type = abts->sof_type;
1028 resp->exchange_address = abts->exchange_address;
1029 resp->fcp_hdr_le = abts->fcp_hdr_le;
1030 f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1031 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1032 F_CTL_SEQ_INITIATIVE);
1033 p = (uint8_t *)&f_ctl;
1034 resp->fcp_hdr_le.f_ctl[0] = *p++;
1035 resp->fcp_hdr_le.f_ctl[1] = *p++;
1036 resp->fcp_hdr_le.f_ctl[2] = *p;
1038 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
1039 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
1040 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1041 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1042 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1043 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1045 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1046 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1047 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1048 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1049 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1050 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1052 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1053 if (status == SCST_MGMT_STATUS_SUCCESS) {
1054 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1055 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1056 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1057 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1058 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1059 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1061 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1062 resp->payload.ba_rjt.reason_code =
1063 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1064 /* Other bytes are zero */
1067 TRACE_BUFFER("ABTS RESP packet data", resp, REQUEST_ENTRY_SIZE);
1069 ha->tgt->abts_resp_expected++;
1079 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1081 static void q24_retry_term_exchange(scsi_qla_host_t *ha,
1082 abts24_resp_fw_entry_t *entry)
1084 ctio7_status1_entry_t *ctio;
1088 TRACE_DBG("Sending retry TERM EXCH CTIO7 (ha=%p)", ha);
1090 /* Send marker if required */
1091 if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1094 ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
1096 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1097 "request packet", ha->instance, __func__);
1102 * On entry we have the firmware's response to the ABTS response that we
1103 * ourselves generated, so in it the ID fields are reversed.
1106 ctio->common.entry_type = CTIO_TYPE7;
1107 ctio->common.entry_count = 1;
1108 ctio->common.nport_handle = entry->nport_handle;
1109 ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1110 ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1111 ctio->common.initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1112 ctio->common.initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1113 ctio->common.initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1114 ctio->common.exchange_addr = entry->exchange_addr_to_abort;
1115 ctio->flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1116 ctio->ox_id = entry->fcp_hdr_le.ox_id;
1118 TRACE_BUFFER("CTIO7 retry TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
1122 q24_send_abts_resp(ha, (abts24_recv_entry_t *)entry,
1123 SCST_MGMT_STATUS_SUCCESS, true);
1131 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1133 static void q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts)
1137 struct q2t_mgmt_cmd *mcmd;
1138 struct q2t_sess *sess;
1142 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1143 PRINT_ERROR("qla2x00tgt(%ld): ABTS: Abort Sequence not "
1144 "supported", ha->instance);
1148 tag = abts->exchange_addr_to_abort;
1150 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1151 TRACE_MGMT_DBG("qla2x00tgt(%ld): ABTS: Unknown Exchange "
1152 "Address received", ha->instance);
1156 TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): task abort (s_id=%x:%x:%x, "
1157 "tag=%d, param=%x)", ha->instance, abts->fcp_hdr_le.s_id[0],
1158 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[2], tag,
1159 le32_to_cpu(abts->fcp_hdr_le.parameter));
1161 sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
1163 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task abort for unexisting "
1164 "session", ha->instance);
1165 ha->tgt->tm_to_unknown = 1;
1169 mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
1171 PRINT_ERROR("%s: Allocation of ABORT cmd failed", __func__);
1174 memset(mcmd, 0, sizeof(*mcmd));
1177 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1179 rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
1182 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_tag() failed: %d",
1192 mempool_free(mcmd, q2t_mgmt_cmd_mempool);
1195 q24_send_abts_resp(ha, abts, SCST_MGMT_STATUS_REJECTED, false);
1200 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1202 static void q24_send_task_mgmt_ctio(scsi_qla_host_t *ha,
1203 struct q2t_mgmt_cmd *mcmd, uint32_t resp_code)
1205 const atio7_entry_t *atio = &mcmd->orig_iocb.atio7;
1206 ctio7_status1_entry_t *ctio;
1210 TRACE_DBG("Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x",
1211 ha, atio, resp_code);
1213 /* Send marker if required */
1214 if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
1217 ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
1219 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1220 "request packet", ha->instance, __func__);
1224 ctio->common.entry_type = CTIO_TYPE7;
1225 ctio->common.entry_count = 1;
1226 ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1227 ctio->common.nport_handle = mcmd->sess->loop_id;
1228 ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1229 ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
1230 ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
1231 ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
1232 ctio->common.exchange_addr = atio->exchange_addr;
1233 ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
1234 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
1235 ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
1236 ctio->scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1237 ctio->response_len = __constant_cpu_to_le16(8);
1238 ((uint32_t *)ctio->sense_data)[0] = cpu_to_be32(resp_code);
1240 TRACE_BUFFER("CTIO7 TASK MGMT packet data", ctio, REQUEST_ENTRY_SIZE);
1250 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
/*
 * Send a 24xx NOTIFY_ACK IOCB acknowledging the immediate notify in
 * *iocb, echoing back its exchange identifiers. The srr_* parameters
 * fill the SRR (Sequence Retransmission Request) reply fields.
 * Called with ha->hardware_lock held; qla2x00_req_pkt() may drop and
 * reacquire it.
 */
1252 static void q24_send_notify_ack(scsi_qla_host_t *ha,
1253 notify24xx_entry_t *iocb, uint16_t srr_flags,
1254 uint8_t srr_reject_code, uint8_t srr_explan)
1256 nack24xx_entry_t *nack;
1260 TRACE_DBG("Sending NOTIFY_ACK24 (ha=%p)", ha);
1262 /* Send marker if required */
1263 if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
/* Account for the ack before queueing so completion code can match it. */
1266 if (ha->tgt != NULL)
1267 ha->tgt->notify_ack_expected++;
1269 nack = (nack24xx_entry_t *)qla2x00_req_pkt(ha);
1271 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
1272 "request packet", ha->instance, __func__);
1276 nack->entry_type = NOTIFY_ACK_TYPE;
1277 nack->entry_count = 1;
1278 nack->nport_handle = iocb->nport_handle;
/* For ELS notifies, only the PUREX flag is carried back. */
1279 if (le16_to_cpu(iocb->status) == IMM_NTFY_ELS) {
1280 nack->flags = iocb->flags &
1281 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
/* Echo exchange/SRR identification from the original notify. */
1283 nack->srr_rx_id = iocb->srr_rx_id;
1284 nack->status = iocb->status;
1285 nack->status_subcode = iocb->status_subcode;
1286 nack->exchange_address = iocb->exchange_address;
1287 nack->srr_rel_offs = iocb->srr_rel_offs;
1288 nack->srr_ui = iocb->srr_ui;
1289 nack->srr_flags = cpu_to_le16(srr_flags);
1290 nack->srr_reject_code = srr_reject_code;
1291 nack->srr_reject_code_expl = srr_explan;
1292 nack->ox_id = iocb->ox_id;
1294 TRACE(TRACE_SCSI, "Sending 24xx Notify Ack %d", nack->status);
1295 TRACE_BUFFER("24xx Notify Ack packet data", nack, sizeof(*nack));
/*
 * Map an SCST task-management completion status (SCST_MGMT_STATUS_*)
 * to the Fibre Channel FCP task-management response code (FC_TM_*)
 * that is sent back to the initiator.
 */
1304 static uint32_t q2t_convert_to_fc_tm_status(int scst_mstatus)
1308 switch (scst_mstatus) {
1309 case SCST_MGMT_STATUS_SUCCESS:
1310 res = FC_TM_SUCCESS;
1312 case SCST_MGMT_STATUS_TASK_NOT_EXIST:
1313 res = FC_TM_BAD_CMD;
/* The following SCST failure statuses share one FC response code. */
1315 case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
1316 case SCST_MGMT_STATUS_REJECTED:
1319 case SCST_MGMT_STATUS_LUN_NOT_EXIST:
1320 case SCST_MGMT_STATUS_FAILED:
1326 TRACE_EXIT_RES(res);
/*
 * SCST callback invoked when a task-management command finishes.
 * Under ha->hardware_lock it sends the appropriate response to the
 * initiator (NOTIFY_ACK, ABTS response or TM CTIO7 on 24xx+ HW,
 * NOTIFY_ACK on 2xxx HW), then detaches and frees the driver's mcmd.
 */
1331 static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
1333 struct q2t_mgmt_cmd *mcmd;
1334 unsigned long flags;
1335 scsi_qla_host_t *ha;
1339 TRACE_MGMT_DBG("scst_mcmd (%p) status %#x state %#x", scst_mcmd,
1340 scst_mcmd->status, scst_mcmd->state);
1342 mcmd = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
1343 if (unlikely(mcmd == NULL)) {
1344 PRINT_ERROR("scst_mcmd %p tgt_spec is NULL", mcmd);
1348 ha = mcmd->sess->tgt->ha;
1350 spin_lock_irqsave(&ha->hardware_lock, flags);
/* 24xx+ firmware: pick the response form matching how the TM arrived. */
1351 if (IS_FWI2_CAPABLE(ha)) {
1352 if (mcmd->flags == Q24_MGMT_SEND_NACK) {
1353 q24_send_notify_ack(ha,
1354 &mcmd->orig_iocb.notify_entry24, 0, 0, 0);
1356 if (scst_mcmd->fn == SCST_ABORT_TASK)
1357 q24_send_abts_resp(ha, &mcmd->orig_iocb.abts,
1358 scst_mgmt_cmd_get_status(scst_mcmd),
1361 q24_send_task_mgmt_ctio(ha, mcmd,
1362 q2t_convert_to_fc_tm_status(
1363 scst_mgmt_cmd_get_status(scst_mcmd)));
/* Pre-24xx firmware answers TM functions with a notify ack. */
1366 uint32_t resp_code = q2t_convert_to_fc_tm_status(
1367 scst_mgmt_cmd_get_status(scst_mcmd));
1368 q2x_send_notify_ack(ha, &mcmd->orig_iocb.notify_entry, 0,
1369 resp_code, 1, 0, 0, 0);
1371 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Drop the cross-reference before freeing the driver-private mcmd. */
1373 scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
1374 mempool_free(mcmd, q2t_mgmt_cmd_mempool);
/*
 * DMA-map the command's scatter-gather list and compute how many
 * request-ring entries (prm->req_cnt) are needed: segments beyond
 * what fits in the command IOCB (datasegs_per_cmd) spill into
 * continuation entries (datasegs_per_cont each).
 * Returns 0 on success; non-zero if pci_map_sg() mapped nothing.
 */
1382 static int q2t_pci_map_calc_cnt(struct q2t_prm *prm)
1386 sBUG_ON(prm->cmd->sg_cnt == 0);
1388 prm->sg = (struct scatterlist *)prm->cmd->sg;
1389 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, prm->cmd->sg,
1390 prm->cmd->sg_cnt, prm->cmd->dma_data_direction);
1391 if (unlikely(prm->seg_cnt == 0))
1394 * If greater than four sg entries then we need to allocate
1395 * the continuation entries
1397 if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) {
/* Whole continuation entries, then one more for any remainder. */
1398 prm->req_cnt += (uint16_t)(prm->seg_cnt -
1399 prm->tgt->datasegs_per_cmd) /
1400 prm->tgt->datasegs_per_cont;
1401 if (((uint16_t)(prm->seg_cnt - prm->tgt->datasegs_per_cmd)) %
1402 prm->tgt->datasegs_per_cont)
1407 TRACE_DBG("seg_cnt=%d, req_cnt=%d, res=%d", prm->seg_cnt,
1412 PRINT_ERROR("qla2x00tgt(%ld): PCI mapping failed: sg_cnt=%d",
1413 prm->tgt->ha->instance, prm->cmd->sg_cnt);
/*
 * Reserve req_cnt entries in the hardware request ring.
 * If the cached free count looks insufficient, re-read the ring's
 * out-pointer from the HW registers to refresh ha->req_q_cnt before
 * deciding. Keeps 2 entries in reserve beyond the request.
 * Returns SCST_TGT_RES_SUCCESS or SCST_TGT_RES_QUEUE_FULL.
 * ha->hardware_lock must be held by the caller.
 */
1418 static int q2t_check_reserve_free_req(scsi_qla_host_t *ha, uint32_t req_cnt)
1420 int res = SCST_TGT_RES_SUCCESS;
1421 device_reg_t __iomem *reg = ha->iobase;
1426 if (ha->req_q_cnt < (req_cnt + 2)) {
/* Read the current consumer index from the chip (register layout
 * differs between 24xx+ and older ISPs). */
1427 if (IS_FWI2_CAPABLE(ha))
1428 cnt = (uint16_t)RD_REG_DWORD(
1429 &reg->isp24.req_q_out);
1431 cnt = qla2x00_debounce_register(
1432 ISP_REQ_Q_OUT(ha, &reg->isp));
1433 TRACE_DBG("Request ring circled: cnt=%d, "
1434 "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
1435 cnt, ha->req_ring_index, ha->req_q_cnt, req_cnt);
/* Recompute free space, accounting for ring wrap-around. */
1436 if (ha->req_ring_index < cnt)
1437 ha->req_q_cnt = cnt - ha->req_ring_index;
1439 ha->req_q_cnt = ha->request_q_length -
1440 (ha->req_ring_index - cnt);
1443 if (unlikely(ha->req_q_cnt < (req_cnt + 2))) {
1444 TRACE(TRACE_OUT_OF_MEM, "There is no room in the request ring: "
1445 "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
1446 ha->req_ring_index, ha->req_q_cnt, req_cnt);
1447 res = SCST_TGT_RES_QUEUE_FULL;
/* Commit the reservation. */
1451 ha->req_q_cnt -= req_cnt;
1454 TRACE_EXIT_RES(res);
1459 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
/*
 * Advance the request ring to the next entry (wrapping at the end)
 * and return a pointer to it. Callers must have already reserved
 * enough entries via q2t_check_reserve_free_req().
 */
1461 static inline void *q2t_get_req_pkt(scsi_qla_host_t *ha)
1463 /* Adjust ring index. */
1464 ha->req_ring_index++;
1465 if (ha->req_ring_index == ha->request_q_length) {
1466 ha->req_ring_index = 0;
1467 ha->request_ring_ptr = ha->request_ring;
1469 ha->request_ring_ptr++;
1471 return (cont_entry_t *)ha->request_ring_ptr;
1474 /* ha->hardware_lock supposed to be held on entry */
/*
 * Allocate the next free command handle (1..MAX_OUTSTANDING_COMMANDS).
 * Scans from ha->current_handle, skipping the reserved Q2T_NULL_HANDLE
 * and Q2T_SKIP_HANDLE values and slots still in use in ha->cmds[].
 * Returns Q2T_NULL_HANDLE when every slot is occupied.
 * ha->hardware_lock must be held.
 */
1475 static inline uint32_t q2t_make_handle(scsi_qla_host_t *ha)
1479 h = ha->current_handle;
1480 /* always increment cmd handle */
1483 if (h > MAX_OUTSTANDING_COMMANDS)
1484 h = 1; /* 0 is Q2T_NULL_HANDLE */
/* Wrapped all the way around without finding a free slot. */
1485 if (h == ha->current_handle) {
1486 TRACE(TRACE_OUT_OF_MEM,
1487 "Ran out of empty cmd slots in ha %p", ha);
1488 h = Q2T_NULL_HANDLE;
1491 } while ((h == Q2T_NULL_HANDLE) ||
1492 (h == Q2T_SKIP_HANDLE) ||
1493 (ha->cmds[h-1] != NULL));
/* Remember where to resume the scan next time. */
1495 if (h != Q2T_NULL_HANDLE)
1496 ha->current_handle = h;
1501 /* ha->hardware_lock supposed to be held on entry */
/*
 * Build a 2xxx CTIO (or CTIO_A64 when 64-bit addressing is enabled)
 * in the current request-ring slot for the command in prm->cmd,
 * registering the command under a fresh handle in ha->cmds[].
 * ha->hardware_lock must be held.
 */
1502 static void q2x_build_ctio_pkt(struct q2t_prm *prm)
1506 scsi_qla_host_t *ha = prm->tgt->ha;
1508 pkt = (ctio_entry_t *)ha->request_ring_ptr;
1510 memset(pkt, 0, sizeof(*pkt));
1512 if (prm->tgt->tgt_enable_64bit_addr)
1513 pkt->common.entry_type = CTIO_A64_TYPE;
1515 pkt->common.entry_type = CONTINUE_TGT_IO_TYPE;
1517 pkt->common.entry_count = (uint8_t)prm->req_cnt;
/* Track the command so the CTIO completion can find it again. */
1519 h = q2t_make_handle(ha);
1520 if (h != Q2T_NULL_HANDLE)
1521 ha->cmds[h-1] = prm->cmd;
1523 pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
1524 pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
1526 /* Set initiator ID */
1527 h = GET_TARGET_ID(ha, &prm->cmd->atio.atio2x);
1528 SET_TARGET_ID(ha, pkt->common.target, h);
1530 pkt->common.rx_id = prm->cmd->atio.atio2x.rx_id;
1531 pkt->common.relative_offset = cpu_to_le32(prm->cmd->offset);
1533 TRACE(TRACE_DEBUG|TRACE_SCSI,
1534 "handle(scst_cmd) -> %08x, timeout %d L %#x -> I %#x E %#x",
1535 pkt->common.handle, Q2T_TIMEOUT,
1536 le16_to_cpu(prm->cmd->atio.atio2x.lun),
1537 GET_TARGET_ID(ha, &pkt->common), pkt->common.rx_id);
1540 /* ha->hardware_lock supposed to be held on entry */
/*
 * Build a CTIO type 7 in the current request-ring slot for the command
 * in prm->cmd (24xx+ firmware). Unlike the 2xxx variant this can fail:
 * if no handle is free we must return QUEUE_FULL, because a CTIO7
 * completion carries no loop ID through which to recover the command.
 * Returns SCST_TGT_RES_SUCCESS or SCST_TGT_RES_QUEUE_FULL.
 * ha->hardware_lock must be held.
 */
1541 static int q24_build_ctio_pkt(struct q2t_prm *prm)
1544 ctio7_status0_entry_t *pkt;
1545 scsi_qla_host_t *ha = prm->tgt->ha;
1546 atio7_entry_t *atio = &prm->cmd->atio.atio7;
1547 int res = SCST_TGT_RES_SUCCESS;
1551 pkt = (ctio7_status0_entry_t *)ha->request_ring_ptr;
1553 memset(pkt, 0, sizeof(*pkt));
1555 pkt->common.entry_type = CTIO_TYPE7;
1556 pkt->common.entry_count = (uint8_t)prm->req_cnt;
1558 h = q2t_make_handle(ha);
1559 if (unlikely(h == Q2T_NULL_HANDLE)) {
1561 * CTIO type 7 from the firmware doesn't provide a way to
1562 * know the initiator's LOOP ID, hence we can't find
1563 * the session and, so, the command.
1565 res = SCST_TGT_RES_QUEUE_FULL;
1568 ha->cmds[h-1] = prm->cmd;
1570 pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
1571 pkt->common.nport_handle = prm->cmd->loop_id;
1572 pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
/* s_id bytes are copied in reverse order into initiator_id. */
1573 pkt->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
1574 pkt->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
1575 pkt->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
1576 pkt->common.exchange_addr = atio->exchange_addr;
1577 pkt->flags |= (atio->attr << 9);
1578 pkt->ox_id = swab16(atio->fcp_hdr.ox_id);
1579 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
1582 TRACE(TRACE_DEBUG|TRACE_SCSI, "handle(scst_cmd) -> %08x, timeout %d "
1583 "ox_id %#x", pkt->common.handle, Q2T_TIMEOUT,
1584 le16_to_cpu(pkt->ox_id));
1585 TRACE_EXIT_RES(res);
1590 * ha->hardware_lock supposed to be held on entry. We have already made sure
1591 * that there is sufficient amount of request entries to not drop it.
/*
 * Emit continuation IOCBs holding the data segments that did not fit
 * in the command IOCB, consuming prm->sg / prm->seg_cnt as it goes.
 * Each 64-bit segment is written as lo32, hi32, len; 32-bit segments
 * as lo32, len. Ring space was reserved in advance, so q2t_get_req_pkt()
 * cannot run out of entries here.
 */
1593 static void q2t_load_cont_data_segments(struct q2t_prm *prm)
1596 uint32_t *dword_ptr;
1597 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1601 /* Build continuation packets */
1602 while (prm->seg_cnt > 0) {
1603 cont_a64_entry_t *cont_pkt64 =
1604 (cont_a64_entry_t *)q2t_get_req_pkt(prm->tgt->ha);
1607 * Make sure that from cont_pkt64 none of
1608 * 64-bit specific fields used for 32-bit
1609 * addressing. Cast to (cont_entry_t *) for
1613 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
1615 cont_pkt64->entry_count = 1;
1616 cont_pkt64->sys_define = 0;
/* Pick entry type and the start of the dseg array to match the
 * addressing mode. */
1618 if (enable_64bit_addressing) {
1619 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
1621 (uint32_t *)&cont_pkt64->dseg_0_address;
1623 cont_pkt64->entry_type = CONTINUE_TYPE;
1625 (uint32_t *)&((cont_entry_t *)
1626 cont_pkt64)->dseg_0_address;
1629 /* Load continuation entry data segments */
1631 cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
1632 cnt++, prm->seg_cnt--) {
1634 cpu_to_le32(pci_dma_lo32
1635 (sg_dma_address(prm->sg)));
1636 if (enable_64bit_addressing) {
1638 cpu_to_le32(pci_dma_hi32
1642 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1644 TRACE_SG("S/G Segment Cont. phys_addr=%llx:%llx, len=%d",
1645 (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
1646 (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
1647 (int)sg_dma_len(prm->sg));
1652 TRACE_BUFFER("Continuation packet data",
1653 cont_pkt64, REQUEST_ENTRY_SIZE);
1661 * ha->hardware_lock supposed to be held on entry. We have already made sure
1662 * that there is sufficient amount of request entries to not drop it.
/*
 * Fill the data segments of a 2xxx CTIO built in prm->pkt: transfer
 * length, segment count, then up to datasegs_per_cmd segments inline;
 * the remainder goes into continuation entries. Handles the zero-
 * segment (no data) case. Ring space was reserved in advance.
 */
1664 static void q2x_load_data_segments(struct q2t_prm *prm)
1667 uint32_t *dword_ptr;
1668 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1669 ctio_common_entry_t *pkt = (ctio_common_entry_t *)prm->pkt;
1671 TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
1672 le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
1674 pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
1676 /* Setup packet address segment pointer */
1677 dword_ptr = pkt->dseg_0_address;
1679 if (prm->seg_cnt == 0) {
1680 /* No data transfer */
1684 TRACE_BUFFER("No data, CTIO packet data", pkt,
1685 REQUEST_ENTRY_SIZE);
1689 /* Set total data segment count */
1690 pkt->dseg_count = cpu_to_le16(prm->seg_cnt);
1692 /* If scatter gather */
1693 TRACE_SG("%s", "Building S/G data segments...");
1694 /* Load command entry data segments */
1696 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1697 cnt++, prm->seg_cnt--) {
/* Write lo32 address, optional hi32 for A64, then length. */
1699 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1700 if (enable_64bit_addressing) {
1702 cpu_to_le32(pci_dma_hi32
1703 (sg_dma_address(prm->sg)));
1705 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1707 TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
1708 (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
1709 (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
1710 (int)sg_dma_len(prm->sg));
1715 TRACE_BUFFER("Scatter/gather, CTIO packet data", pkt,
1716 REQUEST_ENTRY_SIZE);
/* Spill remaining segments into continuation IOCBs. */
1718 q2t_load_cont_data_segments(prm);
1725 * ha->hardware_lock supposed to be held on entry. We have already made sure
1726 * that there is sufficient amount of request entries to not drop it.
/*
 * 24xx+ counterpart of q2x_load_data_segments(): fill the data
 * segments of a CTIO7 built in prm->pkt, spilling extra segments into
 * continuation entries. Handles the zero-segment (no data) case.
 * Ring space was reserved in advance.
 */
1728 static void q24_load_data_segments(struct q2t_prm *prm)
1731 uint32_t *dword_ptr;
1732 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1733 ctio7_status0_entry_t *pkt = (ctio7_status0_entry_t *)prm->pkt;
1735 TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
1736 le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
1738 pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
1740 /* Setup packet address segment pointer */
1741 dword_ptr = pkt->dseg_0_address;
1743 if (prm->seg_cnt == 0) {
1744 /* No data transfer */
1748 TRACE_BUFFER("No data, CTIO7 packet data", pkt,
1749 REQUEST_ENTRY_SIZE);
1753 /* Set total data segment count */
1754 pkt->common.dseg_count = cpu_to_le16(prm->seg_cnt);
1756 /* If scatter gather */
1757 TRACE_SG("%s", "Building S/G data segments...");
1758 /* Load command entry data segments */
1760 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1761 cnt++, prm->seg_cnt--) {
/* Write lo32 address, optional hi32 for A64, then length. */
1763 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1764 if (enable_64bit_addressing) {
1766 cpu_to_le32(pci_dma_hi32(
1767 sg_dma_address(prm->sg)));
1769 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1771 TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
1772 (long long unsigned int)pci_dma_hi32(sg_dma_address(
1774 (long long unsigned int)pci_dma_lo32(sg_dma_address(
1776 (int)sg_dma_len(prm->sg));
/* Spill remaining segments into continuation IOCBs. */
1781 q2t_load_cont_data_segments(prm);
/* True when the command carries a non-empty data buffer. */
1787 static inline int q2t_has_data(struct q2t_cmd *cmd)
1789 return cmd->bufflen > 0;
/*
 * Common preamble for __q2x_xmit_response()/__q24_xmit_response():
 *  - terminates the exchange instead of responding if cmd was aborted;
 *  - snapshots status/sense into *prm, DMA-maps data if transmitted;
 *  - computes the FCP residual (under/overflow) for READ commands;
 *  - decides whether a separate status packet is needed (add_status_pkt);
 *  - takes ha->hardware_lock (left held on success via *flags) and
 *    reserves request-ring space.
 * Returns SCST_TGT_RES_* or Q2T_PRE_XMIT_RESP_CMD_ABORTED; on the
 * abort path cmd may already be freed when this returns.
 */
1792 static int q2t_pre_xmit_response(struct q2t_cmd *cmd,
1793 struct q2t_prm *prm, int xmit_type, unsigned long *flags)
1796 struct q2t_tgt *tgt = cmd->tgt;
1797 scsi_qla_host_t *ha = tgt->ha;
1798 uint16_t full_req_cnt;
1799 struct scst_cmd *scst_cmd = cmd->scst_cmd;
/* Aborted command: terminate the exchange instead of responding. */
1803 if (unlikely(cmd->aborted)) {
1804 TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): terminating exchange "
1805 "for aborted cmd=%p (scst_cmd=%p, tag=%d)",
1806 ha->instance, cmd, scst_cmd, cmd->tag);
1808 cmd->state = Q2T_STATE_ABORTED;
1809 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
1811 if (IS_FWI2_CAPABLE(ha))
1812 q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 0);
1814 q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 0);
1815 /* !! At this point cmd could be already freed !! */
1816 res = Q2T_PRE_XMIT_RESP_CMD_ABORTED;
1820 TRACE(TRACE_SCSI, "tag=%lld", scst_cmd_get_tag(scst_cmd));
/* Snapshot SCSI status and sense from the SCST command. */
1824 prm->rq_result = scst_cmd_get_status(scst_cmd);
1825 prm->sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
1826 prm->sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
1830 prm->add_status_pkt = 0;
1832 TRACE_DBG("rq_result=%x, xmit_type=%x", prm->rq_result, xmit_type);
1833 if (prm->rq_result != 0)
1834 TRACE_BUFFER("Sense", prm->sense_buffer, prm->sense_buffer_len);
1836 /* Send marker if required */
1837 if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
1838 res = SCST_TGT_RES_FATAL_ERROR;
1842 TRACE_DBG("CTIO start: ha(%d)", (int)ha->instance);
/* DMA-map the data and compute how many ring entries are needed. */
1844 if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
1845 if (q2t_pci_map_calc_cnt(prm) != 0) {
1846 res = SCST_TGT_RES_QUEUE_FULL;
1851 full_req_cnt = prm->req_cnt;
/* For READs, compare what the initiator asked for (ATIO data
 * length, big-endian on 24xx+, little-endian on 2xxx) with what
 * we will return, and set the FCP residual flags accordingly. */
1853 if (xmit_type & Q2T_XMIT_STATUS) {
1854 if (cmd->data_direction & SCST_DATA_READ) {
1856 if (IS_FWI2_CAPABLE(ha))
1857 expected = be32_to_cpu(cmd->
1858 atio.atio7.fcp_cmnd.data_length);
1860 expected = le32_to_cpu(cmd->
1861 atio.atio2x.data_length);
1862 prm->residual = expected -
1863 scst_cmd_get_resp_data_len(scst_cmd);
1864 if (prm->residual > 0) {
1865 TRACE_DBG("Residual underflow: %d (tag %lld, "
1866 "op %x, expected %d, resp_data_len "
1867 "%d, bufflen %d, rq_result %x)",
1868 prm->residual, scst_cmd->tag,
1869 scst_cmd->cdb[0], expected,
1870 scst_cmd_get_resp_data_len(scst_cmd),
1871 cmd->bufflen, prm->rq_result);
1872 prm->rq_result |= SS_RESIDUAL_UNDER;
1873 } else if (prm->residual < 0) {
1874 TRACE_DBG("Residual overflow: %d (tag %lld, "
1875 "op %x, expected %d, resp_data_len "
1876 "%d, bufflen %d, rq_result %x)",
1877 prm->residual, scst_cmd->tag,
1878 scst_cmd->cdb[0], expected,
1879 scst_cmd_get_resp_data_len(scst_cmd),
1880 cmd->bufflen, prm->rq_result);
1881 prm->rq_result |= SS_RESIDUAL_OVER;
1882 prm->residual = -prm->residual;
1887 * If Q2T_XMIT_DATA is not set, add_status_pkt will be ignored
1888 * in *xmit_response() below
1890 if (q2t_has_data(cmd)) {
1891 if (SCST_SENSE_VALID(prm->sense_buffer) ||
1892 (IS_FWI2_CAPABLE(ha) &&
1893 (prm->rq_result != 0))) {
1894 prm->add_status_pkt = 1;
1900 TRACE_DBG("req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d",
1901 prm->req_cnt, full_req_cnt, prm->add_status_pkt);
1903 /* Acquire ring specific lock */
1904 spin_lock_irqsave(&ha->hardware_lock, *flags);
1906 /* Does F/W have an IOCBs for this request */
1907 res = q2t_check_reserve_free_req(ha, full_req_cnt);
1908 if (unlikely(res != SCST_TGT_RES_SUCCESS) &&
1909 (xmit_type & Q2T_XMIT_DATA))
1910 goto out_unlock_free_unmap;
1913 TRACE_EXIT_RES(res);
/* Error path: undo the DMA mapping and drop the lock. */
1916 out_unlock_free_unmap:
1917 if (q2t_has_data(cmd))
1918 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
1919 cmd->dma_data_direction);
1921 /* Release ring specific lock */
1922 spin_unlock_irqrestore(&ha->hardware_lock, *flags);
/*
 * Decide whether a CTIO should request explicit confirmation from the
 * initiator: for class-2 service it depends only on the initiator's
 * capability (conf_compl_supported); otherwise explicit confirmation
 * must also be enabled on the HA.
 */
1926 static inline int q2t_need_explicit_conf(scsi_qla_host_t *ha,
1927 struct q2t_cmd *cmd, int sending_sense)
1929 if (ha->enable_class_2)
1933 return cmd->conf_compl_supported;
1935 return ha->enable_explicit_conf && cmd->conf_compl_supported;
/*
 * Fill a 2xxx status-only CTIO (mode 1): SCSI status, residual and,
 * when valid, inline sense data (truncated to the entry's sense_data
 * capacity). Requests explicit confirmation when negotiated.
 */
1938 static void q2x_init_ctio_ret_entry(ctio_ret_entry_t *ctio_m1,
1939 struct q2t_prm *prm)
/* Sense is clamped to what fits inline in the IOCB. */
1943 prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
1944 (uint32_t)sizeof(ctio_m1->sense_data));
1946 ctio_m1->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
1947 OF_NO_DATA | OF_SS_MODE_1);
1948 ctio_m1->flags |= __constant_cpu_to_le16(OF_INC_RC);
1949 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
1950 ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
1953 ctio_m1->scsi_status = cpu_to_le16(prm->rq_result);
1954 ctio_m1->residual = cpu_to_le32(prm->residual);
1955 if (SCST_SENSE_VALID(prm->sense_buffer)) {
1956 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1957 ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
1960 ctio_m1->scsi_status |= __constant_cpu_to_le16(
1961 SS_SENSE_LEN_VALID);
1962 ctio_m1->sense_length = cpu_to_le16(prm->sense_buffer_len);
1963 memcpy(ctio_m1->sense_data, prm->sense_buffer,
1964 prm->sense_buffer_len);
/* No valid sense: send a zeroed sense area of length 0. */
1966 memset(ctio_m1->sense_data, 0, sizeof(ctio_m1->sense_data));
1967 ctio_m1->sense_length = 0;
1970 /* Sense with len > 26, is it possible ??? */
/*
 * Transmit data and/or status for a command on 2xxx hardware.
 * Runs the common preamble (q2t_pre_xmit_response()), builds the CTIO,
 * loads data segments when Q2T_XMIT_DATA is set, and either embeds the
 * status in the same CTIO or — when add_status_pkt was requested —
 * chains a separate status-only CTIO so sense/status is delivered
 * after the data. Returns an SCST_TGT_RES_* code.
 */
1976 static int __q2x_xmit_response(struct q2t_cmd *cmd, int xmit_type)
1979 unsigned long flags;
1980 scsi_qla_host_t *ha;
1982 ctio_common_entry_t *pkt;
1986 memset(&prm, 0, sizeof(prm));
1988 res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
1989 if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
/* The abort path already completed the command; report success. */
1990 if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
1991 res = SCST_TGT_RES_SUCCESS;
1995 /* Here ha->hardware_lock already locked */
1999 q2x_build_ctio_pkt(&prm);
2000 pkt = (ctio_common_entry_t *)prm.pkt;
2002 if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
2003 pkt->flags |= __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_IN);
2004 pkt->flags |= __constant_cpu_to_le16(OF_INC_RC);
2006 q2x_load_data_segments(&prm);
/* Status fits into the data CTIO itself. */
2008 if (prm.add_status_pkt == 0) {
2009 if (xmit_type & Q2T_XMIT_STATUS) {
2010 pkt->scsi_status = cpu_to_le16(prm.rq_result);
2011 pkt->residual = cpu_to_le32(prm.residual);
2012 pkt->flags |= __constant_cpu_to_le16(OF_SSTS);
2013 if (q2t_need_explicit_conf(ha, cmd, 0)) {
2014 pkt->flags |= __constant_cpu_to_le16(
2021 * We have already made sure that there is sufficient
2022 * amount of request entries to not drop HW lock in
2025 ctio_ret_entry_t *ctio_m1 =
2026 (ctio_ret_entry_t *)q2t_get_req_pkt(ha);
2028 TRACE_DBG("%s", "Building additional status packet");
2030 memcpy(ctio_m1, pkt, sizeof(*ctio_m1));
2031 ctio_m1->entry_count = 1;
2032 ctio_m1->dseg_count = 0;
2034 /* Real finish is ctio_m1's finish */
2035 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2036 pkt->flags &= ~__constant_cpu_to_le16(OF_INC_RC);
2038 q2x_init_ctio_ret_entry(ctio_m1, &prm);
2039 TRACE_BUFFER("Status CTIO packet data", ctio_m1,
2040 REQUEST_ENTRY_SIZE);
/* Status-only response: the CTIO itself becomes the status entry. */
2043 q2x_init_ctio_ret_entry((ctio_ret_entry_t *)pkt, &prm);
2045 cmd->state = Q2T_STATE_PROCESSED; /* Mid-level is done processing */
2047 TRACE_BUFFER("Xmitting", pkt, REQUEST_ENTRY_SIZE);
2051 /* Release ring specific lock */
2052 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2055 TRACE_EXIT_RES(res);
/*
 * Debug-only fault injection for SRR (Sequence Retransmission Request)
 * testing: randomly truncates a command's data buffer from the tail or
 * the head (or, in disabled #if 0 code, drops the status) so the
 * initiator is forced to issue SRRs. Compiled out unless
 * CONFIG_QLA_TGT_DEBUG_SRR is set; otherwise a no-op stub.
 */
2059 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
2060 static void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type)
2062 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */
2063 if ((*xmit_type & Q2T_XMIT_STATUS) && (scst_random() % 200) == 50) {
2064 *xmit_type &= ~Q2T_XMIT_STATUS;
2065 TRACE_MGMT_DBG("Dropping cmd %p (tag %d) status", cmd,
/* ~1% of multi-segment data commands: cut the buffer tail. */
2070 if (q2t_has_data(cmd) && (cmd->sg_cnt > 1) &&
2071 ((scst_random() % 100) == 20)) {
2073 unsigned int tot_len = 0;
2076 leave = scst_random() % cmd->sg_cnt;
2078 for (i = 0; i < leave; i++)
2079 tot_len += cmd->sg[i].length;
2081 TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer tail to len %d, "
2082 "sg_cnt %d (cmd->bufflen %d, cmd->sg_cnt %d)", cmd,
2083 cmd->tag, tot_len, leave, cmd->bufflen, cmd->sg_cnt);
2085 cmd->bufflen = tot_len;
2086 cmd->sg_cnt = leave;
/* ~1% of data commands: cut the buffer head at a random offset. */
2089 if (q2t_has_data(cmd) && ((scst_random() % 100) == 70)) {
2090 unsigned int offset = scst_random() % cmd->bufflen;
2092 TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer head "
2093 "to offset %d (cmd->bufflen %d)", cmd, cmd->tag,
2094 offset, cmd->bufflen);
2096 *xmit_type &= ~Q2T_XMIT_DATA;
2097 else if (q2t_cut_cmd_data_head(cmd, offset)) {
2098 TRACE_MGMT_DBG("q2t_cut_cmd_data_head() failed (tag %d)",
2104 static inline void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type) {}
/*
 * SCST xmit_response() entry point: copy the response parameters from
 * the SCST command into the driver's q2t_cmd, then dispatch to the
 * 24xx+ (__q24_xmit_response) or 2xxx (__q2x_xmit_response)
 * implementation depending on the firmware generation.
 */
2107 static int q2x_xmit_response(struct scst_cmd *scst_cmd)
2109 int xmit_type = Q2T_XMIT_DATA, res;
2110 int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2111 struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2113 #ifdef CONFIG_SCST_EXTRACHECKS
2114 sBUG_ON(!q2t_has_data(cmd) && !is_send_status);
2117 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
/* In atomic context, ask SCST to retry from a thread. */
2118 if (scst_cmd_atomic(scst_cmd))
2119 return SCST_TGT_RES_NEED_THREAD_CTX;
2123 xmit_type |= Q2T_XMIT_STATUS;
/* Mirror the SCST response buffer description into our command. */
2125 cmd->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2126 cmd->sg = scst_cmd_get_sg(scst_cmd);
2127 cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2128 cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2129 cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
2130 cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
2131 cmd->aborted = scst_cmd_aborted(scst_cmd);
2133 q2t_check_srr_debug(cmd, &xmit_type);
2135 TRACE_DBG("is_send_status=%x, cmd->bufflen=%d, cmd->sg_cnt=%d, "
2136 "cmd->data_direction=%d", is_send_status, cmd->bufflen,
2137 cmd->sg_cnt, cmd->data_direction);
2139 if (IS_FWI2_CAPABLE(cmd->tgt->ha))
2140 res = __q24_xmit_response(cmd, xmit_type);
2142 res = __q2x_xmit_response(cmd, xmit_type);
/*
 * Fill the status part of a CTIO7: residual, SCSI status and, when
 * sense is valid, switch the entry to status mode 1 and copy the sense
 * buffer in 32-bit big-endian words (truncated to the entry's
 * capacity; a non-multiple-of-4 tail is dropped with a warning).
 * Requests explicit confirmation when negotiated.
 */
2147 static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio,
2148 struct q2t_prm *prm)
2150 ctio7_status1_entry_t *ctio1;
/* Sense is clamped to what fits inline in the status-1 IOCB. */
2154 prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
2155 (uint32_t)sizeof(ctio1->sense_data));
2156 ctio->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2157 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2158 ctio->flags |= __constant_cpu_to_le16(
2159 CTIO7_FLAGS_EXPLICIT_CONFORM |
2160 CTIO7_FLAGS_CONFORM_REQ);
2162 ctio->residual = cpu_to_le32(prm->residual);
2163 ctio->scsi_status = cpu_to_le16(prm->rq_result);
2164 if (SCST_SENSE_VALID(prm->sense_buffer)) {
2166 ctio1 = (ctio7_status1_entry_t *)ctio;
2167 if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2168 ctio1->flags |= __constant_cpu_to_le16(
2169 CTIO7_FLAGS_EXPLICIT_CONFORM |
2170 CTIO7_FLAGS_CONFORM_REQ);
2172 ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2173 ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2174 ctio1->scsi_status |= __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
2175 ctio1->sense_length = cpu_to_le16(prm->sense_buffer_len);
/* Sense is carried as big-endian 32-bit words. */
2176 for (i = 0; i < prm->sense_buffer_len/4; i++)
2177 ((uint32_t *)ctio1->sense_data)[i] =
2178 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2180 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2183 PRINT_INFO("qla2x00tgt(%ld): %d bytes of sense "
2184 "lost", prm->tgt->ha->instance,
2185 prm->sense_buffer_len % 4);
/* No valid sense: still use status mode 1 with an empty sense area. */
2191 ctio1 = (ctio7_status1_entry_t *)ctio;
2192 ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2193 ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2194 ctio1->sense_length = 0;
2195 memset(ctio1->sense_data, 0, sizeof(ctio1->sense_data));
2198 /* Sense with len > 24, is it possible ??? */
/*
 * Transmit data and/or status for a command on 24xx+ hardware.
 * Runs the common preamble (q2t_pre_xmit_response()), builds the CTIO7
 * (which can fail with QUEUE_FULL), loads data segments when
 * Q2T_XMIT_DATA is set, and either embeds status in the same CTIO7 or
 * — when add_status_pkt was requested — chains a separate status-only
 * CTIO7 after the data. Returns an SCST_TGT_RES_* code.
 */
2204 static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type)
2207 unsigned long flags;
2208 scsi_qla_host_t *ha;
2210 ctio7_status0_entry_t *pkt;
2214 memset(&prm, 0, sizeof(prm));
2216 res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
2217 if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
/* The abort path already completed the command; report success. */
2218 if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
2219 res = SCST_TGT_RES_SUCCESS;
2223 /* Here ha->hardware_lock already locked */
2227 res = q24_build_ctio_pkt(&prm);
2228 if (unlikely(res != SCST_TGT_RES_SUCCESS))
2229 goto out_unmap_unlock;
2231 pkt = (ctio7_status0_entry_t *)prm.pkt;
2233 if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
2234 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2235 CTIO7_FLAGS_STATUS_MODE_0);
2237 q24_load_data_segments(&prm);
/* Status fits into the data CTIO7 itself. */
2239 if (prm.add_status_pkt == 0) {
2240 if (xmit_type & Q2T_XMIT_STATUS) {
2241 pkt->scsi_status = cpu_to_le16(prm.rq_result);
2242 pkt->residual = cpu_to_le32(prm.residual);
2243 pkt->flags |= __constant_cpu_to_le16(
2244 CTIO7_FLAGS_SEND_STATUS);
2245 if (q2t_need_explicit_conf(ha, cmd, 0)) {
2246 pkt->flags |= __constant_cpu_to_le16(
2247 CTIO7_FLAGS_EXPLICIT_CONFORM |
2248 CTIO7_FLAGS_CONFORM_REQ);
2253 * We have already made sure that there is sufficient
2254 * amount of request entries to not drop HW lock in
2257 ctio7_status1_entry_t *ctio =
2258 (ctio7_status1_entry_t *)q2t_get_req_pkt(ha);
2260 TRACE_DBG("%s", "Building additional status packet");
2262 memcpy(ctio, pkt, sizeof(*ctio));
2263 ctio->common.entry_count = 1;
2264 ctio->common.dseg_count = 0;
2265 ctio->flags &= ~__constant_cpu_to_le16(
2266 CTIO7_FLAGS_DATA_IN);
2268 /* Real finish is ctio_m1's finish */
2269 pkt->common.handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2270 pkt->flags |= __constant_cpu_to_le16(
2271 CTIO7_FLAGS_DONT_RET_CTIO);
2272 q24_init_ctio_ret_entry((ctio7_status0_entry_t *)ctio,
2274 TRACE_BUFFER("Status CTIO7", ctio, REQUEST_ENTRY_SIZE);
/* Status-only response: the CTIO7 itself carries the status. */
2277 q24_init_ctio_ret_entry(pkt, &prm);
2279 cmd->state = Q2T_STATE_PROCESSED; /* Mid-level is done processing */
2281 TRACE_BUFFER("Xmitting CTIO7", pkt, REQUEST_ENTRY_SIZE);
2286 /* Release ring specific lock */
2287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2290 TRACE_EXIT_RES(res);
/* Error path: undo the DMA mapping before unlocking. */
2294 if (q2t_has_data(cmd))
2295 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2296 cmd->dma_data_direction);
/*
 * Ask the initiator to send write data for cmd: DMA-map the buffer,
 * reserve ring space under ha->hardware_lock, build a DATA_OUT CTIO
 * (CTIO7 on 24xx+, CTIO on 2xxx) with its data segments, and mark the
 * command as waiting for data (Q2T_STATE_NEED_DATA).
 * Returns an SCST_TGT_RES_* code; unmaps on failure.
 */
2300 static int __q2t_rdy_to_xfer(struct q2t_cmd *cmd)
2302 int res = SCST_TGT_RES_SUCCESS;
2303 unsigned long flags;
2304 scsi_qla_host_t *ha;
2305 struct q2t_tgt *tgt = cmd->tgt;
2311 memset(&prm, 0, sizeof(prm));
2318 /* Send marker if required */
2319 if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
2320 res = SCST_TGT_RES_FATAL_ERROR;
2324 TRACE_DBG("CTIO_start: ha(%d)", (int)ha->instance);
2326 /* Calculate number of entries and segments required */
2327 if (q2t_pci_map_calc_cnt(&prm) != 0) {
2328 res = SCST_TGT_RES_QUEUE_FULL;
2332 /* Acquire ring specific lock */
2333 spin_lock_irqsave(&ha->hardware_lock, flags);
2335 /* Does F/W have an IOCBs for this request */
2336 res = q2t_check_reserve_free_req(ha, prm.req_cnt);
2337 if (res != SCST_TGT_RES_SUCCESS)
2338 goto out_unlock_free_unmap;
/* Build the DATA_OUT CTIO for the firmware generation in use. */
2340 if (IS_FWI2_CAPABLE(ha)) {
2341 ctio7_status0_entry_t *pkt;
2342 res = q24_build_ctio_pkt(&prm);
2343 if (unlikely(res != SCST_TGT_RES_SUCCESS))
2344 goto out_unlock_free_unmap;
2345 pkt = (ctio7_status0_entry_t *)prm.pkt;
2346 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2347 CTIO7_FLAGS_STATUS_MODE_0);
2348 q24_load_data_segments(&prm);
2351 ctio_common_entry_t *pkt;
2352 q2x_build_ctio_pkt(&prm);
2353 pkt = (ctio_common_entry_t *)prm.pkt;
2354 pkt->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_OUT);
2355 q2x_load_data_segments(&prm);
2359 cmd->state = Q2T_STATE_NEED_DATA;
2361 TRACE_BUFFER("Xfering", p, REQUEST_ENTRY_SIZE);
2366 /* Release ring specific lock */
2367 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2370 TRACE_EXIT_RES(res);
/* Error path: undo the DMA mapping. */
2373 out_unlock_free_unmap:
2374 if (q2t_has_data(cmd)) {
2375 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2376 cmd->dma_data_direction);
/*
 * SCST rdy_to_xfer() entry point: copy the write-buffer description
 * from the SCST command into the driver's q2t_cmd, then delegate to
 * __q2t_rdy_to_xfer() to request the data from the initiator.
 */
2381 static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd)
2384 struct q2t_cmd *cmd;
2388 TRACE(TRACE_SCSI, "tag=%lld", scst_cmd_get_tag(scst_cmd));
2390 cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2391 cmd->bufflen = scst_cmd_get_bufflen(scst_cmd);
2392 cmd->sg = scst_cmd_get_sg(scst_cmd);
2393 cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2394 cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
2395 cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
2397 res = __q2t_rdy_to_xfer(cmd);
2403 /* If hardware_lock held on entry, might drop it, then reacquire */
/*
 * Terminate the FC exchange for cmd on 2xxx hardware by sending a
 * CTIO with OF_TERM_EXCH. Takes ha->hardware_lock itself unless the
 * caller indicates it is already held (ha_locked). If the command was
 * terminated before reaching the processed state, it is completed to
 * SCST here (direct context or tasklet depending on context), after
 * which cmd must not be touched.
 */
2404 static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2405 atio_entry_t *atio, int ha_locked)
2407 ctio_ret_entry_t *ctio;
2408 unsigned long flags = 0; /* to stop compiler's warning */
2409 int do_tgt_cmd_done = 0;
2413 TRACE_DBG("Sending TERM EXCH CTIO (ha=%p)", ha);
2415 /* Send marker if required */
2416 if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
2420 spin_lock_irqsave(&ha->hardware_lock, flags);
2422 ctio = (ctio_ret_entry_t *)qla2x00_req_pkt(ha);
2424 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
2425 "request packet", ha->instance, __func__);
2429 ctio->entry_type = CTIO_RET_TYPE;
2430 ctio->entry_count = 1;
/* Command never finished processing: complete it to SCST below. */
2432 if (cmd->state < Q2T_STATE_PROCESSED) {
2433 PRINT_ERROR("qla2x00tgt(%ld): Terminating cmd %p with "
2434 "incorrect state %d", ha->instance, cmd,
2437 do_tgt_cmd_done = 1;
2439 ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2442 SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
2443 ctio->rx_id = atio->rx_id;
2445 /* Most likely, it isn't needed */
2446 ctio->residual = atio->data_length;
2447 if (ctio->residual != 0)
2448 ctio->scsi_status |= SS_RESIDUAL_UNDER;
2450 ctio->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_TERM_EXCH |
2451 OF_NO_DATA | OF_SS_MODE_1);
2452 ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
2454 TRACE_BUFFER("CTIO TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
2460 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2462 if (do_tgt_cmd_done) {
2463 if (!ha_locked && !in_interrupt()) {
2464 msleep(250); /* just in case */
2465 scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
2467 scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
2468 /* !! At this point cmd could be already freed !! */
2476 /* If hardware_lock held on entry, might drop it, then reacquire */
/*
 * Terminate the FC exchange for cmd on 24xx+ hardware by sending a
 * CTIO7 with CTIO7_FLAGS_TERMINATE. Takes ha->hardware_lock itself
 * unless the caller indicates it is already held (ha_locked). If the
 * command was terminated before reaching the processed state, it is
 * completed to SCST here (direct context or tasklet depending on
 * context), after which cmd must not be touched.
 */
2477 static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2478 atio7_entry_t *atio, int ha_locked)
2480 ctio7_status1_entry_t *ctio;
2481 unsigned long flags = 0; /* to stop compiler's warning */
2482 int do_tgt_cmd_done = 0;
2486 TRACE_DBG("Sending TERM EXCH CTIO7 (ha=%p)", ha);
2488 /* Send marker if required */
2489 if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
2493 spin_lock_irqsave(&ha->hardware_lock, flags);
2495 ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
2497 PRINT_ERROR("qla2x00tgt(%ld): %s failed: unable to allocate "
2498 "request packet", ha->instance, __func__);
2502 ctio->common.entry_type = CTIO_TYPE7;
2503 ctio->common.entry_count = 1;
2505 ctio->common.nport_handle = cmd->loop_id;
/* Command never finished processing: complete it to SCST below. */
2506 if (cmd->state < Q2T_STATE_PROCESSED) {
2507 PRINT_ERROR("qla2x00tgt(%ld): Terminating cmd %p with "
2508 "incorrect state %d", ha->instance, cmd,
2511 do_tgt_cmd_done = 1;
2513 ctio->common.nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
2514 ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2515 ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
/* s_id bytes are copied in reverse order into initiator_id. */
2516 ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
2517 ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
2518 ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
2519 ctio->common.exchange_addr = atio->exchange_addr;
2520 ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
2521 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
2522 ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
2524 /* Most likely, it isn't needed */
2525 ctio->residual = atio->fcp_cmnd.data_length;
2526 if (ctio->residual != 0)
2527 ctio->scsi_status |= SS_RESIDUAL_UNDER;
2529 TRACE_BUFFER("CTIO7 TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
2535 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2537 if (do_tgt_cmd_done) {
2538 if (!ha_locked && !in_interrupt()) {
2539 msleep(250); /* just in case */
2540 scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
2542 scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
2543 /* !! At this point cmd could be already freed !! */
/*
 * Return a q2t_cmd to its slab cache.  If cmd->free_sg is set the command
 * owns a privately allocated scatterlist which is released first.
 * NOTE(review): the body of the free_sg branch is not visible here —
 * presumably it frees cmd->sg; confirm against the full source.
 */
2551 static inline void q2t_free_cmd(struct q2t_cmd *cmd)
2553 if (unlikely(cmd->free_sg))
2555 kmem_cache_free(q2t_cmd_cachep, cmd);
/*
 * SCST "on_free_cmd" template callback: detach the driver-private q2t_cmd
 * from the SCST command being freed.  The tgt_priv pointer is cleared so
 * no stale reference survives.
 * NOTE(review): the trailing (non-visible) part of this function
 * presumably frees cmd via q2t_free_cmd() — confirm against full source.
 */
2558 static void q2t_on_free_cmd(struct scst_cmd *scst_cmd)
2560 struct q2t_cmd *cmd;
2564 TRACE(TRACE_SCSI, "Freeing command %p, tag %lld", scst_cmd,
2565 scst_cmd_get_tag(scst_cmd));
2567 cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2568 scst_cmd_set_tgt_priv(scst_cmd, NULL);
2576 /* ha->hardware_lock supposed to be held on entry */
/*
 * Handle a CTIO completed with SRR (Sequence Retransmission Request)
 * status: record it on tgt->srr_ctio_list and, when the matching SRR
 * immediate notify has already arrived (imm_srr_id == ctio_srr_id),
 * schedule tgt->srr_work to process the pair.  If the IMM is missing, or
 * the srr_ctio entry cannot be allocated, the corresponding SRR IMM
 * entries are torn down via q2t_reject_free_srr_imm().
 *
 * Returns 0-style result in 'res' (exact error paths partly elided in
 * this view).  Uses GFP_ATOMIC since the HW lock is held with IRQs off.
 */
2577 static int q2t_prepare_srr_ctio(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
2580 struct srr_ctio *sc;
2581 struct q2t_tgt *tgt = ha->tgt;
2583 struct srr_imm *imm;
2587 TRACE_MGMT_DBG("qla2x00tgt(%ld): CTIO with SRR "
2588 "status received", ha->instance);
2591 PRINT_ERROR("qla2x00tgt(%ld): SRR CTIO, "
2592 "but ctio is NULL", ha->instance);
2597 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2600 /* IRQ is already OFF */
2601 spin_lock(&tgt->srr_lock);
2602 sc->srr_id = tgt->ctio_srr_id;
2603 list_add_tail(&sc->srr_list_entry,
2604 &tgt->srr_ctio_list);
2605 TRACE_MGMT_DBG("CTIO SRR %p added (id %d)",
/* The IMM notify with the same srr_id must already be queued;
 * find it and only then kick the work item. */
2607 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2609 list_for_each_entry(imm, &tgt->srr_imm_list,
2611 if (imm->srr_id == sc->srr_id) {
2617 TRACE_MGMT_DBG("%s", "Scheduling srr work");
2618 schedule_work(&tgt->srr_work);
/* Inconsistent state: matching IMM vanished — drop the CTIO SRR. */
2620 PRINT_ERROR("qla2x00tgt(%ld): imm_srr_id "
2621 "== ctio_srr_id (%d), but there is no "
2622 "corresponding SRR IMM, deleting CTIO "
2623 "SRR %p", ha->instance, tgt->ctio_srr_id,
2625 list_del(&sc->srr_list_entry);
2626 spin_unlock(&tgt->srr_lock);
2633 spin_unlock(&tgt->srr_lock);
/* Allocation failure path: reject/free any IMM SRRs for this id. */
2636 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Unable to "
2637 "allocate SRR CTIO entry", ha->instance);
2638 spin_lock(&tgt->srr_lock);
2639 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2641 if (imm->srr_id == tgt->ctio_srr_id) {
2642 TRACE_MGMT_DBG("IMM SRR %p deleted "
2643 "(id %d)", imm, imm->srr_id);
2644 list_del(&imm->srr_list_entry);
2645 q2t_reject_free_srr_imm(ha, imm, 1);
2648 spin_unlock(&tgt->srr_lock);
2654 TRACE_EXIT_RES(res);
2659 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
/*
 * Terminate the FC exchange associated with a completed CTIO.  For
 * FWI2-capable (24xx+) HBAs this inspects the CTIO7 flags and resends a
 * terminate via q24_send_term_exchange() using the saved ATIO7; for older
 * ISPs the 2x00 path is used and the command count is adjusted on
 * non-success status.  The #if 0 branch is deliberately disabled.
 * NOTE(review): the return value computation and some branch conditions
 * are elided in this view — confirm against the full source.
 */
2661 static int q2t_term_ctio_exchange(scsi_qla_host_t *ha, void *ctio,
2662 struct q2t_cmd *cmd, uint32_t status)
2666 if (IS_FWI2_CAPABLE(ha)) {
2668 ctio7_fw_entry_t *c = (ctio7_fw_entry_t *)ctio;
2670 __constant_cpu_to_le16(OF_TERM_EXCH));
2674 q24_send_term_exchange(ha, cmd,
2675 &cmd->atio.atio7, 1);
2678 if (status != CTIO_SUCCESS)
2679 q2x_modify_command_count(ha, 1, 0);
2680 #if 0 /* seems, it isn't needed */
2682 ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
2684 __constant_cpu_to_le16(
2685 CTIO7_FLAGS_TERMINATE));
2689 q2x_send_term_exchange(ha, cmd,
2690 &cmd->atio.atio2x, 1);
2697 /* ha->hardware_lock supposed to be held on entry */
/*
 * Look up and *claim* the outstanding command stored under @handle in
 * ha->cmds[]: the slot is cleared so the same completion cannot be
 * delivered twice.  Caller must already have masked/validated the handle.
 */
2698 static inline struct q2t_cmd *q2t_get_cmd(scsi_qla_host_t *ha, uint32_t handle)
2701 if (ha->cmds[handle] != NULL) {
2702 struct q2t_cmd *cmd = ha->cmds[handle];
/* Take ownership: slot freed for reuse by new commands. */
2703 ha->cmds[handle] = NULL;
2709 /* ha->hardware_lock supposed to be held on entry */
/*
 * Map a CTIO completion back to its q2t_cmd.
 *
 * Normal path: strip the internal handle marks and fetch the command from
 * ha->cmds[] via q2t_get_cmd().  Q2T_SKIP_HANDLE completions (e.g. from
 * terminate-exchange IOCBs built without a command) are silently ignored.
 *
 * Fallback path (NULL handle, pre-24xx only): recover loop_id/tag from
 * the CTIO payload, find the session by loop_id and the SCST command by
 * tag, then read the q2t_cmd from its tgt_priv.  CTIO7 carries no loop
 * ID, so NULL handles on FWI2-capable HBAs are reported as errors.
 *
 * Returns the command, or NULL when it cannot be resolved.
 */
2710 static struct q2t_cmd *q2t_ctio_to_cmd(scsi_qla_host_t *ha, uint32_t handle,
2713 struct q2t_cmd *cmd = NULL;
2715 /* Clear out internal marks */
2716 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | CTIO_INTERMEDIATE_HANDLE_MARK);
2718 if (handle != Q2T_NULL_HANDLE) {
2719 if (unlikely(handle == Q2T_SKIP_HANDLE)) {
2720 TRACE_DBG("%s", "SKIP_HANDLE CTIO");
2723 /* handle-1 is actually used */
2724 if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
2725 PRINT_ERROR("qla2x00tgt(%ld): Wrong handle %x "
2726 "received", ha->instance, handle);
2729 cmd = q2t_get_cmd(ha, handle);
2730 if (unlikely(cmd == NULL)) {
2731 PRINT_WARNING("qla2x00tgt(%ld): Suspicious: unable to "
2732 "find the command with handle %x",
2733 ha->instance, handle);
2736 } else if (ctio != NULL) {
2739 struct q2t_sess *sess;
2740 struct scst_cmd *scst_cmd;
2742 if (IS_FWI2_CAPABLE(ha)) {
2743 /* We can't get loop ID from CTIO7 */
2744 PRINT_ERROR("qla2x00tgt(%ld): Wrong CTIO received: "
2745 "QLA24xx doesn't support NULL handles",
2749 ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
2750 loop_id = GET_TARGET_ID(ha, c);
2754 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
2756 PRINT_WARNING("qla2x00tgt(%ld): Suspicious: "
2757 "ctio_completion for non-existing session "
2758 "(loop_id %d, tag %d)",
2759 ha->instance, loop_id, tag);
2763 scst_cmd = scst_find_cmd_by_tag(sess->scst_sess, tag);
2764 if (scst_cmd == NULL) {
2765 PRINT_WARNING("qla2x00tgt(%ld): Suspicious: unable to "
2766 "find the command with tag %d (loop_id %d)",
2767 ha->instance, tag, loop_id);
2771 cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
2772 TRACE_DBG("Found q2t_cmd %p (tag %d)", cmd, tag);
2780 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
/*
 * Core CTIO completion handler, shared by the 2x00 and 24xx paths.
 *
 * Resolves @handle to a q2t_cmd (q2t_ctio_to_cmd), classifies any
 * non-success @status, then finishes the command according to its state:
 *  - Q2T_STATE_PROCESSED: data phase done — unmap the SG list, the
 *    command is completed below via scst_tgt_cmd_done();
 *  - Q2T_STATE_NEED_DATA: write data arrived — move to DATA_IN, unmap,
 *    and hand the data to SCST with scst_rx_data() (no tgt_cmd_done);
 *  - Q2T_STATE_ABORTED: just traced;
 *  - anything else is an error.
 *
 * Error statuses: LIP/target reset, abort, timeout and invalid RX_ID are
 * logged at MGMT_MINOR level; logged-out/unavailable port at INFO;
 * SRR status defers to q2t_prepare_srr_ctio(); everything else is an
 * error and (unless waiting for data) the exchange is terminated via
 * q2t_term_ctio_exchange(), which may drop/retake the HW lock.
 *
 * Completion context is TASKLET normally, THREAD when the debug option
 * CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD is set.
 * NOTE(review): several gotos/labels and 'return' points are elided in
 * this view; the flow described above follows the visible code only.
 */
2782 static void q2t_do_ctio_completion(scsi_qla_host_t *ha, uint32_t handle,
2783 uint32_t status, void *ctio)
2785 struct scst_cmd *scst_cmd;
2786 struct q2t_cmd *cmd;
2787 enum scst_exec_context context;
2791 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2792 context = SCST_CONTEXT_THREAD;
2794 context = SCST_CONTEXT_TASKLET;
2797 TRACE(TRACE_DEBUG|TRACE_SCSI, "handle(ctio %p status %#x) <- %08x",
2798 ctio, status, handle);
/* Intermediate CTIOs don't complete the command; only interesting
 * when they carry an error status. */
2800 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2801 /* That could happen only in case of an error/reset/abort */
2802 if (status != CTIO_SUCCESS) {
2803 TRACE_MGMT_DBG("Intermediate CTIO received (status %x)",
2809 cmd = q2t_ctio_to_cmd(ha, handle, ctio);
/* No command resolved: still terminate the exchange on error. */
2811 if (status != CTIO_SUCCESS)
2812 q2t_term_ctio_exchange(ha, ctio, NULL, status);
2816 scst_cmd = cmd->scst_cmd;
2818 if (unlikely(status != CTIO_SUCCESS)) {
2819 switch (status & 0xFFFF) {
2820 case CTIO_LIP_RESET:
2821 case CTIO_TARGET_RESET:
2824 case CTIO_INVALID_RX_ID:
2826 TRACE(TRACE_MGMT_MINOR, "qla2x00tgt(%ld): CTIO with "
2827 "status %#x received, state %x, scst_cmd %p, "
2828 "op %x (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2829 "TIMEOUT=b, INVALID_RX_ID=8)", ha->instance,
2830 status, cmd->state, scst_cmd, scst_cmd->cdb[0]);
2833 case CTIO_PORT_LOGGED_OUT:
2834 case CTIO_PORT_UNAVAILABLE:
2835 PRINT_INFO("qla2x00tgt(%ld): CTIO with PORT LOGGED "
2836 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2837 "received (state %x, scst_cmd %p, op %x)",
2838 ha->instance, status, cmd->state, scst_cmd,
2842 case CTIO_SRR_RECEIVED:
2843 if (q2t_prepare_srr_ctio(ha, cmd, ctio) != 0)
2849 PRINT_ERROR("qla2x00tgt(%ld): CTIO with error status "
2850 "0x%x received (state %x, scst_cmd %p, op %x)",
2851 ha->instance, status, cmd->state, scst_cmd,
/* NEED_DATA failures are reported through scst_rx_data() below
 * instead of terminating the exchange here. */
2856 if (cmd->state != Q2T_STATE_NEED_DATA)
2857 if (q2t_term_ctio_exchange(ha, ctio, cmd, status))
2861 if (cmd->state == Q2T_STATE_PROCESSED) {
2862 TRACE_DBG("Command %p finished", cmd);
2863 if (q2t_has_data(cmd)) {
2864 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2865 cmd->dma_data_direction);
2867 } else if (cmd->state == Q2T_STATE_NEED_DATA) {
2868 int rx_status = SCST_RX_STATUS_SUCCESS;
2870 cmd->state = Q2T_STATE_DATA_IN;
2872 if (unlikely(status != CTIO_SUCCESS))
2873 rx_status = SCST_RX_STATUS_ERROR;
2875 TRACE_DBG("Data received, context %x, rx_status %d",
2876 context, rx_status);
2878 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt,
2879 cmd->dma_data_direction);
2881 scst_rx_data(scst_cmd, rx_status, context);
2883 } else if (cmd->state == Q2T_STATE_ABORTED) {
2884 TRACE_MGMT_DBG("Aborted command %p (tag %d) finished", cmd,
2887 PRINT_ERROR("qla2x00tgt(%ld): A command in state (%d) should "
2888 "not return a CTIO complete", ha->instance, cmd->state);
/* Mark delivery failure so SCST does not assume status reached
 * the initiator. */
2891 if (unlikely(status != CTIO_SUCCESS)) {
2892 TRACE_MGMT_DBG("%s", "Finishing failed CTIO");
2893 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
2896 scst_tgt_cmd_done(scst_cmd, context);
2903 /* ha->hardware_lock supposed to be held on entry */
2904 /* called via callback from qla2xxx */
/*
 * 2x00-series CTIO completion entry point from the qla2xxx ISR.
 * Bumps irq_cmd_count around the core handler (used elsewhere to fence
 * tgt_stop against in-flight IRQ work — see file-head comment) and
 * delegates to q2t_do_ctio_completion() with CTIO_SUCCESS status.
 */
2905 static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
2907 struct q2t_tgt *tgt = ha->tgt;
2911 if (likely(tgt != NULL)) {
2912 tgt->irq_cmd_count++;
2913 q2t_do_ctio_completion(ha, handle, CTIO_SUCCESS, NULL);
2914 tgt->irq_cmd_count--;
2916 TRACE_DBG("CTIO, but target mode not enabled (ha %p handle "
2917 "%#x)", ha, handle);
2924 /* ha->hardware_lock is supposed to be held on entry */
/*
 * Hand a 2x00-format ATIO command to SCST: build the scst_cmd from the
 * byte-swapped LUN and CDB, tag it with the ATIO rx_id, derive the
 * expected data direction/length from the execution codes, map the ATIO
 * task codes to an SCST queue type (unknown codes fall back to ORDERED),
 * and kick scst_cmd_init_done() in atomic (tasklet or debug-thread)
 * context.  Returns non-zero if scst_rx_cmd() fails.
 */
2925 static int q2x_do_send_cmd_to_scst(struct q2t_cmd *cmd)
2928 struct q2t_sess *sess = cmd->sess;
2930 atio_entry_t *atio = &cmd->atio.atio2x;
2931 scst_data_direction dir;
2936 /* make it be in network byte order */
2937 lun = swab16(le16_to_cpu(atio->lun));
2938 cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, (uint8_t *)&lun,
2939 sizeof(lun), atio->cdb, Q2T_MAX_CDB_LEN,
2942 if (cmd->scst_cmd == NULL) {
2943 PRINT_ERROR("%s", "qla2x00tgt: scst_rx_cmd() failed");
2948 cmd->tag = atio->rx_id;
2949 scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
2950 scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
2952 dir = SCST_DATA_NONE;
2953 if (atio->execution_codes & ATIO_EXEC_READ)
2954 dir |= SCST_DATA_READ;
2955 if (atio->execution_codes & ATIO_EXEC_WRITE)
2956 dir |= SCST_DATA_WRITE;
2957 scst_cmd_set_expected(cmd->scst_cmd, dir,
2958 le32_to_cpu(atio->data_length));
/* ATIO task codes -> SCST queue types (defaults to ORDERED on
 * unrecognized codes, with an error logged). */
2960 switch (atio->task_codes) {
2961 case ATIO_SIMPLE_QUEUE:
2962 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2964 case ATIO_HEAD_OF_QUEUE:
2965 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
2967 case ATIO_ORDERED_QUEUE:
2968 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2970 case ATIO_ACA_QUEUE:
2971 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
2974 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
2977 PRINT_ERROR("qla2x00tgt: unknown task code %x, use "
2978 "ORDERED instead", atio->task_codes);
2979 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
2983 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
2984 context = SCST_CONTEXT_THREAD;
2986 context = SCST_CONTEXT_TASKLET;
2989 TRACE_DBG("Context %x", context);
2990 TRACE(TRACE_SCSI, "START Command (tag %d, queue_type %d)",
2991 cmd->tag, cmd->scst_cmd->queue_type);
2992 scst_cmd_init_done(cmd->scst_cmd, context);
2995 TRACE_EXIT_RES(res);
2999 /* ha->hardware_lock is supposed to be held on entry */
/*
 * 24xx counterpart of q2x_do_send_cmd_to_scst(): build the scst_cmd from
 * the FCP_CMND embedded in the ATIO7 (LUN passed as-is, CDB from
 * fcp_cmnd.cdb), tag it with the exchange address, derive direction from
 * the rddata/wrdata bits and length from the big-endian data_length, map
 * the FCP task attribute to an SCST queue type, and call
 * scst_cmd_init_done() in atomic context.  Returns non-zero if
 * scst_rx_cmd() fails.
 */
3000 static int q24_do_send_cmd_to_scst(struct q2t_cmd *cmd)
3003 struct q2t_sess *sess = cmd->sess;
3004 atio7_entry_t *atio = &cmd->atio.atio7;
3005 scst_data_direction dir;
3010 cmd->scst_cmd = scst_rx_cmd(sess->scst_sess,
3011 (uint8_t *)&atio->fcp_cmnd.lun, sizeof(atio->fcp_cmnd.lun),
3012 atio->fcp_cmnd.cdb, Q2T_MAX_CDB_LEN, SCST_ATOMIC);
3014 if (cmd->scst_cmd == NULL) {
3015 PRINT_ERROR("%s", "qla2x00tgt: scst_rx_cmd() failed");
/* The exchange address uniquely identifies the exchange => tag. */
3020 cmd->tag = atio->exchange_addr;
3021 scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
3022 scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
3024 dir = SCST_DATA_NONE;
3025 if (atio->fcp_cmnd.rddata)
3026 dir |= SCST_DATA_READ;
3027 if (atio->fcp_cmnd.wrdata)
3028 dir |= SCST_DATA_WRITE;
3029 scst_cmd_set_expected(cmd->scst_cmd, dir,
3030 be32_to_cpu(atio->fcp_cmnd.data_length));
/* FCP task attribute -> SCST queue type (ORDERED on unknown). */
3032 switch (atio->fcp_cmnd.task_attr) {
3033 case ATIO_SIMPLE_QUEUE:
3034 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3036 case ATIO_HEAD_OF_QUEUE:
3037 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3039 case ATIO_ORDERED_QUEUE:
3040 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3042 case ATIO_ACA_QUEUE:
3043 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
3046 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
3049 PRINT_ERROR("qla2x00tgt: unknown task code %x, use "
3050 "ORDERED instead", atio->fcp_cmnd.task_attr);
3051 cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3055 #ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
3056 context = SCST_CONTEXT_THREAD;
3058 context = SCST_CONTEXT_TASKLET;
3061 TRACE_DBG("Context %x", context);
3062 TRACE(TRACE_SCSI, "START Command %p (tag %d, queue type %x)", cmd,
3063 cmd->tag, cmd->scst_cmd->queue_type);
3064 scst_cmd_init_done(cmd->scst_cmd, context);
3067 TRACE_EXIT_RES(res);
3071 /* ha->hardware_lock supposed to be held on entry */
/*
 * Attach the resolved session to the command (loop_id and confirmed-
 * completion capability) and dispatch to the generation-specific sender:
 * q24_* for FWI2-capable (24xx+) HBAs, q2x_* otherwise.  Returns the
 * sender's result.
 */
3072 static int q2t_do_send_cmd_to_scst(scsi_qla_host_t *ha,
3073 struct q2t_cmd *cmd, struct q2t_sess *sess)
3080 cmd->loop_id = sess->loop_id;
3081 cmd->conf_compl_supported = sess->conf_compl_supported;
3083 if (IS_FWI2_CAPABLE(ha))
3084 res = q24_do_send_cmd_to_scst(cmd);
3086 res = q2x_do_send_cmd_to_scst(cmd);
3088 TRACE_EXIT_RES(res);
3092 /* ha->hardware_lock supposed to be held on entry */
/*
 * Entry point for a newly received ATIO/ATIO7 command.
 *
 * Rejects new commands while the target is stopping, allocates a zeroed
 * q2t_cmd (GFP_ATOMIC, HW lock held), copies in the raw ATIO and looks
 * up the session — by S_ID on 24xx, by loop ID on older ISPs.  When no
 * session exists yet, a manual-login path is taken (elided in this view;
 * presumably it schedules session-creation work — see the fragment
 * below).  Otherwise the command is dispatched via
 * q2t_do_send_cmd_to_scst().
 */
3093 static int q2t_send_cmd_to_scst(scsi_qla_host_t *ha, atio_t *atio)
3096 struct q2t_tgt *tgt = ha->tgt;
3097 struct q2t_sess *sess;
3098 struct q2t_cmd *cmd;
3102 if (unlikely(tgt->tgt_stop)) {
3103 TRACE_MGMT_DBG("New command while device %p is shutting "
3109 cmd = kmem_cache_zalloc(q2t_cmd_cachep, GFP_ATOMIC);
3111 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of cmd failed");
/* atio2x is the largest union member copied here; atio7 aliases it. */
3116 memcpy(&cmd->atio.atio2x, atio, sizeof(*atio));
3117 cmd->state = Q2T_STATE_NEW;
3120 if (IS_FWI2_CAPABLE(ha)) {
3121 atio7_entry_t *a = (atio7_entry_t *)atio;
3122 sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
3123 if (unlikely(sess == NULL)) {
3124 TRACE_MGMT_DBG("qla2x00tgt(%ld): Unable to find "
3125 "wwn login (s_id %x:%x:%x), trying to create "
3126 "it manually", ha->instance,
3127 a->fcp_hdr.s_id[0], a->fcp_hdr.s_id[1],
3128 a->fcp_hdr.s_id[2]);
3132 sess = q2t_find_sess_by_loop_id(tgt,
3133 GET_TARGET_ID(ha, (atio_entry_t *)atio));
3134 if (unlikely(sess == NULL)) {
3135 TRACE_MGMT_DBG("qla2x00tgt(%ld): Unable to find "
3136 "wwn login (loop_id=%d), trying to create it "
3137 "manually", ha->instance,
3138 GET_TARGET_ID(ha, (atio_entry_t *)atio));
3143 res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
3144 if (unlikely(res != 0))
3148 TRACE_EXIT_RES(res);
/*
 * NOTE(review): fragment of the session-creation scheduling path (its
 * function header is not visible in this view — presumably the
 * out-of-session branch of command delivery).  It allocates a work
 * parameter, queues it on tgt->sess_works_list under sess_work_lock and
 * schedules tgt->sess_work to create the session asynchronously.
 */
3157 struct q2t_sess_work_param *prm;
3158 unsigned long flags;
3160 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
3162 PRINT_ERROR("%s", "Unable to create session work, "
3163 "command will be refused");
3168 TRACE_MGMT_DBG("Scheduling work to find session for cmd %p",
3173 spin_lock_irqsave(&tgt->sess_work_lock, flags);
/* First pending work resets the "TM sent to unknown session" flag. */
3174 if (!tgt->sess_works_pending)
3175 tgt->tm_to_unknown = 0;
3176 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
3177 tgt->sess_works_pending = 1;
3178 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
3180 schedule_work(&tgt->sess_work);
3185 /* ha->hardware_lock supposed to be held on entry */
/*
 * Translate a driver task-management request into the corresponding SCST
 * management function and submit it with scst_rx_mgmt_fn_lun() in atomic
 * context.  A q2t_mgmt_cmd is allocated from the mempool to carry the
 * original notify IOCB (needed later to ack/complete the TM); it is
 * freed (out_free) if submission fails or the fn code is unknown.
 *
 * @sess     - session the TM arrived on
 * @lun/@lun_size - LUN in the representation expected by SCST
 * @fn       - TM function code (CLEAR_ACA, TARGET/LUN reset, task set
 *             ops, nexus-loss variants, ...)
 * @iocb     - original immediate-notify IOCB, copied into mcmd
 * @flags    - stored in mcmd->flags for the completion path
 */
3186 static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
3187 int lun_size, int fn, void *iocb, int flags)
3189 int res = 0, rc = -1;
3190 struct q2t_mgmt_cmd *mcmd;
3194 mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
3196 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Allocation of management "
3197 "command failed, some commands and their data could "
3198 "leak", sess->tgt->ha->instance);
3202 memset(mcmd, 0, sizeof(*mcmd));
/* Keep the originating IOCB so the TM can be acked on completion. */
3206 memcpy(&mcmd->orig_iocb.notify_entry, iocb,
3207 sizeof(mcmd->orig_iocb.notify_entry));
3209 mcmd->flags = flags;
3213 TRACE(TRACE_MGMT, "%s", "CLEAR_ACA received");
3214 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_ACA,
3215 lun, lun_size, SCST_ATOMIC, mcmd);
3218 case Q2T_TARGET_RESET:
3219 TRACE(TRACE_MGMT, "%s", "TARGET_RESET received");
3220 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
3221 lun, lun_size, SCST_ATOMIC, mcmd);
3225 TRACE(TRACE_MGMT, "%s", "LUN_RESET received");
3226 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
3227 lun, lun_size, SCST_ATOMIC, mcmd);
3231 TRACE(TRACE_MGMT, "%s", "CLEAR_TS received");
3232 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_TASK_SET,
3233 lun, lun_size, SCST_ATOMIC, mcmd);
3237 TRACE(TRACE_MGMT, "%s", "ABORT_TS received");
3238 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_ABORT_TASK_SET,
3239 lun, lun_size, SCST_ATOMIC, mcmd);
3243 TRACE(TRACE_MGMT, "%s", "Doing ABORT_ALL_TASKS");
3244 rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
3245 SCST_ABORT_ALL_TASKS,
3246 lun, lun_size, SCST_ATOMIC, mcmd);
3249 case Q2T_ABORT_ALL_SESS:
3250 TRACE(TRACE_MGMT, "%s", "Doing ABORT_ALL_TASKS_SESS");
3251 rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
3252 SCST_ABORT_ALL_TASKS_SESS,
3253 lun, lun_size, SCST_ATOMIC, mcmd);
3256 case Q2T_NEXUS_LOSS_SESS:
3257 TRACE(TRACE_MGMT, "%s", "Doing NEXUS_LOSS_SESS");
3258 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS_SESS,
3259 lun, lun_size, SCST_ATOMIC, mcmd);
3262 case Q2T_NEXUS_LOSS:
3263 TRACE(TRACE_MGMT, "%s", "Doing NEXUS_LOSS");
3264 rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS,
3265 lun, lun_size, SCST_ATOMIC, mcmd);
3269 PRINT_ERROR("qla2x00tgt(%ld): Unknown task mgmt fn 0x%x",
3270 sess->tgt->ha->instance, fn);
3276 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_lun() failed: %d",
3277 sess->tgt->ha->instance, rc);
3283 TRACE_EXIT_RES(res);
/* Error path: return the mgmt command to its mempool. */
3287 mempool_free(mcmd, q2t_mgmt_cmd_mempool);
3291 /* ha->hardware_lock supposed to be held on entry */
/*
 * Decode an incoming task-management request and route it to
 * q2t_issue_task_mgmt().
 *
 * On 24xx the TM arrives as an ATIO7: LUN and fn come from the embedded
 * FCP_CMND, the session is found by S_ID (and its cached s_id bytes are
 * refreshed — note the FCP header/driver byte-order reversal).  On older
 * ISPs it arrives as a notify entry: LUN is byte-swapped to network
 * order, fn is extracted from task_flags, session found by loop ID.
 * A TM for a nonexistent session sets tgt->tm_to_unknown so pending
 * session work can respond appropriately.
 */
3292 static int q2t_handle_task_mgmt(scsi_qla_host_t *ha, void *iocb)
3295 struct q2t_tgt *tgt;
3296 struct q2t_sess *sess;
3305 if (IS_FWI2_CAPABLE(ha)) {
3306 atio7_entry_t *a = (atio7_entry_t *)iocb;
3307 lun = (uint8_t *)&a->fcp_cmnd.lun;
3308 lun_size = sizeof(a->fcp_cmnd.lun);
3309 fn = a->fcp_cmnd.task_mgmt_flags;
3310 sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
/* Refresh cached port address from the FCP header (reversed order). */
3312 sess->s_id.b.al_pa = a->fcp_hdr.s_id[2];
3313 sess->s_id.b.area = a->fcp_hdr.s_id[1];
3314 sess->s_id.b.domain = a->fcp_hdr.s_id[0];
3317 notify_entry_t *n = (notify_entry_t *)iocb;
3318 /* make it be in network byte order */
3319 lun_data = swab16(le16_to_cpu(n->lun));
3320 lun = (uint8_t *)&lun_data;
3321 lun_size = sizeof(lun_data);
3322 fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
3323 sess = q2t_find_sess_by_loop_id(tgt, GET_TARGET_ID(ha, n));
3327 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task mgmt fn 0x%x for "
3328 "non-existant session", ha->instance, fn);
3329 tgt->tm_to_unknown = 1;
3334 res = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
3337 TRACE_EXIT_RES(res);
3341 /* ha->hardware_lock supposed to be held on entry */
/*
 * Handle an ABORT TASK immediate notify (pre-24xx notify_entry_t):
 * resolve the session by loop ID, take the tag from seq_id, and submit
 * SCST_ABORT_TASK via scst_rx_mgmt_fn_tag() with a mempool-allocated
 * q2t_mgmt_cmd carrying the original IOCB.  A missing session sets
 * tgt->tm_to_unknown; a failed submission frees the mgmt command
 * (out_free path).
 */
3342 static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
3345 struct q2t_mgmt_cmd *mcmd;
3346 struct q2t_sess *sess;
3352 loop_id = GET_TARGET_ID(ha, iocb);
3353 tag = le16_to_cpu(iocb->seq_id);
3355 sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
3357 TRACE(TRACE_MGMT, "qla2x00tgt(%ld): task abort for unexisting "
3358 "session", ha->instance);
3359 ha->tgt->tm_to_unknown = 1;
3364 mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
3366 PRINT_ERROR("%s: Allocation of ABORT cmd failed", __func__);
3370 memset(mcmd, 0, sizeof(*mcmd));
/* Keep the originating IOCB for the ack on TM completion. */
3373 memcpy(&mcmd->orig_iocb.notify_entry, iocb,
3374 sizeof(mcmd->orig_iocb.notify_entry));
3376 rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
3379 PRINT_ERROR("qla2x00tgt(%ld): scst_rx_mgmt_fn_tag() failed: %d",
3386 TRACE_EXIT_RES(res);
3390 mempool_free(mcmd, q2t_mgmt_cmd_mempool);
3395 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
/*
 * Handle a 24xx ELS immediate notify by status_subcode.  The visible
 * cases: one group triggers a session nexus-loss reset via q2t_reset();
 * another acks a pending link-reinit IOCB and requests a notify ack
 * (res = 1); unsupported ELS opcodes are logged and also answered with
 * a nexus-loss reset.  Returns non-zero when the caller should send a
 * notify ack.
 * NOTE(review): the individual ELS case labels are elided in this view —
 * confirm which subcodes map to which branch against the full source.
 */
3397 static int q24_handle_els(scsi_qla_host_t *ha, notify24xx_entry_t *iocb)
3403 TRACE(TRACE_MGMT, "ELS opcode %x", iocb->status_subcode);
3405 switch (iocb->status_subcode) {
3411 res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
3417 struct q2t_tgt *tgt = ha->tgt;
3418 if (tgt->link_reinit_iocb_pending) {
3419 q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
3420 tgt->link_reinit_iocb_pending = 0;
3422 res = 1; /* send notify ack */
3427 PRINT_ERROR("qla2x00tgt(%ld): Unsupported ELS command %x "
3428 "received", ha->instance, iocb->status_subcode);
3429 res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
3433 TRACE_EXIT_RES(res);
3437 static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset)
3440 int cnt, first_sg, first_page = 0, first_page_offs = 0, i;
3442 int cur_dst, cur_src;
3443 struct scatterlist *sg;
3451 for (i = 0; i < cmd->sg_cnt; i++) {
3452 l += cmd->sg[i].length;
3454 int sg_offs = l - cmd->sg[i].length;
3456 if (cmd->sg[i].offset == 0) {
3457 first_page_offs = offset % PAGE_SIZE;
3458 first_page = (offset - sg_offs) >> PAGE_SHIFT;
3460 TRACE_SG("i=%d, sg[i].offset=%d, "
3461 "sg_offs=%d", i, cmd->sg[i].offset, sg_offs);
3462 if ((cmd->sg[i].offset + sg_offs) > offset) {
3463 first_page_offs = offset - sg_offs;
3466 int sec_page_offs = sg_offs +
3467 (PAGE_SIZE - cmd->sg[i].offset);
3468 first_page_offs = sec_page_offs % PAGE_SIZE;
3470 ((offset - sec_page_offs) >>
3474 cnt = cmd->sg_cnt - i + (first_page_offs != 0);
3478 if (first_sg == -1) {
3479 PRINT_ERROR("qla2x00tgt(%ld): Wrong offset %d, buf length %d",
3480 cmd->tgt->ha->instance, offset, cmd->bufflen);
3485 TRACE_SG("offset=%d, first_sg=%d, first_page=%d, "
3486 "first_page_offs=%d, cmd->bufflen=%d, cmd->sg_cnt=%d", offset,
3487 first_sg, first_page, first_page_offs, cmd->bufflen,
3490 sg = kmalloc(cnt * sizeof(sg[0]), GFP_KERNEL);
3492 PRINT_CRIT_ERROR("qla2x00tgt(%ld): Unable to allocate cut "
3493 "SG (len %zd)", cmd->tgt->ha->instance,
3494 cnt * sizeof(sg[0]));
3498 sg_init_table(sg, cnt);
3502 if (first_page_offs != 0) {
3504 sg_set_page(&sg[cur_dst], &sg_page(&cmd->sg[cur_src])[first_page],
3505 PAGE_SIZE - first_page_offs, first_page_offs);
3506 bufflen += sg[cur_dst].length;
3507 TRACE_SG("cur_dst=%d, cur_src=%d, sg[].page=%p, "
3508 "sg[].offset=%d, sg[].length=%d, bufflen=%zu",
3509 cur_dst, cur_src, sg_page(&sg[cur_dst]), sg[cur_dst].offset,
3510 sg[cur_dst].length, bufflen);