2 * Copyright (c) 1997-2007 by Matthew Jacob
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Alternatively, this software may be distributed under the terms of the
29 * GNU Public License ("GPL") with platforms where the prevalent license
30 * is the GNU Public License:
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of The Version 2 GNU General Public License as published
34 * by the Free Software Foundation.
36 * This program is distributed in the hope that it will be useful,
37 * but WITHOUT ANY WARRANTY; without even the implied warranty of
38 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
39 * GNU General Public License for more details.
41 * You should have received a copy of the GNU General Public License
42 * along with this program; if not, write to the Free Software
43 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
49 * Menlo Park, CA 94025
57 * Qlogic ISP target driver for SCST.
58 * Copyright (c) 2007 Stanislaw Gruszka
59 * Copyright (c) 2007, 2008 Open-E Inc
63 * This file connects the tpublic API from the low level ISP driver (see common/isp_tpublic.h)
64 * with the SCST target driver API. Such a design has certain disadvantages compared to
65 * using the SCST target API directly in the low level driver:
66 * - we need to maintain duplicate data structures that are already maintained in the low
67 * level driver (command queue, initiator data),
68 * - processing takes additional CPU time for calling procedures and processing data.
69 * However, the performance/memory cost is small, and such a design is flexible, as we
70 * don't need to worry about low level details (e.g. if support for a new chipset is
71 * added to the low level ISP driver, this code will not need to be changed).
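 *
 * Rough sketch of the resulting flow (an illustrative summary of the code below, not an
 * extra interface): the low level driver hands us a tmd_cmd_t (scsi_target_start_cmd), we
 * queue it per channel and pass it to SCST with scst_rx_cmd() from a tasklet
 * (tasklet_rx_cmds); data and status travel back down via QIN_TMD_CONT from
 * isp_rdy_to_xfer() / isp_xmit_response(), and the command is finally returned to the low
 * level driver with QIN_TMD_FIN from isp_on_free_cmd().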
75 #error "this can only be built as a module"
78 #include <linux/version.h>
79 #include <linux/module.h>
80 #include <linux/kernel.h>
81 #include <linux/ctype.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 #include <linux/kthread.h>
86 #define LOG_PREFIX "qla_isp"
88 #include <scsi/scsi_host.h>
89 #include <scsi/scsi.h>
91 #include <scst_debug.h>
93 #include "isp_tpublic.h"
94 #include "isp_linux.h"
95 #include "linux/smp_lock.h"
100 /* convenience pointers used while commands are being processed */
101 #define cd_scst_cmd cd_hreserved[0].ptrs[0]
102 #define cd_bus cd_hreserved[1].ptrs[0]
103 #define cd_hnext cd_hreserved[2].ptrs[0]
104 #define cd_ini cd_hreserved[3].ptrs[0]
105 #define nt_ini nt_hreserved
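
/*
 * Illustrative note: the hreserved slots are opaque per-command/per-notify storage reserved
 * for this glue by the low level driver, and the macros above simply alias them so the two
 * halves of the glue can always reach each other, e.g. (as done in tasklet_rx_cmds below):
 *
 *   tmd->cd_scst_cmd = scst_cmd;             (stash the SCST command in the tmd)
 *   scst_cmd_set_tgt_priv(scst_cmd, tmd);    (and the tmd in the SCST command)
 */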
107 /* command private flags */
108 #define CDF_PRIVATE_ABORTED 0x1000
111 #define SCSI_GOOD 0x0
114 #define SCSI_BUSY 0x8
117 #define SCSI_CHECK 0x2
120 #define SCSI_QFULL 0x28
123 typedef struct bus bus_t;
124 typedef struct bus_chan bus_chan_t;
125 typedef struct initiator ini_t;
129 uint64_t ini_iid; /* initiator identifier */
130 struct scst_session * ini_scst_sess; /* session established by this remote initiator */
131 int ini_refcnt; /* reference counter, protected by bus_chan_t::tmds_lock */
134 #define HASH_WIDTH 16
135 #define INI_HASH_LISTP(bc, ini_id) bc->list[ini_id & (HASH_WIDTH - 1)]
138 ini_t * list[HASH_WIDTH]; /* hash list of known initiators */
139 spinlock_t tmds_lock;
140 tmd_cmd_t * tmds_front;
141 tmd_cmd_t * tmds_tail;
142 struct tasklet_struct tasklet;
143 struct scst_tgt * scst_tgt;
144 uint64_t enable; /* target mode enabled in the low level driver, one bit per LUN */
145 struct rw_semaphore disable_sem; /* helps synchronize disabling target mode */
146 bus_t * bus; /* back pointer */
147 wait_queue_head_t unreg_waitq;
152 hba_register_t h; /* must be first */
153 int need_reg; /* helpers for registration / unregistration */
154 hba_register_t * unreg_hp;
155 bus_chan_t * bchan; /* channels */
156 struct scst_proc_data proc_data;
162 #define BUS_DBG(bp, fmt, args...) if (debug > 0) printk("%s%d: %s " fmt, bp->h.r_name, bp->h.r_inst, __func__, ##args)
163 #define BUS_DBG2(bp, fmt, args...) if (debug > 1) printk("%s%d: %s " fmt, bp->h.r_name, bp->h.r_inst, __func__, ##args)
164 static int debug = 0;
165 module_param(debug, int, 0);
167 #define BUS_DBG(bp, fmt, args...)
168 #define BUS_DBG2(bp, fmt, args...)
171 #define Eprintk(fmt, args...) printk(KERN_ERR "isp_scst(%s): " fmt, __func__, ##args)
172 #define Iprintk(fmt, args...) printk(KERN_INFO "isp_scst(%s): " fmt, __func__, ##args)
174 static void scsi_target_handler(qact_e, void *);
176 static __inline bus_t *bus_from_tmd(tmd_cmd_t *);
177 static __inline bus_t *bus_from_name(const char *);
179 static void scsi_target_start_cmd(tmd_cmd_t *);
180 static void scsi_target_done_cmd(tmd_cmd_t *);
181 static int scsi_target_enadis(bus_t *, uint64_t, int, int);
182 static void bus_chan_unregister_sessions(bus_chan_t *bc, int wait);
184 static bus_t busses[MAX_BUS];
186 static DEFINE_SPINLOCK(scsi_target_lock);
188 DECLARE_WAIT_QUEUE_HEAD(qlaispd_waitq);
189 struct task_struct *qlaispd_task;
191 static unsigned long qlaispd_flags = 0;
192 #define SF_ADD_INITIATORS 0
193 #define SF_REGISTER_SCST 1
194 #define SF_UNREGISTER_SCST 2
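
/*
 * Sketch of how these flags are used (mirrors schedule_qlaispd() below and the
 * qlaispd_function() consumer further down):
 *
 *   producer (IRQ/tasklet side):   schedule_qlaispd(SF_ADD_INITIATORS);
 *   qlaispd thread:                if (test_and_clear_bit(SF_ADD_INITIATORS, &qlaispd_flags))
 *                                      bus_add_initiators();
 */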
197 schedule_qlaispd(int flag)
199 set_bit(flag, &qlaispd_flags);
200 wake_up_interruptible(&qlaispd_waitq);
204 validate_bus_pointer(bus_t *bp, void *identity)
206 if (bp >= busses && bp < &busses[MAX_BUS]) {
207 if (bp->h.r_action) {
208 if (bp->h.r_identity == identity) {
216 static __inline bus_t *
217 bus_from_tmd(tmd_cmd_t *tmd)
220 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
221 if (validate_bus_pointer(bp, tmd->cd_hba)) {
228 static __inline bus_t *
229 bus_from_name(const char *name)
232 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
234 if (bp->h.r_action == NULL) {
237 snprintf(localbuf, sizeof (localbuf), "%s%d", bp->h.r_name, bp->h.r_inst);
238 if (strncmp(name, localbuf, sizeof (localbuf) - 1) == 0) {
245 static __inline bus_t *
246 bus_from_notify(isp_notify_t *np)
249 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
250 if (bp->h.r_action == NULL) {
253 if (bp->h.r_identity == np->nt_hba) {
260 static __inline ini_t *
261 ini_from_iid(bus_chan_t *bc, uint64_t iid)
263 ini_t *ptr = INI_HASH_LISTP(bc, iid);
266 if (ptr->ini_iid == iid) {
269 } while ((ptr = ptr->ini_next) != NULL);
275 alloc_ini(bus_chan_t *bc, uint64_t iid)
280 nptr = kmalloc(sizeof(ini_t), GFP_KERNEL);
282 Eprintk("cannot allocate initiator data\n");
285 memset(nptr, 0, sizeof(ini_t));
287 #define GET(byte) (uint8_t) ((iid >> 8*byte) & 0xff)
288 snprintf(ini_name, sizeof(ini_name), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
289 GET(7), GET(6), GET(5) , GET(4), GET(3), GET(2), GET(1), GET(0));
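/*
 * For illustration only: an iid of 0x21000024ff123456 (a made-up WWN) formats as
 * "21:00:00:24:ff:12:34:56", and this string becomes the SCST session name below.
 */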
292 nptr->ini_scst_sess = scst_register_session(bc->scst_tgt, 0, ini_name, NULL, NULL);
293 if (!nptr->ini_scst_sess) {
294 Eprintk("cannot register SCST session\n");
298 atomic_inc(&bc->sess_count);
299 BUS_DBG(bc->bus, "0x%016llx, ++sess_count %d\n", iid, atomic_read(&bc->sess_count));
304 free_ini(bus_chan_t *bc, ini_t *ini, int wait)
306 BUS_DBG(bc->bus, "0x%016llx, sess_count-- %d, wait %d\n", ini->ini_iid, atomic_read(&bc->sess_count), wait);
307 scst_unregister_session(ini->ini_scst_sess, wait, NULL);
308 /* a no-wait call happens only when there are no pending commands, so it is safe to free here */
310 atomic_dec(&bc->sess_count);
311 wake_up(&bc->unreg_waitq);
315 add_ini(bus_chan_t *bc, uint64_t iid, ini_t *nptr)
317 ini_t **ptrlptr = &INI_HASH_LISTP(bc, iid);
320 nptr->ini_next = *ptrlptr;
321 nptr->ini_refcnt = 0;
326 del_ini(bus_chan_t *bc, uint64_t iid)
329 ini_t **ptrlptr = &INI_HASH_LISTP(bc, iid);
335 if (ptr->ini_iid == iid) {
336 *ptrlptr = ptr->ini_next;
337 ptr->ini_next = NULL;
345 if (ptr->ini_iid == iid) {
346 prev->ini_next = ptr->ini_next;
347 ptr->ini_next = NULL;
356 __ini_get(bus_chan_t *bc, ini_t *ini)
360 BUS_DBG2(bc->bus, "0x%016llx ++refcnt (%d)\n", ini->ini_iid, ini->ini_refcnt);
365 ini_get(bus_chan_t *bc, ini_t *ini)
368 spin_lock_irqsave(&bc->tmds_lock, flags);
370 spin_unlock_irqrestore(&bc->tmds_lock, flags);
374 __ini_put(bus_chan_t *bc, ini_t *ini)
378 BUS_DBG2(bc->bus, "0x%016llx --refcnt (%d)\n", ini->ini_iid, ini->ini_refcnt);
379 if (ini->ini_refcnt < 0) {
380 free_ini(bc, ini, 0);
386 ini_put(bus_chan_t *bc, ini_t *ini)
389 spin_lock_irqsave(&bc->tmds_lock, flags);
391 spin_unlock_irqrestore(&bc->tmds_lock, flags);
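
/*
 * Reference counting sketch (hedged summary of the helpers above): every in-flight command
 * or notify takes a reference on its initiator (__ini_get) and drops it when finished
 * (ini_put from isp_on_free_cmd / isp_task_mgmt_fn_done). Once the initiator has also been
 * removed from the hash (del_ini) and the count goes negative, free_ini() unregisters the
 * SCST session.
 */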
395 tasklet_rx_cmds(unsigned long data)
397 bus_chan_t *bc = (bus_chan_t *) data;
402 struct scst_cmd *scst_cmd;
405 spin_lock_irq(&bc->tmds_lock);
406 tmd = bc->tmds_front;
407 if (tmd == NULL || tmd->cd_ini == NULL) {
408 spin_unlock_irq(&bc->tmds_lock);
412 /* remove from queue */
413 bc->tmds_front = tmd->cd_hnext;
414 if (bc->tmds_front == NULL) {
415 bc->tmds_tail = NULL;
418 /* free command if aborted */
419 if (tmd->cd_flags & CDF_PRIVATE_ABORTED) {
420 __ini_put(bc, tmd->cd_ini);
421 spin_unlock_irq(&bc->tmds_lock);
422 BUS_DBG(bp, "ABORTED TMD_FIN[%llx]\n", tmd->cd_tagval);
423 (*bp->h.r_action)(QIN_TMD_FIN, tmd);
428 scst_cmd = scst_rx_cmd(ini->ini_scst_sess, tmd->cd_lun, sizeof(tmd->cd_lun), tmd->cd_cdb, sizeof(tmd->cd_cdb), 1);
429 if (scst_cmd == NULL) {
430 spin_unlock_irq(&bc->tmds_lock);
431 tmd->cd_scsi_status = SCSI_BUSY;
432 xact = &tmd->cd_xact;
433 xact->td_hflags = TDFH_STSVALID;
436 (*bp->h.r_action)(QIN_TMD_CONT, xact);
440 scst_cmd_set_tgt_priv(scst_cmd, tmd);
441 scst_cmd_set_tag(scst_cmd, tmd->cd_tagval);
442 tmd->cd_scst_cmd = scst_cmd;
444 switch (tmd->cd_tagtype) {
446 scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
449 scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
452 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
455 scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
458 scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
461 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
465 if (bp->h.r_type == R_FC) {
466 scst_data_direction dir;
469 dir = SCST_DATA_NONE;
470 if ((tmd->cd_flags & CDF_BIDIR) == CDF_BIDIR) {
471 dir = SCST_DATA_UNKNOWN;
472 } else if (tmd->cd_flags & CDF_DATA_OUT) {
473 dir = SCST_DATA_WRITE;
474 } else if (tmd->cd_flags & CDF_DATA_IN) {
475 dir = SCST_DATA_READ;
477 len = tmd->cd_totlen;
478 if (tmd->cd_cdb[0] == INQUIRY) {
479 len = min(len, tmd->cd_cdb[4]);
481 scst_cmd_set_expected(scst_cmd, dir, len);
483 spin_unlock_irq(&bc->tmds_lock);
485 scst_cmd_init_done(scst_cmd, SCST_CONTEXT_DIRECT_ATOMIC);
491 scsi_target_start_cmd(tmd_cmd_t *tmd)
497 /* first, find the bus */
498 spin_lock_irqsave(&scsi_target_lock, flags);
499 bp = bus_from_tmd(tmd);
500 if (unlikely(bp == NULL || bp->bchan == NULL)) {
501 spin_unlock_irqrestore(&scsi_target_lock, flags);
502 Eprintk("cannot find %s for incoming command\n", (bp == NULL) ? "bus" : "channel");
505 spin_unlock_irqrestore(&scsi_target_lock, flags);
507 BUS_DBG2(bp, "TMD_START[%llx] %p cdb0=%x\n", tmd->cd_tagval, tmd, tmd->cd_cdb[0] & 0xff);
510 tmd->cd_hnext = NULL;
511 bc = &bp->bchan[tmd->cd_channel];
513 /* then, add the command to the queue */
514 spin_lock_irqsave(&bc->tmds_lock, flags);
515 tmd->cd_ini = ini_from_iid(bc, tmd->cd_iid);
516 __ini_get(bc, tmd->cd_ini);
517 if (bc->tmds_front == NULL) {
518 bc->tmds_front = tmd;
520 bc->tmds_tail->cd_hnext = tmd;
523 spin_unlock_irqrestore(&bc->tmds_lock, flags);
525 /* finally, schedule proper action */
526 if (unlikely(tmd->cd_ini == NULL)) {
527 schedule_qlaispd(SF_ADD_INITIATORS);
529 tasklet_schedule(&bc->tasklet);
534 bus_chan_add_initiators(bus_t *bp, int chan)
536 bus_chan_t *bc = &bp->bchan[chan];
539 tmd_cmd_t *prev_tmd = NULL;
542 BUS_DBG(bp, "Chan %d searching new initiators\n", chan);
544 /* iterate over the queue and find any commands not yet assigned to an initiator */
545 spin_lock_irq(&bc->tmds_lock);
546 tmd = bc->tmds_front;
548 BUG_ON(tmd->cd_channel != chan);
549 if (tmd->cd_ini != NULL) {
550 /* ini assigned, go to the next command */
554 /* check if a proper initiator already exists */
555 ini = ini_from_iid(bc, tmd->cd_iid);
560 spin_unlock_irq(&bc->tmds_lock);
562 ini = alloc_ini(bc, tmd->cd_iid);
564 spin_lock_irq(&bc->tmds_lock);
567 add_ini(bc, tmd->cd_iid, ini);
570 /* failed to allocate an initiator; remove the command from the queue and send BUSY */
571 if (prev_tmd == NULL) {
572 bc->tmds_front = tmd->cd_hnext;
574 prev_tmd->cd_hnext = tmd->cd_hnext;
576 if (bc->tmds_tail == tmd) {
577 bc->tmds_tail = prev_tmd;
579 spin_unlock_irq(&bc->tmds_lock);
581 tmd->cd_scsi_status = SCSI_BUSY;
582 xact = &tmd->cd_xact;
583 xact->td_hflags = TDFH_STSVALID;
586 (*bp->h.r_action)(QIN_TMD_CONT, xact);
588 spin_lock_irq(&bc->tmds_lock);
589 /* move on to the next command; the previous pointer is unchanged */
595 spin_unlock_irq(&bc->tmds_lock);
596 /* now we can run the queue and pass commands to SCST */
597 tasklet_schedule(&bc->tasklet);
601 bus_add_initiators(void)
606 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
607 spin_lock_irq(&scsi_target_lock);
608 if (bp->h.r_action == NULL) {
609 spin_unlock_irq(&scsi_target_lock);
612 spin_unlock_irq(&scsi_target_lock);
614 for (chan = 0; chan < bp->h.r_nchannels; chan++) {
615 bus_chan_add_initiators(bp, chan);
621 scsi_target_done_cmd(tmd_cmd_t *tmd)
624 struct scst_cmd *scst_cmd;
625 tmd_xact_t *xact = &tmd->cd_xact;
626 enum scst_exec_context context = scst_estimate_context();
630 BUS_DBG2(bp,"TMD_DONE[%llx] %p hf %x lf %x xfrlen %d totlen %d moved %d\n",
631 tmd->cd_tagval, tmd, xact->td_hflags, xact->td_lflags, xact->td_xfrlen, tmd->cd_totlen, tmd->cd_moved);
633 scst_cmd = tmd->cd_scst_cmd;
635 /* command returned by us with status BUSY */
636 BUS_DBG(bp, "BUSY TMD_FIN[%llx]\n", tmd->cd_tagval);
637 ini_put(&bp->bchan[tmd->cd_channel], tmd->cd_ini);
638 (*bp->h.r_action)(QIN_TMD_FIN, tmd);
642 if (xact->td_hflags & TDFH_STSVALID) {
643 if (xact->td_hflags & TDFH_DATA_IN) {
644 xact->td_hflags &= ~TDFH_DATA_MASK;
647 if (unlikely(xact->td_error)) {
648 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
650 scst_tgt_cmd_done(scst_cmd, context);
654 if (xact->td_hflags & TDFH_DATA_OUT) {
655 if (likely(tmd->cd_totlen == tmd->cd_moved) || unlikely(xact->td_error)) {
656 if (xact->td_xfrlen) {
657 int rx_status = SCST_RX_STATUS_SUCCESS;
658 if (unlikely(xact->td_error)) {
659 rx_status = SCST_RX_STATUS_ERROR;
661 scst_rx_data(scst_cmd, rx_status, context);
663 if (unlikely(xact->td_error)) {
664 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
666 scst_tgt_cmd_done(scst_cmd, context);
669 ; /* we don't have all data, do nothing */
671 } else if (xact->td_hflags & TDFH_DATA_IN) {
672 xact->td_hflags &= ~TDFH_DATA_MASK;
674 if (unlikely(xact->td_error)) {
675 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
677 scst_tgt_cmd_done(scst_cmd, context);
679 Eprintk("don't know what to do with TMD_DONE[%llx] cdb0 %x hf %x lf %x xfrlen %d totlen %d moved %d\n",
680 tmd->cd_tagval, tmd->cd_cdb[0], xact->td_hflags, xact->td_lflags, xact->td_xfrlen, tmd->cd_totlen, tmd->cd_moved);
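
/*
 * Summary of the cases above (descriptive only): a transaction that delivered status, or
 * finished moving DATA_IN data, is completed with scst_tgt_cmd_done(); a DATA_OUT (write)
 * transfer whose data has all arrived is handed back to SCST with scst_rx_data() so the
 * command can be executed; errors are propagated via scst_set_delivery_status() or
 * SCST_RX_STATUS_ERROR.
 */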
685 abort_task(bus_chan_t *bc, uint64_t iid, uint64_t tagval)
690 spin_lock_irqsave(&bc->tmds_lock, flags);
691 for (tmd = bc->tmds_front; tmd; tmd = tmd->cd_hnext) {
692 if (tmd->cd_tagval == tagval && tmd->cd_iid == iid) {
693 tmd->cd_flags |= CDF_PRIVATE_ABORTED;
694 spin_unlock_irqrestore(&bc->tmds_lock, flags);
695 tasklet_schedule(&bc->tasklet);
699 spin_unlock_irqrestore(&bc->tmds_lock, flags);
704 abort_all_tasks(bus_chan_t *bc, uint64_t iid)
709 spin_lock_irqsave(&bc->tmds_lock, flags);
710 for (tmd = bc->tmds_front; tmd; tmd = tmd->cd_hnext) {
711 if (tmd->cd_iid == iid) {
712 tmd->cd_flags |= CDF_PRIVATE_ABORTED;
715 spin_unlock_irqrestore(&bc->tmds_lock, flags);
716 tasklet_schedule(&bc->tasklet);
720 scsi_target_notify(isp_notify_t *np)
731 spin_lock_irqsave(&scsi_target_lock, flags);
732 bp = bus_from_notify(np);
733 if (unlikely(bp == NULL || bp->bchan == NULL)) {
734 spin_unlock_irqrestore(&scsi_target_lock, flags);
735 Eprintk("cannot find %s for incoming notify\n", bp == NULL ? "bus" : "channel");
738 spin_unlock_irqrestore(&scsi_target_lock, flags);
740 BUS_DBG(bp, "TMD_NOTIFY %p code %x iid 0x%016llx tag %llx\n", np, np->nt_ncode, np->nt_wwn, np->nt_tagval);
742 bc = &bp->bchan[np->nt_channel];
744 spin_lock_irqsave(&bc->tmds_lock, flags);
745 ini = ini_from_iid(bc, np->nt_wwn);
747 __ini_get(bc, np->nt_ini);
748 spin_unlock_irqrestore(&bc->tmds_lock, flags);
750 switch (np->nt_ncode) {
756 if (abort_task(bc, np->nt_wwn, np->nt_tagval)) {
757 BUS_DBG(bp, "TMD_NOTIFY abort task [%llx]\n", np->nt_tagval);
760 if (scst_rx_mgmt_fn_tag(ini->ini_scst_sess, SCST_ABORT_TASK, np->nt_tagval, 1, np) < 0) {
764 /* wait for SCST now */
766 case NT_ABORT_TASK_SET:
767 tmf = "ABORT TASK SET";
771 abort_all_tasks(bc, np->nt_wwn);
772 fn = SCST_ABORT_TASK_SET;
774 case NT_CLEAR_TASK_SET:
775 tmf = "CLEAR TASK SET";
779 abort_all_tasks(bc, np->nt_wwn);
780 fn = SCST_CLEAR_TASK_SET;
788 if (np->nt_lun == LUN_ANY) {
794 case NT_TARGET_RESET:
795 tmf = "TARGET RESET";
796 fn = SCST_TARGET_RESET;
806 /* we don't care about LIP resets and link up/down events */
809 spin_lock_irqsave(&bc->tmds_lock, flags);
811 * If someone disables the target while this notify is being handled, the initiator
812 * reference is dropped concurrently, so we must check that the IID is still in the
813 * initiator table to avoid a double free
815 if (del_ini(bc, np->nt_wwn)) {
816 BUS_DBG(bp, "droping reference to initiator 0x%016llx\n", np->nt_wwn);
819 Eprintk("cannot logout initiator 0x%016llx\n", np->nt_wwn);
821 spin_unlock_irqrestore(&bc->tmds_lock, flags);
824 Eprintk("unknown notify 0x%x\n", np->nt_ncode);
833 if (np->nt_lun == LUN_ANY) {
838 FLATLUN_TO_L0LUN(lunbuf, lun);
839 if (scst_rx_mgmt_fn_lun(ini->ini_scst_sess, fn, lunbuf, sizeof(lunbuf), 1, np) < 0) {
847 Eprintk("cannot find initiator 0x%016llx for %s\n", np->nt_wwn, tmf);
851 (*bp->h.r_action) (QIN_NOTIFY_ACK, np);
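
/*
 * Note (summary of the paths above): every notify is eventually answered with
 * QIN_NOTIFY_ACK, either right here (unknown initiators, logins/logouts, codes we ignore)
 * or later from isp_task_mgmt_fn_done() once SCST has completed the task management
 * function started via scst_rx_mgmt_fn_tag() / scst_rx_mgmt_fn_lun().
 */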
855 scsi_target_handler(qact_e action, void *arg)
865 spin_lock_irqsave(&scsi_target_lock, flags);
866 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
867 if (bp->h.r_action == NULL) {
871 if (bp == &busses[MAX_BUS]) {
872 spin_unlock_irqrestore(&scsi_target_lock, flags);
873 Eprintk("cannot register any more SCSI busses\n");
877 if (hp->r_version != QR_VERSION) {
878 spin_unlock_irqrestore(&scsi_target_lock, flags);
879 Eprintk("version mismatch - compiled with %d, got %d\n", QR_VERSION, hp->r_version);
884 spin_unlock_irqrestore(&scsi_target_lock, flags);
885 schedule_qlaispd(SF_REGISTER_SCST);
891 if (ep->en_private) {
899 if (ep->en_private) {
906 tmd_cmd_t *tmd = arg;
907 tmd->cd_xact.td_cmd = tmd;
908 scsi_target_start_cmd(arg);
913 tmd_xact_t *xact = arg;
914 tmd_cmd_t *tmd = xact->td_cmd;
915 scsi_target_done_cmd(tmd);
920 isp_notify_t *np = arg;
921 scsi_target_notify(np);
926 hba_register_t *hp = arg;
928 spin_lock_irqsave(&scsi_target_lock, flags);
929 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
930 if (bp->h.r_action == NULL) {
933 if (bp->h.r_identity == hp->r_identity) {
937 if (bp == &busses[MAX_BUS]) {
938 spin_unlock_irqrestore(&scsi_target_lock, flags);
939 Eprintk("HBA_UNREG cannot find bus\n");
943 spin_unlock_irqrestore(&scsi_target_lock, flags);
944 schedule_qlaispd(SF_UNREGISTER_SCST);
948 Eprintk("action code %d (0x%x)?\n", action, action);
953 static void register_scst(void);
954 static void unregister_scst(void);
957 qlaispd_function(void *arg)
959 printk(KERN_DEBUG "qlaispd starting\n");
960 while (!kthread_should_stop()) {
961 printk(KERN_DEBUG "qlaispd sleeping\n");
962 wait_event_interruptible(qlaispd_waitq, qlaispd_flags || kthread_should_stop());
963 printk(KERN_DEBUG "qlaispd running\n");
965 if (test_and_clear_bit(SF_REGISTER_SCST, &qlaispd_flags)) {
968 if (test_and_clear_bit(SF_ADD_INITIATORS, &qlaispd_flags)) {
969 bus_add_initiators();
971 if (test_and_clear_bit(SF_UNREGISTER_SCST, &qlaispd_flags)) {
975 printk(KERN_DEBUG "qlaispd exiting\n");
980 scsi_target_enadis(bus_t *bp, uint64_t en, int chan, int lun)
982 struct semaphore rsem;
988 BUG_ON(chan < 0 || chan >= bp->h.r_nchannels);
989 BUG_ON(lun != LUN_ANY && (lun < 0 || lun >= MAX_LUN));
990 bc = &bp->bchan[chan];
993 if (bp->h.r_type == R_FC) {
994 if (en == bc->enable) {
998 if (lun == LUN_ANY) {
1002 if ((en << lun) == (bc->enable & mask)) {
1008 memset(&info, 0, sizeof (info));
1009 info.i_identity = bp->h.r_identity;
1010 info.i_channel = chan;
1011 (*bp->h.r_action)(QIN_GETINFO, &info);
1013 return (info.i_error);
1016 memset(&ec, 0, sizeof (ec));
1017 ec.en_hba = bp->h.r_identity;
1019 if (bp->h.r_type == R_FC) {
1020 ec.en_lun = LUN_ANY;
1025 /* Taking disable_sem for writing prevents pending commands from being moved to the
1026 * low level driver while LUNs are being disabled; we cannot get those commands back,
1027 * which would leak SCST commands */
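/*
 * Locking sketch (summary of how the readers use it): isp_rdy_to_xfer() and
 * isp_xmit_response() take disable_sem for reading around QIN_TMD_CONT, while this
 * enable/disable path takes it for writing, so a command can never be handed to the
 * low level driver in the middle of a disable.
 */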
1028 BUS_DBG(bp, "Chan %d before down_write disable_sem\n", chan);
1029 down_write(&bc->disable_sem);
1030 BUS_DBG(bp, "Chan %d after down_write disable_sem\n", chan);
1032 ec.en_private = &rsem;
1033 (*bp->h.r_action)(en ? QIN_ENABLE : QIN_DISABLE, &ec);
1036 up_write(&bc->disable_sem);
1037 return (ec.en_error);
1040 if (bp->h.r_type == R_FC) {
1045 bc->enable |= (en << lun);
1048 if (bc->enable == 0) {
1049 BUS_DBG(bp, "Chan %d drop all initiators references\n", chan);
1050 /* If no LUN is active on the channel we want to log off from SCST. At this point no new
1051 * commands or notifies come from the low level driver, but we still need to take care of
1052 * pending ones. We just drop the reference to the initiators; when the last command/notify
1053 * finishes for an initiator, we will unregister its session from SCST */
1054 bus_chan_unregister_sessions(bc, 0);
1056 up_write(&bc->disable_sem);
1061 isp_detect(struct scst_tgt_template *tgt_template)
1063 schedule_qlaispd(SF_REGISTER_SCST);
1068 isp_release(struct scst_tgt *tgt)
1074 isp_rdy_to_xfer(struct scst_cmd *scst_cmd)
1076 /* no need to check whether the command was aborted; the low level driver handles
1077 * this and calls us back with an error */
1079 if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_WRITE) {
1080 tmd_cmd_t *tmd = (tmd_cmd_t *) scst_cmd_get_tgt_priv(scst_cmd);
1081 tmd_xact_t *xact = &tmd->cd_xact;
1082 bus_t *bp = tmd->cd_bus;
1083 bus_chan_t *bc = &bp->bchan[tmd->cd_channel];
1084 int len = scst_cmd_get_bufflen(scst_cmd);
1086 xact->td_hflags = TDFH_DATA_OUT;
1087 xact->td_lflags = 0;
1088 xact->td_data = scst_cmd_get_sg(scst_cmd);
1089 xact->td_xfrlen = len;
1090 if (bp->h.r_type == R_SPI) {
1091 tmd->cd_totlen = len;
1094 if (unlikely(down_read_trylock(&bc->disable_sem) != 1)) {
1095 BUS_DBG(bp, "TMD[%llx] Chan %d disable_sem trylock failed, atomic %d\n",
1096 tmd->cd_tagval, tmd->cd_channel, scst_cmd_atomic(scst_cmd));
1097 if (scst_cmd_atomic(scst_cmd)) {
1098 return (SCST_TGT_RES_NEED_THREAD_CTX);
1100 down_read(&bc->disable_sem);
1103 if (unlikely(bc->enable == 0)) {
1104 BUS_DBG(bp, "TMD[%llx] Chan %d not enabled\n", tmd->cd_tagval, tmd->cd_channel);
1105 up_read(&bc->disable_sem);
1106 scst_rx_data(scst_cmd, SCST_RX_STATUS_ERROR, SCST_CONTEXT_SAME);
1107 return (SCST_TGT_RES_SUCCESS);
1110 BUS_DBG2(bp, "TMD[%llx] write nbytes %u\n", tmd->cd_tagval, scst_cmd_get_bufflen(scst_cmd));
1111 (*bp->h.r_action)(QIN_TMD_CONT, xact);
1112 up_read(&bc->disable_sem);
1114 * Did we have an error starting this particular transaction?
1116 if (unlikely((xact->td_lflags & (TDFL_ERROR|TDFL_SYNCERROR)) == (TDFL_ERROR|TDFL_SYNCERROR))) {
1117 if (xact->td_error == -ENOMEM) {
1118 return (SCST_TGT_RES_QUEUE_FULL);
1120 return (SCST_TGT_RES_FATAL_ERROR);
1124 return (SCST_TGT_RES_SUCCESS);
1128 isp_xmit_response(struct scst_cmd *scst_cmd)
1130 tmd_cmd_t *tmd = (tmd_cmd_t *) scst_cmd_get_tgt_priv(scst_cmd);
1131 bus_t *bp = tmd->cd_bus;
1132 bus_chan_t *bc = &bp->bchan[tmd->cd_channel];
1133 tmd_xact_t *xact = &tmd->cd_xact;
1135 if (unlikely(scst_cmd_aborted(scst_cmd))) {
1136 scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
1137 scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_SAME);
1138 return (SCST_TGT_RES_SUCCESS);
1141 if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_READ) {
1142 unsigned int len = scst_cmd_get_resp_data_len(scst_cmd);
1143 if (bp->h.r_type == R_SPI) {
1144 tmd->cd_totlen = len;
1146 if (unlikely(len > tmd->cd_totlen)) {
1147 /* some broken FC initiators may send SCSI commands with a data load
1148 * larger than the underlying transport specified */
1149 const uint8_t ifailure[TMD_SENSELEN] = { 0xf0, 0, 0x4, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x44 };
1151 Eprintk("data size too big (totlen %u len %u)\n", tmd->cd_totlen, len);
1153 memcpy(tmd->cd_sense, ifailure, TMD_SENSELEN);
1154 xact->td_hflags = TDFH_STSVALID;
1155 tmd->cd_scsi_status = SCSI_CHECK;
1158 xact->td_hflags = TDFH_DATA_IN;
1159 xact->td_xfrlen = len;
1160 xact->td_data = scst_cmd_get_sg(scst_cmd);
1163 /* finished write to target or command with no data */
1164 xact->td_xfrlen = 0;
1165 xact->td_hflags &= ~TDFH_DATA_MASK;
1168 xact->td_lflags = 0;
1170 if (scst_cmd_get_is_send_status(scst_cmd)) {
1171 xact->td_hflags |= TDFH_STSVALID;
1172 tmd->cd_scsi_status = scst_cmd_get_status(scst_cmd);
1174 if (tmd->cd_scsi_status == SCSI_CHECK) {
1175 uint8_t *sbuf = scst_cmd_get_sense_buffer(scst_cmd);
1176 unsigned int slen = scst_cmd_get_sense_buffer_len(scst_cmd);
1177 if (likely(slen > TMD_SENSELEN)) {
1178 /* the 18-byte sense format does not cover vendor-specific sense data; we cannot
1179 * send more than 18 bytes through the low level driver, but SCST may give us up to
1180 * 96 bytes, so truncate */
1181 slen = TMD_SENSELEN;
1183 memcpy(tmd->cd_sense, sbuf, slen);
1185 if (unlikely(debug > 0)) {
1186 uint8_t key, asc, ascq;
1187 key = (slen >= 2) ? sbuf[2] : 0;
1188 asc = (slen >= 12) ? sbuf[12] : 0;
1189 ascq = (slen >= 13) ? sbuf[13] : 0;
1190 BUS_DBG(bp, "sense code: key 0x%02x asc 0x%02x ascq 0x%02x\n", key, asc, ascq);
1194 BUS_DBG2(bp, "TMD[%llx] status %d\n", tmd->cd_tagval, scst_cmd_get_status(scst_cmd));
1198 if ((xact->td_hflags & TDFH_STSVALID) && (tmd->cd_scsi_status == SCSI_CHECK)) {
1199 xact->td_xfrlen = 0;
1200 xact->td_hflags &= ~TDFH_DATA_MASK;
1201 xact->td_hflags |= TDFH_SNSVALID;
1204 if (unlikely(down_read_trylock(&bc->disable_sem) != 1)) {
1205 BUS_DBG(bp, "TMD[%llx] Chan %d disable_sem trylock failed, atomic %d\n",
1206 tmd->cd_tagval, tmd->cd_channel, scst_cmd_atomic(scst_cmd));
1207 if (scst_cmd_atomic(scst_cmd)) {
1208 return (SCST_TGT_RES_NEED_THREAD_CTX);
1210 down_read(&bc->disable_sem);
1213 if (unlikely(bc->enable == 0)) {
1214 BUS_DBG(bp, "TMD[%llx] Chan %d not enabled\n", tmd->cd_tagval, tmd->cd_channel);
1215 up_read(&bc->disable_sem);
1216 scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_SAME);
1217 return (SCST_TGT_RES_SUCCESS);
1220 BUS_DBG2(bp, "TMD[%llx] %p hf %x lf %x xfrlen %d totlen %d moved %d\n",
1221 tmd->cd_tagval, tmd, xact->td_hflags, xact->td_lflags, xact->td_xfrlen, tmd->cd_totlen, tmd->cd_moved);
1222 up_read(&bc->disable_sem);
1223 (*bp->h.r_action)(QIN_TMD_CONT, xact);
1225 * Did we have an error starting this particular transaction?
1227 if (unlikely((xact->td_lflags & (TDFL_ERROR|TDFL_SYNCERROR)) == (TDFL_ERROR|TDFL_SYNCERROR))) {
1228 if (xact->td_error == -ENOMEM) {
1229 return (SCST_TGT_RES_QUEUE_FULL);
1231 return (SCST_TGT_RES_FATAL_ERROR);
1234 return (SCST_TGT_RES_SUCCESS);
1238 isp_on_free_cmd(struct scst_cmd *scst_cmd)
1240 tmd_cmd_t *tmd = (tmd_cmd_t *) scst_cmd_get_tgt_priv(scst_cmd);
1241 bus_t *bp = tmd->cd_bus;
1242 tmd_xact_t *xact = &tmd->cd_xact;
1244 xact->td_data = NULL;
1245 ini_put(&bp->bchan[tmd->cd_channel], tmd->cd_ini);
1246 BUS_DBG2(bp, "TMD_FIN[%llx]\n", tmd->cd_tagval);
1247 (*bp->h.r_action)(QIN_TMD_FIN, tmd);
1251 isp_task_mgmt_fn_done(struct scst_mgmt_cmd *mgmt_cmd)
1253 isp_notify_t *np = mgmt_cmd->tgt_priv;
1254 bus_t *bp = bus_from_notify(np);
1256 ini_put(&bp->bchan[np->nt_channel], np->nt_ini);
1257 BUS_DBG(bp, "NOTIFY_ACK[%llx]\n", np->nt_tagval);
1258 (*bp->h.r_action) (QIN_NOTIFY_ACK, np);
1261 static DEFINE_MUTEX(proc_mutex);
1264 * Much of the procfs handling is taken from scst/src/scst_proc.c
1267 #if !defined(CONFIG_PPC) && (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
1269 static int strncasecmp(const char *s1, const char *s2, size_t n)
1273 c1 = tolower(*s1++);
1274 c2 = tolower(*s2++);
1275 } while ((--n > 0) && c1 == c2 && c1 != 0);
1282 isp_read_proc(struct seq_file *seq, void *v)
1284 bus_t *bp = seq->private;
1288 if (bp == NULL || bp->bchan == NULL) {
1292 if (mutex_lock_interruptible(&proc_mutex)) {
1293 return (-ERESTARTSYS);
1296 seq_printf(seq, "%s HBA %s%d DEVID %x\n", bp->h.r_type == R_FC ? "FC" : "SPI", bp->h.r_name, bp->h.r_inst, bp->h.r_locator);
1297 for (chan = 0; chan < bp->h.r_nchannels; chan++) {
1298 bc = &bp->bchan[chan];
1299 if (bp->h.r_type == R_FC) {
1300 seq_printf(seq, "%-2d: %d\n", chan, bc->enable ? 1 : 0);
1302 seq_printf(seq, "%-2d: 0x%llx\n", chan, bc->enable);
1306 mutex_unlock(&proc_mutex);
1311 isp_write_proc(struct file *file, const char __user *buf, size_t len, loff_t *off)
1313 char *ptr, *p, *old;
1314 enum { DISABLE = 0, ENABLE = 1, TEST } action;
1315 int en = -1, res = -EINVAL;
1316 int all_channels = 0, all_luns = 0;
1317 int lun = 0, chan = 0;
1318 bus_t *bp = PDE(file->f_dentry->d_inode)->data;
1320 if (bp == NULL || bp->bchan == NULL) {
1326 ptr = (char *)__get_free_page(GFP_KERNEL);
1331 if (copy_from_user(ptr, buf, len)) {
1335 if (len < PAGE_SIZE) {
1337 } else if (ptr[PAGE_SIZE-1]) {
1342 * Usage: echo "enable|disable chan lun" > /proc/scsi_tgt/qla_isp/N
1343 * or echo "test" > /proc/scsi_tgt/qla_isp/N
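 *
 * For example (illustrative; "0" here is the SCSI host number used as the proc entry name,
 * and the LUN field is ignored for FC HBAs):
 *   echo "enable 0 all" > /proc/scsi_tgt/qla_isp/0    enable all LUNs on channel 0
 *   echo "disable 0 all" > /proc/scsi_tgt/qla_isp/0   disable them again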
1346 if (p[strlen(p) - 1] == '\n') {
1347 p[strlen(p) - 1] = '\0';
1349 if (!strncasecmp("enable", p, 6)) {
1352 } else if (!strncasecmp("disable", p, 7)) {
1355 } else if (!strncasecmp("test", p, 4)) {
1358 PRINT_ERROR("unknown action \"%s\"", p);
1366 PRINT_ERROR("cannot parse arguments for action \"%s\"", action == DISABLE ? "disable" : "enable");
1371 while (isspace(*p) && *p != '\0') {
1375 chan = simple_strtoul(p, &p, 0);
1377 if (!strncasecmp("all", p, 3)) {
1380 PRINT_ERROR("cannot parse channel for action \"%s\"", action == DISABLE ? "disable" : "enable");
1383 } else if (chan < 0 || chan >= bp->h.r_nchannels) {
1384 PRINT_ERROR("bad channel number %d", chan);
1389 if (bp->h.r_type == R_SPI) {
1390 while (isspace(*p) && *p != '\0') {
1394 lun = simple_strtoul(p, &p, 0);
1396 if (!strncasecmp("all", p, 3)) {
1399 PRINT_ERROR("cannot parse lun for action \"%s\"", action == DISABLE ? "disable" : "enable");
1402 } else if (lun < 0 || lun >= MAX_LUN) {
1403 PRINT_ERROR("bad lun %d", lun);
1413 printk("%s test\n", __FUNCTION__);
1418 if (en == 0 || en == 1) {
1420 * channel 0 must be enabled first and disabled last, so when enabling all
1421 * channels do it in ascending order and when disabling all in descending order
1423 int chan_srt, chan_end, chan_inc;
1424 int lun_srt, lun_end;
1429 chan_end = bp->h.r_nchannels;
1432 chan_srt = bp->h.r_nchannels - 1;
1438 chan_end = chan + 1;
1442 if (bp->h.r_type == R_FC) {
1444 lun_end = LUN_ANY + 1;
1455 if (mutex_lock_interruptible(&proc_mutex)) {
1459 for (chan = chan_srt; chan != chan_end; chan += chan_inc) {
1460 for (lun = lun_srt; lun != lun_end; lun++) {
1461 res = scsi_target_enadis(bp, en, chan, lun);
1463 PRINT_ERROR("%s channel %d failed with error %d", en ? "enable" : "disable", chan, res);
1464 /* keep processing the remaining channels/LUNs anyway */
1469 mutex_unlock(&proc_mutex);
1473 free_page((unsigned long)ptr);
1478 static struct scst_tgt_template isp_tgt_template =
1480 .sg_tablesize = SG_ALL, /* we set this value later based on the hardware */
1482 .unchecked_isa_dma = 0,
1483 .use_clustering = 1,
1484 .xmit_response_atomic = 1,
1485 .rdy_to_xfer_atomic = 1,
1486 //.report_aen_atomic = 0,
1488 .detect = isp_detect,
1489 .release = isp_release,
1491 .xmit_response = isp_xmit_response,
1492 .rdy_to_xfer = isp_rdy_to_xfer,
1493 .on_free_cmd = isp_on_free_cmd,
1494 .task_mgmt_fn_done = isp_task_mgmt_fn_done,
1496 //.report_aen = isp_report_aen,
1499 #ifdef ISP_DAC_SUPPORTED
1506 get_sg_tablesize(ispsoftc_t *isp)
1508 // FIXME: check if this is correct? What about multichannel ?
1509 // FIXME: move to the low level driver and export via tpublic API
1510 int rq_seglim, ct_seglim;
1511 int nctios = (isp->isp_maxcmds < 4) ? 0 : isp->isp_maxcmds - 4;
1515 ct_seglim = ISP_CDSEG64;
1516 } else if (IS_2322(isp) || ISP_A64) {
1517 rq_seglim = ISP_RQDSEG_T3;
1518 ct_seglim = ISP_CDSEG64;
1519 } else if (IS_FC(isp)) {
1520 rq_seglim = ISP_RQDSEG_T2;
1521 ct_seglim = ISP_CDSEG;
1523 rq_seglim = ISP_RQDSEG;
1524 ct_seglim = ISP_RQDSEG;
1527 return rq_seglim + nctios * ct_seglim;
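
/*
 * Worked example (numbers are illustrative only): with isp_maxcmds == 64, nctios is 60,
 * so the estimate is rq_seglim + 60 * ct_seglim scatter/gather segments.
 */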
1531 bus_set_proc_data(bus_t *bp)
1533 const struct scst_proc_data proc_data = {
1534 SCST_DEF_RW_SEQ_OP(isp_write_proc)
1535 .show = isp_read_proc,
1537 memcpy(&bp->proc_data, &proc_data, sizeof(bp->proc_data));
1538 bp->proc_data.data = bp;
1542 register_hba(bus_t *bp)
1547 bus_chan_t *bchan, *bc;
1548 struct scst_tgt *scst_tgt;
1549 struct proc_dir_entry *pde;
1551 bchan = kzalloc(bp->h.r_nchannels * sizeof(bus_chan_t), GFP_KERNEL);
1552 if (bchan == NULL) {
1553 Eprintk("cannot allocate %d channels for %s%d\n", bp->h.r_nchannels, bp->h.r_name, bp->h.r_inst);
1557 for (chan = 0; chan < bp->h.r_nchannels; chan++) {
1558 memset(&info, 0, sizeof(info_t));
1559 info.i_identity = bp->h.r_identity;
1560 if (bp->h.r_type == R_FC) {
1563 info.i_type = I_SPI;
1565 info.i_channel = chan;
1566 (*bp->h.r_action)(QIN_GETINFO, &info);
1568 Eprintk("cannot get device name from %s%d!\n", bp->h.r_name, bp->h.r_inst);
1572 if (info.i_type == I_FC) {
1573 #define GET(byte) (uint8_t) ((info.i_id.fc.wwpn >> 8*byte) & 0xff)
1574 snprintf(name, sizeof(name), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1575 GET(7), GET(6), GET(5) , GET(4), GET(3), GET(2), GET(1), GET(0));
1578 #define GET(byte) (uint8_t) ((info.i_id.spi.iid >> 8*byte) & 0xff)
1579 snprintf(name, sizeof(name), "%02x:%02x:%02x:%02x", GET(3), GET(2), GET(1), GET(0));
1583 isp_tgt_template.sg_tablesize = get_sg_tablesize(bp->h.r_identity);
1584 scst_tgt = scst_register(&isp_tgt_template, name);
1585 if (scst_tgt == NULL) {
1586 Eprintk("cannot register scst device %s for %s%d\n", name, bp->h.r_name, bp->h.r_inst);
1591 spin_lock_init(&bc->tmds_lock);
1592 tasklet_init(&bc->tasklet, tasklet_rx_cmds, (unsigned long) bc);
1593 init_rwsem(&bc->disable_sem);
1594 init_waitqueue_head(&bc->unreg_waitq);
1595 atomic_set(&bc->sess_count, 0);
1597 bc->scst_tgt = scst_tgt;
1598 scst_tgt->tgt_priv = bc;
1601 snprintf(name, sizeof(name), "%d", ((ispsoftc_t *)bp->h.r_identity)->isp_osinfo.host->host_no);
1602 bus_set_proc_data(bp);
1603 pde = scst_create_proc_entry(scst_proc_get_tgt_root(&isp_tgt_template), name, &bp->proc_data);
1605 Eprintk("cannot create entry %s in /proc\n", name);
1609 spin_lock_irq(&scsi_target_lock);
1611 spin_unlock_irq(&scsi_target_lock);
1613 Iprintk("registering %s%d\n", bp->h.r_name, bp->h.r_inst);
1614 (bp->h.r_action)(QIN_HBA_REG, &bp->h);
1618 for (chan = bp->h.r_nchannels -1; chan >= 0; chan--) {
1619 if (bchan[chan].scst_tgt) {
1620 scst_unregister(bchan[chan].scst_tgt);
1626 spin_lock_irq(&scsi_target_lock);
1627 memset(&bp->h, 0, sizeof (hba_register_t));
1628 spin_unlock_irq(&scsi_target_lock);
1632 bus_chan_unregister_sessions(bus_chan_t *bc, int wait)
1635 ini_t *ini_next, *ptr;
1637 for (i = 0; i < HASH_WIDTH; i++) {
1638 spin_lock_irq(&bc->tmds_lock);
1641 spin_unlock_irq(&bc->tmds_lock);
1645 ini_next = ptr->ini_next;
1647 free_ini(bc, ptr, 1);
1651 } while ((ptr = ini_next) != NULL);
1657 unregister_hba(bus_t *bp, hba_register_t *unreg_hp)
1663 snprintf(name, sizeof(name), "%d", ((ispsoftc_t *)bp->h.r_identity)->isp_osinfo.host->host_no);
1664 remove_proc_entry(name, scst_proc_get_tgt_root(&isp_tgt_template));
1666 /* it's safe now to unregister and reinit bp */
1667 for (chan = 0; chan < bp->h.r_nchannels; chan++) {
1668 bc = &bp->bchan[chan];
1669 bus_chan_unregister_sessions(bc, 1);
1671 BUS_DBG(bp, "Chan %d waiting for finishing %d sessions\n", chan, atomic_read(&bc->sess_count));
1672 wait_event(bc->unreg_waitq, atomic_read(&bc->sess_count) == 0);
1673 BUS_DBG(bp, "Chan %d all sessions finished\n", chan);
1674 scst_unregister(bc->scst_tgt);
1678 spin_lock_irq(&scsi_target_lock);
1679 memset(bp, 0, sizeof(bus_t));
1680 spin_unlock_irq(&scsi_target_lock);
1682 Iprintk("unregistering %s%d\n", unreg_hp->r_name, unreg_hp->r_inst);
1683 (unreg_hp->r_action)(QIN_HBA_UNREG, unreg_hp);
1686 /* Register SCST target, must be called in process context */
1692 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
1693 spin_lock_irq(&scsi_target_lock);
1694 if (bp->need_reg == 0) {
1695 spin_unlock_irq(&scsi_target_lock);
1699 spin_unlock_irq(&scsi_target_lock);
1705 /* Unregister SCST target, must be called in process context */
1707 unregister_scst(void)
1710 hba_register_t *unreg_hp;
1712 for (bp = busses; bp < &busses[MAX_BUS]; bp++) {
1713 spin_lock_irq(&scsi_target_lock);
1714 if (bp->unreg_hp == NULL) {
1715 spin_unlock_irq(&scsi_target_lock);
1718 unreg_hp = bp->unreg_hp;
1719 bp->unreg_hp = NULL;
1720 spin_unlock_irq(&scsi_target_lock);
1722 unregister_hba(bp, unreg_hp);
1726 EXPORT_SYMBOL(scsi_target_handler);
1728 #ifdef MODULE_LICENSE
1729 MODULE_LICENSE("Dual BSD/GPL");
1732 int init_module(void)
1736 qlaispd_task = kthread_run(qlaispd_function, NULL, "qlaispd");
1737 if (IS_ERR(qlaispd_task)) {
1738 Eprintk("running qlaispd failed\n");
1739 return PTR_ERR(qlaispd_task);
1742 ret = scst_register_target_template(&isp_tgt_template);
1744 Eprintk("cannot register scst target template\n");
1745 kthread_stop(qlaispd_task);
1751 * We can't get here until all HBAs have deregistered
1753 void cleanup_module(void)
1755 kthread_stop(qlaispd_task);
1756 scst_unregister_target_template(&isp_tgt_template);
1759 * vim:ts=4:sw=4:expandtab