Patch from andy yan <andyysj@gmail.com>:
authorvlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
Tue, 14 Apr 2009 16:00:08 +0000 (16:00 +0000)
committervlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
Tue, 14 Apr 2009 16:00:08 +0000 (16:00 +0000)
Target driver for Marvell 88SE64xx(3G) and 88SE94xx(6G) SAS cards

git-svn-id: https://scst.svn.sourceforge.net/svnroot/scst/trunk@772 d57e44dd-8a1f-0410-8b47-8ef2f437770f

15 files changed:
Makefile
mvsas_tgt/Makefile [new file with mode: 0644]
mvsas_tgt/mv_64xx.c [new file with mode: 0644]
mvsas_tgt/mv_64xx.h [new file with mode: 0644]
mvsas_tgt/mv_94xx.c [new file with mode: 0644]
mvsas_tgt/mv_94xx.h [new file with mode: 0644]
mvsas_tgt/mv_chips.h [new file with mode: 0644]
mvsas_tgt/mv_defs.h [new file with mode: 0644]
mvsas_tgt/mv_init.c [new file with mode: 0644]
mvsas_tgt/mv_sas.c [new file with mode: 0644]
mvsas_tgt/mv_sas.h [new file with mode: 0644]
mvsas_tgt/mv_spi.c [new file with mode: 0644]
mvsas_tgt/mv_spi.h [new file with mode: 0644]
mvsas_tgt/mv_tgt.c [new file with mode: 0644]
mvsas_tgt/mv_tgt.h [new file with mode: 0644]

index d710897..7ec63c4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -28,6 +28,7 @@ LSI_DIR=mpt
 USR_DIR=usr/fileio
 SRP_DIR=srpt
 SCST_LOCAL_DIR=scst_local
+MVSAS_DIR=mvsas_tgt
 
 ISCSI_DIR=iscsi-scst
 #ISCSI_DESTDIR=../../../iscsi_scst_inst
@@ -285,6 +286,22 @@ usr_clean:
 usr_extraclean:
        cd $(USR_DIR) && $(MAKE) extraclean
 
+mvsas:
+       cd $(MVSAS_DIR) && $(MAKE) all
+
+mvsas_install:
+       cd $(MVSAS_DIR) && $(MAKE) install
+
+mvsas_uninstall:
+       cd $(MVSAS_DIR) && $(MAKE) uninstall
+
+mvsas_clean:
+       cd $(MVSAS_DIR) && $(MAKE) clean
+
+mvsas_extraclean:
+       cd $(MVSAS_DIR) && $(MAKE) extraclean
+
+
 debug2perf:
        echo "Changing current debug state from full debug to full performance"
        patch -p0 <scst-full_perf.patch
@@ -327,4 +344,5 @@ release2debug:
        srpt srpt_install srpt_uninstall srpt_clean srpt_extraclean \
        usr usr_install usr_uninstall usr_clean usr_extraclean \
        scst_local scst_local_install scst_local_uninstall scst_local_clean scst_local_extraclean \
+       mvsas mvsas_install mvsas_uninstall mvsas_clean mvsas_extraclean \
        debug2perf, debug2release, perf2debug, release2debug
diff --git a/mvsas_tgt/Makefile b/mvsas_tgt/Makefile
new file mode 100644 (file)
index 0000000..de5d54e
--- /dev/null
@@ -0,0 +1,112 @@
+#
+#  Marvell SCSI target driver makefile
+#  
+#  Copyright (C) 2006 - 2008 Jacky Feng <jfeng@marvell.com>
+#  
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation, version 2
+#  of the License.
+# 
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#  GNU General Public License for more details.
+#
+#
+# Main targets:
+#    all (the default) : make all
+#    clean             : clean files
+#    extraclean        : clean + clean dependencies
+#    install           : install 
+#    uninstall         : uninstall 
+#
+# Notes :
+#    - install and uninstall must be made as root
+#
+
+
+ifeq ($(KVER),)
+  ifeq ($(KDIR),)
+    KVER = $(shell uname -r)
+    KDIR := /lib/modules/$(KVER)/build
+  endif
+else
+  KDIR := /lib/modules/$(KVER)/build
+endif
+
+export PWD    := $(shell pwd)
+export LIBSAS := m
+
+#SCST_INC_DIR := /usr/local/include/scst
+#SCST_DIR := $(SCST_INC_DIR)
+SCST_INC_DIR := $(SUBDIRS)/scst/include
+SCST_DIR := $(shell pwd)/scst/src
+
+EXTRA_CFLAGS += -I$(SCST_INC_DIR)
+
+EXTRA_CFLAGS +=  #-DSUPPORT_TARGET
+MODULE_NAME = mvsas_tgt
+
+EXTRA_CFLAGS += -DMV_DEBUG
+
+INSTALL_DIR := /lib/modules/$(shell uname -r)/extra
+
+#EXTRA_CFLAGS += -DCONFIG_SCST_TRACING
+#EXTRA_CFLAGS += -DDEBUG_WORK_IN_THREAD
+#EXTRA_CFLAGS += -DCONFIG_SCST_DEBUG
+
+ifneq ($(KERNELRELEASE),)
+obj-m := mvsas.o
+mvsas-y := mv_init.o  \
+           mv_sas.o   \
+           mv_tgt.o   \
+           mv_64xx.o  \
+           mv_94xx.o  \
+           mv_spi.o
+else
+all: Modules.symvers Module.symvers
+       $(MAKE) -C $(KDIR) SUBDIRS=$(shell pwd) BUILD_INI=m
+
+tgt: Modules.symvers Module.symvers
+       $(MAKE) -C $(KDIR) SUBDIRS=$(shell pwd) BUILD_INI=n
+
+install: all
+       $(MAKE) -C $(KDIR) SUBDIRS=$(shell pwd) BUILD_INI=m \
+               modules_install
+       -depmod -a $(KVER)
+
+ins:
+       ./config
+       insmod mvsas.ko
+       
+SCST_MOD_VERS := $(shell ls $(SCST_DIR)/Modules.symvers 2>/dev/null)
+ifneq ($(SCST_MOD_VERS),)
+Modules.symvers: $(SCST_DIR)/Modules.symvers
+       cp $(SCST_DIR)/Modules.symvers .
+else
+.PHONY: Modules.symvers
+endif
+
+# It's renamed in 2.6.18
+SCST_MOD_VERS := $(shell ls $(SCST_DIR)/Module.symvers 2>/dev/null)
+ifneq ($(SCST_MOD_VERS),)
+Module.symvers: $(SCST_DIR)/Module.symvers
+       cp $(SCST_DIR)/Module.symvers .
+else
+.PHONY: Module.symvers
+endif
+
+uninstall:
+       rm -f $(INSTALL_DIR)/$(MODULE_NAME).ko
+       -/sbin/depmod -a $(KVER)
+endif
+
+clean:
+       rm -f *.o *.ko .*.cmd *.mod.c .*.d .depend *~ Modules.symvers \
+               Module.symvers Module.markers modules.order
+       rm -rf .tmp_versions
+
+extraclean: clean
+
+.PHONY: all tgt install uninstall clean extraclean
diff --git a/mvsas_tgt/mv_64xx.c b/mvsas_tgt/mv_64xx.c
new file mode 100644 (file)
index 0000000..48c1479
--- /dev/null
@@ -0,0 +1,851 @@
+/*
+ * Marvell 88SE64xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_64xx.h"
+#include "mv_chips.h"
+
+static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+       void __iomem *regs = mvi->regs;
+       u32 reg;
+       struct mvs_phy *phy = &mvi->phy[i];
+
+       /* TODO check & save device type */
+       reg = mr32(MVS_GBL_PORT_TYPE);
+       phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       if (reg & MODE_SAS_SATA & (1 << i))
+               phy->phy_type |= PORT_TYPE_SAS;
+       else
+               phy->phy_type |= PORT_TYPE_SATA;
+}
+
+static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       tmp = mr32(MVS_PCS);
+       if (mvi->chip->n_phy <= 4)
+               tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
+       else
+               tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+       mw32(MVS_PCS, tmp);
+}
+
+static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+
+       mvs_phy_hacks(mvi);
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               /* TEST - for phy decoding error, adjust voltage levels */
+               mw32(MVS_P0_VSR_ADDR + 0, 0x8);
+               mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
+
+               mw32(MVS_P0_VSR_ADDR + 8, 0x8);
+               mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
+
+               mw32(MVS_P0_VSR_ADDR + 16, 0x8);
+               mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
+
+               mw32(MVS_P0_VSR_ADDR + 24, 0x8);
+               mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
+       } else {
+               int i;
+               /* disable auto port detection */
+               mw32(MVS_GBL_PORT_TYPE, 0);
+               for (i = 0; i < mvi->chip->n_phy; i++) {
+                       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
+                       mvs_write_port_vsr_data(mvi, i, 0x90000000);
+                       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
+                       mvs_write_port_vsr_data(mvi, i, 0x50f2);
+                       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
+                       mvs_write_port_vsr_data(mvi, i, 0x0e);
+               }
+       }
+}
+
+static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 reg, tmp;
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               if (phy_id < 4)
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
+               else
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
+
+       } else
+               reg = mr32(MVS_PHY_CTL);
+
+       tmp = reg;
+       if (phy_id < 4)
+               tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
+       else
+               tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               if (phy_id < 4) {
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+                       mdelay(10);
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
+               } else {
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+                       mdelay(10);
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
+               }
+       } else {
+               mw32(MVS_PHY_CTL, tmp);
+               mdelay(10);
+               mw32(MVS_PHY_CTL, reg);
+       }
+}
+
+static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+       u32 tmp;
+       tmp = mvs_read_port_irq_stat(mvi, phy_id);
+       tmp &= ~PHYEV_RDY_CH;
+       mvs_write_port_irq_stat(mvi, phy_id, tmp);
+       tmp = mvs_read_phy_ctl(mvi, phy_id);
+       if (hard)
+               tmp |= PHY_RST_HARD;
+       else
+               tmp |= PHY_RST;
+       mvs_write_phy_ctl(mvi, phy_id, tmp);
+       if (hard) {
+               do {
+                       tmp = mvs_read_phy_ctl(mvi, phy_id);
+               } while (tmp & PHY_RST_HARD);
+       }
+}
+
+static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       int i;
+
+       /* make sure interrupts are masked immediately (paranoia) */
+       mw32(MVS_GBL_CTL, 0);
+       tmp = mr32(MVS_GBL_CTL);
+
+       /* Reset Controller */
+       if (!(tmp & HBA_RST)) {
+               if (mvi->flags & MVF_PHY_PWR_FIX) {
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+                       tmp &= ~PCTL_PWR_OFF;
+                       tmp |= PCTL_PHY_DSBL;
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+                       tmp &= ~PCTL_PWR_OFF;
+                       tmp |= PCTL_PHY_DSBL;
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+               }
+       }
+
+       /* make sure interrupts are masked immediately (paranoia) */
+       mw32(MVS_GBL_CTL, 0);
+       tmp = mr32(MVS_GBL_CTL);
+
+       /* Reset Controller */
+       if (!(tmp & HBA_RST)) {
+               /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
+               mw32_f(MVS_GBL_CTL, HBA_RST);
+       }
+
+       /* wait for reset to finish; timeout is just a guess */
+       i = 1000;
+       while (i-- > 0) {
+               msleep(10);
+
+               if (!(mr32(MVS_GBL_CTL) & HBA_RST))
+                       break;
+       }
+       if (mr32(MVS_GBL_CTL) & HBA_RST) {
+               dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
+               return -EBUSY;
+       }
+       return 0;
+}
+
+static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               u32 offs;
+               if (phy_id < 4)
+                       offs = PCR_PHY_CTL;
+               else {
+                       offs = PCR_PHY_CTL2;
+                       phy_id -= 4;
+               }
+               pci_read_config_dword(mvi->pdev, offs, &tmp);
+               tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+               pci_write_config_dword(mvi->pdev, offs, tmp);
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+               mw32(MVS_PHY_CTL, tmp);
+       }
+}
+
+static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               u32 offs;
+               if (phy_id < 4)
+                       offs = PCR_PHY_CTL;
+               else {
+                       offs = PCR_PHY_CTL2;
+                       phy_id -= 4;
+               }
+               pci_read_config_dword(mvi->pdev, offs, &tmp);
+               tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+               pci_write_config_dword(mvi->pdev, offs, tmp);
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+               mw32(MVS_PHY_CTL, tmp);
+       }
+}
+
+static int __devinit mvs_64xx_init(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       int i;
+       u32 tmp, cctl;
+
+       if (mvi->pdev && mvi->pdev->revision == 0)
+               mvi->flags |= MVF_PHY_PWR_FIX;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               mvs_show_pcie_usage(mvi);
+               tmp = mvs_64xx_chip_reset(mvi);
+               if (tmp)
+                       return tmp;
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp |= PCTL_PHY_DSBL;
+               mw32(MVS_PHY_CTL, tmp);
+       }
+
+       /* Init Chip */
+       /* make sure RST is set; HBA_RST /should/ have done that for us */
+       cctl = mr32(MVS_CTL) & 0xFFFF;
+       if (cctl & CCTL_RST)
+               cctl &= ~CCTL_RST;
+       else
+               mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               /* write to device control _AND_ device status register */
+               pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+               tmp &= ~PRD_REQ_MASK;
+               tmp |= PRD_REQ_SIZE;
+               pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+               pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp &= ~PCTL_PHY_DSBL;
+               pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+               pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+               tmp &= PCTL_PWR_OFF;
+               tmp &= ~PCTL_PHY_DSBL;
+               pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp |= PCTL_COM_ON;
+               tmp &= ~PCTL_PHY_DSBL;
+               tmp |= PCTL_LINK_RST;
+               mw32(MVS_PHY_CTL, tmp);
+               msleep(100);
+               tmp &= ~PCTL_LINK_RST;
+               mw32(MVS_PHY_CTL, tmp);
+               msleep(100);
+       }
+
+       /* reset control */
+       mw32(MVS_PCS, 0);               /* MVS_PCS */
+       /* init phys */
+       mvs_64xx_phy_hacks(mvi);
+
+       /* enable auto port detection */
+       mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+
+       mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+       mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+       mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+       mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+       mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+       mw32(MVS_TX_LO, mvi->tx_dma);
+       mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+       mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+       mw32(MVS_RX_LO, mvi->rx_dma);
+       mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               /* set phy local SAS address */
+               /* should set little endian SAS address to 64xx chip */
+               mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
+                               cpu_to_be64(mvi->phy[i].dev_sas_addr));
+
+               mvs_64xx_enable_xmt(mvi, i);
+
+               mvs_64xx_phy_reset(mvi, i, 1);
+               msleep(500);
+               mvs_64xx_detect_porttype(mvi, i);
+       }
+       if (mvi->flags & MVF_FLAG_SOC) {
+               /* set select registers */
+               writel(0x0E008000, regs + 0x000);
+               writel(0x59000008, regs + 0x004);
+               writel(0x20, regs + 0x008);
+               writel(0x20, regs + 0x00c);
+               writel(0x20, regs + 0x010);
+               writel(0x20, regs + 0x014);
+               writel(0x20, regs + 0x018);
+               writel(0x20, regs + 0x01c);
+       }
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               /* clear phy int status */
+               tmp = mvs_read_port_irq_stat(mvi, i);
+               tmp &= ~PHYEV_SIG_FIS;
+               mvs_write_port_irq_stat(mvi, i, tmp);
+
+               /* set phy int mask */
+               tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+                       PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
+                       PHYEV_DEC_ERR;
+               mvs_write_port_irq_mask(mvi, i, tmp);
+
+               msleep(100);
+               mvs_update_phyinfo(mvi, i, 1);
+       }
+
+       /* FIXME: update wide port bitmaps */
+
+       /* little endian for open address and command table, etc. */
+       /*
+        * it seems that ( from the spec ) turning on big-endian won't
+        * do us any good on big-endian machines, need further confirmation
+        */
+       cctl = mr32(MVS_CTL);
+       cctl |= CCTL_ENDIAN_CMD;
+       cctl |= CCTL_ENDIAN_DATA;
+       cctl &= ~CCTL_ENDIAN_OPEN;
+       cctl |= CCTL_ENDIAN_RSP;
+       mw32_f(MVS_CTL, cctl);
+
+       /* reset CMD queue */
+       tmp = mr32(MVS_PCS);
+       tmp |= PCS_CMD_RST;
+       mw32(MVS_PCS, tmp);
+       /* interrupt coalescing may cause missing HW interrupt in some cases,
+        * and the max count is 0x1ff, while our max slot is 0x200,
+        * it will make count 0.
+        */
+       tmp = 0;
+       mw32(MVS_INT_COAL, tmp);
+
+       tmp = 0x100;
+       mw32(MVS_INT_COAL_TMOUT, tmp);
+
+       /* ladies and gentlemen, start your engines */
+       mw32(MVS_TX_CFG, 0);
+       mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+       mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+       /* enable CMD/CMPL_Q/RESP mode */
+       mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
+               PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+       /* enable completion queue interrupt */
+       tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+               CINT_DMA_PCIE);
+
+       mw32(MVS_INT_MASK, tmp);
+
+       /* Enable SRS interrupt */
+       mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+       return 0;
+}
+
+static int mvs_64xx_ioremap(struct mvs_info *mvi)
+{
+       if (!mvs_ioremap(mvi, 4, 2))
+               return 0;
+       return -1;
+}
+
+static void mvs_64xx_iounmap(struct mvs_info *mvi)
+{
+       mvs_iounmap(mvi->regs);
+       mvs_iounmap(mvi->regs_ex);
+}
+
+static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       tmp = mr32(MVS_GBL_CTL);
+       mw32(MVS_GBL_CTL, tmp | INT_EN);
+}
+
+static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       tmp = mr32(MVS_GBL_CTL);
+       mw32(MVS_GBL_CTL, tmp & ~INT_EN);
+}
+
+static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
+{
+       void __iomem *regs = mvi->regs;
+       u32 stat;
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               stat = mr32(MVS_GBL_INT_STAT);
+
+               if (stat == 0 || stat == 0xffffffff)
+                       return 0;
+       } else
+               stat = 1;
+       return stat;
+}
+
+static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+       void __iomem *regs = mvi->regs;
+
+       /* clear CMD_CMPLT ASAP */
+       mw32_f(MVS_INT_STAT, CINT_DONE);
+#ifndef MVS_USE_TASKLET
+       spin_lock(&mvi->lock);
+#endif
+       mvs_int_full(mvi);
+#ifndef MVS_USE_TASKLET
+       spin_unlock(&mvi->lock);
+#endif
+       return IRQ_HANDLED;
+}
+
+static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+       u32 tmp;
+       mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
+       mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
+       do {
+               tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
+       } while (tmp & 1 << (slot_idx % 32));
+       do {
+               tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
+       } while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+                               u32 tfs)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       if (type == PORT_TYPE_SATA) {
+               tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+               mw32(MVS_INT_STAT_SRS_0, tmp);
+       }
+       mw32(MVS_INT_STAT, CINT_CI_STOP);
+       tmp = mr32(MVS_PCS) | 0xFF00;
+       mw32(MVS_PCS, tmp);
+}
+
+static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp, offs;
+
+       if (*tfs == MVS_ID_NOT_MAPPED)
+               return;
+
+       offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+       if (*tfs < 16) {
+               tmp = mr32(MVS_PCS);
+               mw32(MVS_PCS, tmp & ~offs);
+       } else {
+               tmp = mr32(MVS_CTL);
+               mw32(MVS_CTL, tmp & ~offs);
+       }
+
+       tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
+       if (tmp)
+               mw32(MVS_INT_STAT_SRS_0, tmp);
+
+       *tfs = MVS_ID_NOT_MAPPED;
+       return;
+}
+
+static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+       int i;
+       u32 tmp, offs;
+       void __iomem *regs = mvi->regs;
+
+       if (*tfs != MVS_ID_NOT_MAPPED)
+               return 0;
+
+       tmp = mr32(MVS_PCS);
+
+       for (i = 0; i < mvi->chip->srs_sz; i++) {
+               if (i == 16)
+                       tmp = mr32(MVS_CTL);
+               offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+               if (!(tmp & offs)) {
+                       *tfs = i;
+
+                       if (i < 16)
+                               mw32(MVS_PCS, tmp | offs);
+                       else
+                               mw32(MVS_CTL, tmp | offs);
+                       tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
+                       if (tmp)
+                               mw32(MVS_INT_STAT_SRS_0, tmp);
+                       return 0;
+               }
+       }
+       return MVS_ID_NOT_MAPPED;
+}
+
+void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+       int i;
+       struct scatterlist *sg;
+       struct mvs_prd *buf_prd = prd;
+       for_each_sg(scatter, sg, nr, i) {
+               buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+               buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+               buf_prd++;
+       }
+}
+
+static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
+{
+       u32 phy_st;
+       mvs_write_port_cfg_addr(mvi, i,
+                       PHYR_PHY_STAT);
+       phy_st = mvs_read_port_cfg_data(mvi, i);
+       if (phy_st & PHY_OOB_DTCTD)
+               return 1;
+       return 0;
+}
+
+static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
+                               struct sas_identify_frame *id)
+
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+       sas_phy->linkrate =
+               (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+                       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+
+       phy->minimum_linkrate =
+               (phy->phy_status &
+                       PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+       phy->maximum_linkrate =
+               (phy->phy_status &
+                       PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
+
+       mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+       phy->dev_info = mvs_read_port_cfg_data(mvi, i);
+
+       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
+       phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
+
+       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+       phy->att_dev_sas_addr =
+            (u64) mvs_read_port_cfg_data(mvi, i) << 32;
+       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+       phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+       phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
+}
+
+static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
+{
+       u32 tmp;
+       struct mvs_phy *phy = &mvi->phy[i];
+       /* workaround for HW phy decoding error on 1.5g disk drive */
+       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
+       tmp = mvs_read_port_vsr_data(mvi, i);
+       if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+            PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
+               SAS_LINK_RATE_1_5_GBPS)
+               tmp &= ~PHY_MODE6_LATECLK;
+       else
+               tmp |= PHY_MODE6_LATECLK;
+       mvs_write_port_vsr_data(mvi, i, tmp);
+}
+
+void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+                       struct sas_phy_linkrates *rates)
+{
+       u32 lrmin = 0, lrmax = 0;
+       u32 tmp;
+
+       tmp = mvs_read_phy_ctl(mvi, phy_id);
+       lrmin = (rates->minimum_linkrate << 8);
+       lrmax = (rates->maximum_linkrate << 12);
+
+       if (lrmin) {
+               tmp &= ~(0xf << 8);
+               tmp |= lrmin;
+       }
+       if (lrmax) {
+               tmp &= ~(0xf << 12);
+               tmp |= lrmax;
+       }
+       mvs_write_phy_ctl(mvi, phy_id, tmp);
+       mvs_64xx_phy_reset(mvi, phy_id, 1);
+}
+
+#ifdef SUPPORT_TARGET
+static void
+mvs_64xx_enable_ssp_target(struct mvs_info *mvi, u32 phy_id)
+{
+       u32 dev_info;
+#if 0
+       struct mvst_port *port = &mvi->tgt_port[port_id];
+       int j, no;
+       for_each_phy(port->wide_port_phymap, j, no) {
+               if (j & 1) {
+                       mvs_write_port_cfg_addr(mvi, no, PHYR_IDENTIFY);
+                       dev_info = mvs_read_port_cfg_data(mvi, no);
+                       dev_info |= PORT_DEV_SSP_TRGT;
+                       mvs_write_port_cfg_data(mvi, no, dev_info);
+               }
+       }
+#else
+       mvs_write_port_cfg_addr(mvi, phy_id, PHYR_IDENTIFY);
+       dev_info = mvs_read_port_cfg_data(mvi, phy_id);
+       dev_info |= PORT_DEV_SSP_TRGT;
+       mvs_write_port_cfg_data(mvi, phy_id, dev_info);
+       mvi->phy[phy_id].dev_info |= PORT_DEV_SSP_TRGT;
+#endif
+
+}
+
+static void
+mvs_64xx_disable_ssp_target(struct mvs_info *mvi, u32 phy_id)
+{
+       u32 dev_info;
+#if    0
+       struct mvst_port *port = &mvi->tgt_port[port_id];
+       int j, no;
+
+       for_each_phy(port->wide_port_phymap, j, no) {
+               if (j & 1) {
+                       mvs_write_port_cfg_addr(mvi, no, PHYR_IDENTIFY);
+                       dev_info = mvs_read_port_cfg_data(mvi, no);
+                       dev_info &= ~PORT_DEV_SSP_TRGT;
+                       mvs_write_port_cfg_data(mvi, no, dev_info);
+               }
+       }
+#else
+       mvs_write_port_cfg_addr(mvi, phy_id, PHYR_IDENTIFY);
+       dev_info = mvs_read_port_cfg_data(mvi, phy_id);
+       dev_info &= ~PORT_DEV_SSP_TRGT;
+       mvs_write_port_cfg_data(mvi, phy_id, dev_info);
+       mvi->phy[phy_id].dev_info &= ~PORT_DEV_SSP_TRGT;
+#endif
+
+}
+#endif /* end SUPPORT_TARGET */
+
+
+static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
+{
+       u32 tmp;
+       void __iomem *regs = mvi->regs;
+       tmp = mr32(MVS_PCS);
+       mw32(MVS_PCS, tmp & 0xFFFF);
+       mw32(MVS_PCS, tmp);
+       tmp = mr32(MVS_CTL);
+       mw32(MVS_CTL, tmp & 0xFFFF);
+       mw32(MVS_CTL, tmp);
+}
+
+
+u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs_ex;
+       return ior32(SPI_DATA_REG_64XX);
+}
+
+void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+       void __iomem *regs = mvi->regs_ex;
+        iow32(SPI_DATA_REG_64XX, data);
+}
+
+
+int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+                       u32      *dwCmd,
+                       u8       cmd,
+                       u8       read,
+                       u8       length,
+                       u32      addr
+                       )
+{
+       u32  dwTmp;
+
+       dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
+       if (read)
+               dwTmp |= 1U<<23;
+
+       if (addr != MV_MAX_U32) {
+               dwTmp |= 1U<<22;
+               dwTmp |= (addr & 0x0003FFFF);
+       }
+
+       *dwCmd = dwTmp;
+       return 0;
+}
+
+
+int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+       void __iomem *regs = mvi->regs_ex;
+       int     retry;
+
+       for (retry = 0; retry < 1; retry++) {
+               iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
+               iow32(SPI_CMD_REG_64XX, cmd);
+               iow32(SPI_CTRL_REG_64XX,
+                       SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
+       }
+
+       return 0;
+}
+
+int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+       void __iomem *regs = mvi->regs_ex;
+       u32 i, dwTmp;
+
+       for (i = 0; i < timeout; i++) {
+               dwTmp = ior32(SPI_CTRL_REG_64XX);
+               if (!(dwTmp & SPI_CTRL_SPISTART))
+                       return 0;
+               msleep(10);
+       }
+
+       return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+       int i;
+       struct mvs_prd *buf_prd = prd;
+       buf_prd += from;
+       for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+               buf_prd->addr = cpu_to_le64(buf_dma);
+               buf_prd->len = cpu_to_le32(buf_len);
+               ++buf_prd;
+       }
+}
+#endif
+
+const struct mvs_dispatch mvs_64xx_dispatch = {
+       "mv64xx",
+       mvs_64xx_init,
+       NULL,
+       mvs_64xx_ioremap,
+       mvs_64xx_iounmap,
+       mvs_64xx_isr,
+       mvs_64xx_isr_status,
+       mvs_64xx_interrupt_enable,
+       mvs_64xx_interrupt_disable,
+       mvs_read_phy_ctl,
+       mvs_write_phy_ctl,
+       mvs_read_port_cfg_data,
+       mvs_write_port_cfg_data,
+       mvs_write_port_cfg_addr,
+       mvs_read_port_vsr_data,
+       mvs_write_port_vsr_data,
+       mvs_write_port_vsr_addr,
+       mvs_read_port_irq_stat,
+       mvs_write_port_irq_stat,
+       mvs_read_port_irq_mask,
+       mvs_write_port_irq_mask,
+       mvs_get_sas_addr,
+       mvs_64xx_command_active,
+       mvs_64xx_issue_stop,
+       mvs_start_delivery,
+       mvs_rx_update,
+       mvs_int_full,
+       mvs_64xx_assign_reg_set,
+       mvs_64xx_free_reg_set,
+       mvs_get_prd_size,
+       mvs_get_prd_count,
+       mvs_64xx_make_prd,
+       mvs_64xx_detect_porttype,
+       mvs_64xx_oob_done,
+       mvs_64xx_fix_phy_info,
+       mvs_64xx_phy_work_around,
+       mvs_64xx_phy_set_link_rate,
+       mvs_hw_max_link_rate,
+       mvs_64xx_phy_disable,
+       mvs_64xx_phy_enable,
+       mvs_64xx_phy_reset,
+       mvs_64xx_stp_reset,
+#ifdef SUPPORT_TARGET
+       mvs_64xx_enable_ssp_target,
+       mvs_64xx_disable_ssp_target,
+#endif
+       mvs_64xx_clear_active_cmds,
+       mvs_64xx_spi_read_data,
+       mvs_64xx_spi_write_data,
+       mvs_64xx_spi_buildcmd,
+       mvs_64xx_spi_issuecmd,
+       mvs_64xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       mvs_64xx_fix_dma,
+#endif
+};
+
diff --git a/mvsas_tgt/mv_64xx.h b/mvsas_tgt/mv_64xx.h
new file mode 100644 (file)
index 0000000..42e947d
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Marvell 88SE64xx hardware-specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+#include <linux/types.h>
+
+/* 88SE64xx is a 3 Gbps part */
+#define MAX_LINK_RATE		SAS_LINK_RATE_3_0_GBPS
+
+/* enhanced mode registers (BAR4) */
+/*
+ * Byte offsets into the memory-mapped register window.  Per-port
+ * registers are listed for port 0 and port 4; as noted inline, the
+ * registers for ports 1-3 and 5-7 follow at fixed strides.
+ */
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+
+	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
+	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */
+
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS_0	= 0x15C,
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
+					 /* ports 5-7 follow after this */
+	MVS_P4_INT_STAT		= 0x200, /* Port4 interrupt status */
+	MVS_P4_INT_MASK		= 0x204, /* Port4 interrupt enable mask */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
+					 /* ports 5-7 follow after this */
+	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
+
+	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
+					 /* ports 5-7 follow after this */
+	MVS_P4_CFG_ADDR		= 0x230, /* Port4 config address */
+	MVS_P4_CFG_DATA		= 0x234, /* Port4 config data */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
+	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
+					 /* ports 5-7 follow after this */
+	MVS_P4_VSR_ADDR		= 0x250, /* port4 VSR addr */
+	MVS_P4_VSR_DATA		= 0x254, /* port4 VSR data */
+};
+
+/* PCI configuration-space registers (byte offsets) */
+enum pci_cfg_registers {
+	PCR_PHY_CTL		= 0x40,
+	PCR_PHY_CTL2		= 0x90,
+	PCR_DEV_CTRL		= 0xE8,
+	PCR_LINK_STAT		= 0xF2,
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+/* Indirect register indices written to MVS_Px_VSR_ADDR, read/written
+ * through MVS_Px_VSR_DATA. */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT		= 0x00, /* Phy Status */
+	VSR_PHY_MODE1		= 0x01, /* phy tx */
+	VSR_PHY_MODE2		= 0x02, /* tx scc */
+	VSR_PHY_MODE3		= 0x03, /* pll */
+	VSR_PHY_MODE4		= 0x04, /* VCO */
+	VSR_PHY_MODE5		= 0x05, /* Rx */
+	VSR_PHY_MODE6		= 0x06, /* CDR */
+	VSR_PHY_MODE7		= 0x07, /* Impedance */
+	VSR_PHY_MODE8		= 0x08, /* Voltage */
+	VSR_PHY_MODE9		= 0x09, /* Test */
+	VSR_PHY_MODE10		= 0x0A, /* Power */
+	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
+};
+
+/* Link-rate fields inside the per-phy serial control/status register */
+enum chip_register_bits {
+	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+/* Maximum scatter/gather entries in one hardware PRD table */
+#define MAX_SG_ENTRY		64
+
+/* Physical region descriptor: one DMA segment, little-endian layout */
+struct mvs_prd {
+	__le64			addr;		/* 64-bit buffer address */
+	__le32			reserved;
+	/* NOTE(review): comment says "16-bit length" but the field is
+	 * 32 bits wide -- presumably the hardware honours only the low
+	 * 16 bits; confirm against the 64xx datasheet. */
+	__le32			len;		/* 16-bit length */
+};
+
+/* SPI flash controller registers (offsets in the register window) */
+#define SPI_CTRL_REG				0xc0
+#define SPI_CTRL_VENDOR_ENABLE		(1U<<29)
+#define SPI_CTRL_SPIRDY			(1U<<22)
+#define SPI_CTRL_SPISTART			(1U<<20)
+
+#define SPI_CMD_REG		0xc4
+#define SPI_DATA_REG		0xc8
+
+/* same controller accessed through the 64xx command register port */
+#define SPI_CTRL_REG_64XX		0x10
+#define SPI_CMD_REG_64XX		0x14
+#define SPI_DATA_REG_64XX		0x18
+
+#endif
diff --git a/mvsas_tgt/mv_94xx.c b/mvsas_tgt/mv_94xx.c
new file mode 100644 (file)
index 0000000..5eb5f66
--- /dev/null
@@ -0,0 +1,702 @@
+/*
+ * Marvell 88SE94xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_94xx.h"
+#include "mv_chips.h"
+
+/*
+ * Sample PHY mode 3 through the per-port VSR window and update
+ * phy->phy_type to record whether a SAS or SATA device is attached.
+ */
+static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	u32 mode3, attached;
+
+	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
+	mode3 = mvs_read_port_vsr_data(mvi, i);
+	/* bits [21:16] encode the attached device type */
+	attached = (mode3 & 0x3f0000) >> 16;
+
+	/* clear both type bits, then set exactly one */
+	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	if (attached == 0x10)
+		phy->phy_type |= PORT_TYPE_SAS;
+	else	/* 0x1d and any unrecognized code is treated as SATA */
+		phy->phy_type |= PORT_TYPE_SATA;
+}
+
+/* Enable command transmission for @phy_id in the port control/status
+ * register. */
+static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 pcs = mr32(MVS_PCS);
+
+	pcs |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+	mw32(MVS_PCS, pcs);
+}
+
+/*
+ * Reset one PHY.  The saved interrupt status is written back with the
+ * PHY-ready-change bit cleared first.  A hard reset sets PHY_RST_HARD
+ * in the PHY control register and busy-waits until the hardware clears
+ * it; a soft reset sets PHY_RST in the VSR status register and returns
+ * without waiting.
+ * NOTE(review): the hard-reset poll loop has no timeout -- it will spin
+ * forever if the controller never clears PHY_RST_HARD.
+ */
+static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+	u32 tmp;
+
+	tmp = mvs_read_port_irq_stat(mvi, phy_id);
+	tmp &= ~PHYEV_RDY_CH;
+	mvs_write_port_irq_stat(mvi, phy_id, tmp);
+	if (hard) {
+		tmp = mvs_read_phy_ctl(mvi, phy_id);
+		tmp |= PHY_RST_HARD;
+		mvs_write_phy_ctl(mvi, phy_id, tmp);
+		/* wait for the chip to finish the hard reset */
+		do {
+			tmp = mvs_read_phy_ctl(mvi, phy_id);
+		} while (tmp & PHY_RST_HARD);
+	} else {
+		mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
+		tmp = mvs_read_port_vsr_data(mvi, phy_id);
+		tmp |= PHY_RST;
+		mvs_write_port_vsr_data(mvi, phy_id, tmp);
+	}
+}
+
+/* Power down @phy_id by setting the disable bit (bit 23) in PHY mode 2. */
+static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+	u32 mode2;
+
+	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+	mode2 = mvs_read_port_vsr_data(mvi, phy_id) | 0x00800000;
+	mvs_write_port_vsr_data(mvi, phy_id, mode2);
+}
+
+/*
+ * Bring a PHY out of the disabled state: program two vendor-specific
+ * registers (indices 0x1B4 and 0x104) with fixed values, then write
+ * PHY mode 2.  The constants are Marvell-provided magic; their meaning
+ * is not documented here.
+ */
+static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+	mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
+	mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+	mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
+	mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
+	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+	mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
+}
+
+/*
+ * One-time controller initialisation: bring the chip out of reset,
+ * program the DMA list/ring base addresses, disable+re-enable and
+ * hard-reset every PHY, then start the delivery/completion rings and
+ * unmask interrupts.  Always returns 0.
+ */
+static int __devinit mvs_94xx_init(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	int i;
+	u32 tmp, cctl;
+
+	mvs_show_pcie_usage(mvi);
+	/* SOC parts: power the PHY block but keep the PHYs disabled */
+	if (mvi->flags & MVF_FLAG_SOC) {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp |= PCTL_PHY_DSBL;
+		mw32(MVS_PHY_CTL, tmp);
+	}
+
+	/* Init Chip */
+	/* make sure RST is set; HBA_RST /should/ have done that for us */
+	cctl = mr32(MVS_CTL) & 0xFFFF;
+	if (cctl & CCTL_RST)
+		cctl &= ~CCTL_RST;
+	else
+		mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+	/* SOC parts: release the PHYs and pulse the link reset line */
+	if (mvi->flags & MVF_FLAG_SOC) {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp |= PCTL_COM_ON;
+		tmp &= ~PCTL_PHY_DSBL;
+		tmp |= PCTL_LINK_RST;
+		mw32(MVS_PHY_CTL, tmp);
+		msleep(100);
+		tmp &= ~PCTL_LINK_RST;
+		mw32(MVS_PHY_CTL, tmp);
+		msleep(100);
+	}
+
+	/* reset control */
+	mw32(MVS_PCS, 0);		/* MVS_PCS */
+	mw32(MVS_STP_REG_SET_0, 0);
+	mw32(MVS_STP_REG_SET_1, 0);
+
+	/* init phys */
+	mvs_phy_hacks(mvi);
+
+	/* disable Multiplexing, enable phy implemented */
+	mw32(MVS_PORTS_IMP, 0xFF);
+
+	/* analog tuning via the port-agnostic VSR window (magic values) */
+	mw32(MVS_PA_VSR_ADDR, 0x00000104);
+	mw32(MVS_PA_VSR_PORT, 0x00018080);
+	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
+	mw32(MVS_PA_VSR_PORT, 0x0084ffff);
+
+	/* set LED blink when IO*/
+	mw32(MVS_PA_VSR_ADDR, 0x00000030);
+	tmp = mr32(MVS_PA_VSR_PORT);
+	tmp &= 0xFFFF00FF;
+	tmp |= 0x00003300;
+	mw32(MVS_PA_VSR_PORT, tmp);
+
+	/* 64-bit base addresses: low word, then high word via two shifts
+	 * (avoids a >> 32 warning when dma_addr_t is 32-bit) */
+	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+	mw32(MVS_TX_LO, mvi->tx_dma);
+	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+	mw32(MVS_RX_LO, mvi->rx_dma);
+	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		mvs_94xx_phy_disable(mvi, i);
+		/* set phy local SAS address */
+		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
+						(mvi->phy[i].dev_sas_addr));
+
+		mvs_94xx_enable_xmt(mvi, i);
+		mvs_94xx_phy_enable(mvi, i);
+
+		/* hard reset, then give OOB/negotiation time to settle */
+		mvs_94xx_phy_reset(mvi, i, 1);
+		msleep(500);
+		mvs_94xx_detect_porttype(mvi, i);
+	}
+
+	if (mvi->flags & MVF_FLAG_SOC) {
+		/* set select registers */
+		writel(0x0E008000, regs + 0x000);
+		writel(0x59000008, regs + 0x004);
+		writel(0x20, regs + 0x008);
+		writel(0x20, regs + 0x00c);
+		writel(0x20, regs + 0x010);
+		writel(0x20, regs + 0x014);
+		writel(0x20, regs + 0x018);
+		writel(0x20, regs + 0x01c);
+	}
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		/* clear phy int status */
+		tmp = mvs_read_port_irq_stat(mvi, i);
+		tmp &= ~PHYEV_SIG_FIS;
+		mvs_write_port_irq_stat(mvi, i, tmp);
+
+		/* set phy int mask */
+		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
+			PHYEV_ID_DONE  | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
+		mvs_write_port_irq_mask(mvi, i, tmp);
+
+		msleep(100);
+		mvs_update_phyinfo(mvi, i, 1);
+	}
+
+	/* FIXME: update wide port bitmaps */
+
+	/* little endian for open address and command table, etc. */
+	/*
+	 * it seems that ( from the spec ) turning on big-endian won't
+	 * do us any good on big-endian machines, need further confirmation
+	 */
+	cctl = mr32(MVS_CTL);
+	cctl |= CCTL_ENDIAN_CMD;
+	cctl |= CCTL_ENDIAN_DATA;
+	cctl &= ~CCTL_ENDIAN_OPEN;
+	cctl |= CCTL_ENDIAN_RSP;
+	mw32_f(MVS_CTL, cctl);
+
+	/* reset CMD queue */
+	tmp = mr32(MVS_PCS);
+	tmp |= PCS_CMD_RST;
+	mw32(MVS_PCS, tmp);
+	/* interrupt coalescing may cause missing HW interrupt in some case,
+	 * and the max count is 0x1ff, while our max slot is 0x200,
+	 * it will make count 0.
+	 */
+	tmp = 0;
+	mw32(MVS_INT_COAL, tmp);
+
+	tmp = 0x100;
+	mw32(MVS_INT_COAL_TMOUT, tmp);
+
+	/* ladies and gentlemen, start your engines */
+	mw32(MVS_TX_CFG, 0);
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+	/* enable CMD/CMPL_Q/RESP mode */
+	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
+		PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+	/* enable completion queue interrupt */
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+		CINT_DMA_PCIE);
+	tmp |= CINT_PHY_MASK;
+	mw32(MVS_INT_MASK, tmp);
+
+	/* Enable SRS interrupt */
+	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+	return 0;
+}
+
+/*
+ * Map BAR2 and derive the per-HBA windows: extended registers live at
+ * +0x10200, the main registers at +0x20000, and the second chip
+ * instance (id == 1) gets a further +0x4000.  Returns 0 on success,
+ * -1 when the mapping fails.
+ */
+static int mvs_94xx_ioremap(struct mvs_info *mvi)
+{
+	if (mvs_ioremap(mvi, 2, -1))
+		return -1;
+
+	mvi->regs_ex = mvi->regs + 0x10200;
+	mvi->regs += 0x20000;
+	if (mvi->id == 1)
+		mvi->regs += 0x4000;
+	return 0;
+}
+
+/* Undo the offsets applied by mvs_94xx_ioremap() and unmap BAR2. */
+static void mvs_94xx_iounmap(struct mvs_info *mvi)
+{
+	if (!mvi->regs)
+		return;
+
+	mvi->regs -= 0x20000;
+	if (mvi->id == 1)
+		mvi->regs -= 0x4000;
+	mvs_iounmap(mvi->regs);
+}
+
+/*
+ * Unmask the two SAS-core interrupt sources in the global control
+ * register.
+ * NOTE(review): the updated control value is also written to
+ * MVS_GBL_INT_STAT and to regs_ex + 0x0C..0x18; the purpose of those
+ * extra writes is not evident from this file -- confirm against the
+ * 88SE94xx register specification.
+ */
+static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+	mw32(MVS_GBL_INT_STAT, tmp);
+	writel(tmp, regs + 0x0C);
+	writel(tmp, regs + 0x10);
+	writel(tmp, regs + 0x14);
+	writel(tmp, regs + 0x18);
+	mw32(MVS_GBL_CTL, tmp);
+}
+
+/*
+ * Mask both SAS-core interrupt sources; mirror image of
+ * mvs_94xx_interrupt_enable(), including the same extra writes to
+ * MVS_GBL_INT_STAT and regs_ex + 0x0C..0x18 (see note there).
+ */
+static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+
+	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+	mw32(MVS_GBL_INT_STAT, tmp);
+	writel(tmp, regs + 0x0C);
+	writel(tmp, regs + 0x10);
+	writel(tmp, regs + 0x14);
+	writel(tmp, regs + 0x18);
+	mw32(MVS_GBL_CTL, tmp);
+}
+
+/*
+ * Fetch the global interrupt status (PCI(e) parts only).  Returns 0
+ * when neither SAS core raised the interrupt, so the caller can bail
+ * out early; SOC parts always return 0 here.
+ */
+static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 pending;
+
+	if (mvi->flags & MVF_FLAG_SOC)
+		return 0;
+
+	pending = mr32(MVS_GBL_INT_STAT);
+	if (!(pending & (IRQ_SAS_A | IRQ_SAS_B)))
+		return 0;
+	return pending;
+}
+
+/*
+ * Interrupt service routine.  @stat is the value returned by
+ * mvs_94xx_isr_status(); each chip instance only services the SAS
+ * core that belongs to it (IRQ_SAS_A for id 0, IRQ_SAS_B for id 1).
+ * The DONE bit is acknowledged before processing; mvs_int_full() runs
+ * under mvi->lock unless tasklet mode is compiled in, where locking is
+ * handled elsewhere.
+ */
+static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+	void __iomem *regs = mvi->regs;
+
+	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
+			((stat & IRQ_SAS_B) && mvi->id == 1)) {
+		mw32_f(MVS_INT_STAT, CINT_DONE);
+	#ifndef MVS_USE_TASKLET
+		spin_lock(&mvi->lock);
+	#endif
+		mvs_int_full(mvi);
+	#ifndef MVS_USE_TASKLET
+		spin_unlock(&mvi->lock);
+	#endif
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Force-activate a command slot through the indirect command register
+ * port and busy-wait until the hardware clears the slot's bit.
+ * NOTE(review): the byte offset uses (slot_idx >> 3) while the bit
+ * position uses (slot_idx % 32); for some slot indices this yields a
+ * non-4-byte-aligned offset -- verify the addressing against the
+ * 88SE94xx register spec before changing.  No timeout on the poll.
+ */
+static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+	u32 tmp;
+	mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
+	do {
+		tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
+	} while (tmp & 1 << (slot_idx % 32));
+}
+
+/*
+ * Stop command delivery.  For SATA ports the pending SRS interrupt for
+ * register set @tfs is acknowledged first; then the CI-stop interrupt
+ * is cleared and the stop bits (0xFF00) are set in MVS_PCS.
+ * NOTE(review): only MVS_INT_STAT_SRS_0 is touched, and (1U << tfs)
+ * overflows for tfs >= 32 -- this path assumes register sets 0-31.
+ */
+static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+				u32 tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	if (type == PORT_TYPE_SATA) {
+		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+		mw32(MVS_INT_STAT_SRS_0, tmp);
+	}
+	mw32(MVS_INT_STAT, CINT_CI_STOP);
+	tmp = mr32(MVS_PCS) | 0xFF00;
+	mw32(MVS_PCS, tmp);
+}
+
+/*
+ * Release a SATA register set previously handed out by
+ * mvs_94xx_assign_reg_set() and acknowledge any SRS interrupt still
+ * pending for it; *tfs is reset to MVS_ID_NOT_MAPPED.
+ * NOTE(review): in the reg_set >= 32 branch the full 64-bit mask is
+ * passed to w_reg_set_enable() and ANDed against SRS_1 without a
+ * ">> 32" -- asymmetric with the assign path; confirm this is
+ * intentional.
+ */
+static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+	u8 reg_set = *tfs;
+
+	/* nothing to free if this device never got a register set */
+	if (*tfs == MVS_ID_NOT_MAPPED)
+		return;
+
+	mvi->sata_reg_set &= ~bit(reg_set);
+	if (reg_set < 32) {
+		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
+		tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
+		if (tmp)
+			mw32(MVS_INT_STAT_SRS_0, tmp);
+	} else {
+		w_reg_set_enable(reg_set, mvi->sata_reg_set);
+		tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
+		if (tmp)
+			mw32(MVS_INT_STAT_SRS_1, tmp);
+	}
+
+	*tfs = MVS_ID_NOT_MAPPED;
+
+	return;
+}
+
+/*
+ * Allocate a free SATA register set for a device and enable it in the
+ * proper (low or high word) register-set enable register.
+ *
+ * @tfs: in/out register set id; left untouched if already mapped.
+ * Returns 0 on success (or when already mapped), MVS_ID_NOT_MAPPED
+ * when all 64 register sets are in use (mv_ffc64() returned -1).
+ */
+static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+	int i;
+	void __iomem *regs = mvi->regs;
+
+	if (*tfs != MVS_ID_NOT_MAPPED)
+		return 0;
+
+	i = mv_ffc64(mvi->sata_reg_set);
+	/*
+	 * Register sets 32-63 live in the upper half of the 64-bit
+	 * allocation mask.  The original test was "i > 32", which sent
+	 * set 32 down the low-word path and programmed the wrong enable
+	 * register; set 32 must use the ">> 32" word.
+	 */
+	if (i >= 32) {
+		mvi->sata_reg_set |= bit(i);
+		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
+		*tfs = i;
+		return 0;
+	} else if (i >= 0) {
+		mvi->sata_reg_set |= bit(i);
+		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
+		*tfs = i;
+		return 0;
+	}
+	return MVS_ID_NOT_MAPPED;
+}
+
+/*
+ * Translate @nr mapped scatterlist entries into the hardware PRD table
+ * at @prd (little-endian DMA address + length per entry).
+ */
+static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+	struct mvs_prd *entry = prd;
+	struct scatterlist *sg;
+	int idx;
+
+	for_each_sg(scatter, sg, nr, idx) {
+		entry->addr = cpu_to_le64(sg_dma_address(sg));
+		entry->im_len.len = cpu_to_le32(sg_dma_len(sg));
+		entry++;
+	}
+}
+
+/* Return 1 when OOB negotiation is complete and the PHY reports ready,
+ * 0 otherwise. */
+static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
+{
+	return (mvs_read_phy_ctl(mvi, i) & PHY_READY_MASK) ? 1 : 0;
+}
+
+/*
+ * Read the 28-byte IDENTIFY address frame this PHY transmits out of
+ * the port configuration space into @id, one 32-bit word at a time.
+ */
+static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
+					struct sas_identify_frame *id)
+{
+	u32 frame[7];
+	int word;
+
+	for (word = 0; word < 7; word++) {
+		mvs_write_port_cfg_addr(mvi, port_id,
+					CONFIG_ID_FRAME0 + word * 4);
+		frame[word] = mvs_read_port_cfg_data(mvi, port_id);
+	}
+	memcpy(id, frame, sizeof(frame));
+}
+
+/*
+ * Read the IDENTIFY address frame received from the attached device
+ * into @id, logging each 32-bit word as it is fetched.
+ */
+static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
+					struct sas_identify_frame *id)
+{
+	u32 frame[7];
+	int word;
+
+	for (word = 0; word < 7; word++) {
+		mvs_write_port_cfg_addr(mvi, port_id,
+					CONFIG_ATT_ID_FRAME0 + word * 4);
+		frame[word] = mvs_read_port_cfg_data(mvi, port_id);
+		mv_dprintk("94xx phy %d atta frame %d %x.\n",
+			port_id + mvi->id * mvi->chip->n_phy, word,
+			frame[word]);
+	}
+	memcpy(id, frame, sizeof(frame));
+}
+
+/*
+ * Condense an IDENTIFY frame into the hardware's packed device-info
+ * word: device type in the low bits, initiator/target protocol flags,
+ * and the PHY identifier in bits 31:24.
+ */
+static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
+{
+	u32 info = id->dev_type;
+
+	if (id->stp_iport)
+		info |= PORT_DEV_STP_INIT;
+	if (id->smp_iport)
+		info |= PORT_DEV_SMP_INIT;
+	if (id->ssp_iport)
+		info |= PORT_DEV_SSP_INIT;
+	if (id->stp_tport)
+		info |= PORT_DEV_STP_TRGT;
+	if (id->smp_tport)
+		info |= PORT_DEV_SMP_TRGT;
+	if (id->ssp_tport)
+		info |= PORT_DEV_SSP_TRGT;
+
+	return info | ((u32)id->phy_id << 24);
+}
+
+/*
+ * The attached-device info word uses the same encoding as the local
+ * device info word, so simply reuse mvs_94xx_make_dev_info().
+ */
+static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
+{
+	return mvs_94xx_make_dev_info(id);
+}
+
+/*
+ * Refresh cached PHY state after link-up: decode the negotiated link
+ * rate from phy_status, read the transmitted IDENTIFY frame, and for
+ * SAS attachments also capture the attached device's identity and
+ * SAS address.
+ */
+static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
+				struct sas_identify_frame *id)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
+	sas_phy->linkrate =
+		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+	/* hardware rate code appears biased by 0x8 relative to the
+	 * libsas enum -- TODO confirm against enum sas_linkrate */
+	sas_phy->linkrate += 0x8;
+	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
+	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+	mvs_94xx_get_dev_identify_frame(mvi, i, id);
+	phy->dev_info = mvs_94xx_make_dev_info(id);
+
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		mvs_94xx_get_att_identify_frame(mvi, i, id);
+		phy->att_dev_info = mvs_94xx_make_att_info(id);
+		/* NOTE(review): byte-order/alignment-sensitive read --
+		 * id->sas_addr is a byte array cast to u64 */
+		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
+	} else {
+		/* SATA attachment: synthesize STP-target info, one phy */
+		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
+	}
+
+}
+
+/*
+ * Set min/max link rates for a PHY.  Not implemented for the 94xx yet
+ * (TODO); wired into the dispatch table so callers see a no-op.
+ * NOTE(review): not static, unlike the rest of this file's helpers.
+ */
+void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+			struct sas_phy_linkrates *rates)
+{
+	/* TODO */
+}
+
+#ifdef SUPPORT_TARGET
+/*
+ * Advertise SSP target capability on @phy_id by setting bit 27 in the
+ * first word of the transmitted IDENTIFY frame, mirroring the change
+ * into the cached dev_info.
+ * NOTE(review): these two helpers are named mvs_91xx_* in a file whose
+ * prefix is otherwise mvs_94xx_* -- consider renaming (the dispatch
+ * table references them by these names).
+ */
+static void
+mvs_91xx_enable_ssp_target(struct mvs_info *mvi, u32 phy_id)
+{
+	u32 dev_attr;
+	mvs_write_port_cfg_addr(mvi, phy_id,
+				CONFIG_ID_FRAME0);
+	dev_attr = mvs_read_port_cfg_data(mvi, phy_id);
+	dev_attr |= 1L<<27;
+	mvs_write_port_cfg_data(mvi, phy_id, dev_attr);
+	mvi->phy[phy_id].dev_info |= PORT_DEV_SSP_TRGT;
+}
+
+/* Inverse of mvs_91xx_enable_ssp_target(): clear the SSP target bit. */
+static void
+mvs_91xx_disable_ssp_target(struct mvs_info *mvi, u32 phy_id)
+{
+	u32 dev_attr;
+	mvs_write_port_cfg_addr(mvi, phy_id,
+				CONFIG_ID_FRAME0);
+	dev_attr = mvs_read_port_cfg_data(mvi, phy_id);
+	dev_attr &= ~(1L<<27);
+	mvs_write_port_cfg_data(mvi, phy_id, dev_attr);
+	mvi->phy[phy_id].dev_info &= ~PORT_DEV_SSP_TRGT;
+}
+#endif /* end SUPPORT_TARGET */
+
+/*
+ * Abort all active SATA commands by pulsing both STP register-set
+ * enable registers: save the current value, write 0, then restore.
+ */
+static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
+{
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(MVS_STP_REG_SET_0);
+	mw32(MVS_STP_REG_SET_0, 0);
+	mw32(MVS_STP_REG_SET_0, tmp);
+	tmp = mr32(MVS_STP_REG_SET_1);
+	mw32(MVS_STP_REG_SET_1, 0);
+	mw32(MVS_STP_REG_SET_1, tmp);
+}
+
+
+/*
+ * Read one 32-bit word from the flash controller's data register.
+ * The SPI registers live in the un-offset BAR2 window, hence the
+ * -0x10200 correction applied to regs_ex.
+ */
+u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	return mr32(SPI_RD_DATA_REG_94XX);
+}
+
+/*
+ * Write one 32-bit word to the flash controller's data register.
+ * NOTE(review): the same SPI_RD_DATA_REG_94XX offset serves both read
+ * and write (apparently a shared data register) -- confirm against the
+ * 88SE94xx datasheet.
+ */
+void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	/* fixed: statement was space-indented (checkpatch ERROR) */
+	mw32(SPI_RD_DATA_REG_94XX, data);
+}
+
+
+/*
+ * Assemble a flash-controller command word from opcode, direction,
+ * transfer length and (optionally) a flash address, without issuing
+ * it.  The composed word is returned through @dwCmd; always returns 0.
+ */
+int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+				u32      *dwCmd,
+				u8       cmd,
+				u8       read,
+				u8       length,
+				u32      addr
+				)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	u32 word = ((u32)cmd << 8) | ((u32)length << 4);
+
+	if (read)
+		word |= SPI_CTRL_READ_94XX;
+
+	if (addr != MV_MAX_U32) {
+		/* latch the 18-bit flash address and mark it valid */
+		mw32(SPI_ADDR_REG_94XX, addr & 0x0003FFFFL);
+		word |= SPI_ADDR_VLD_94XX;
+	}
+
+	*dwCmd = word;
+	return 0;
+}
+
+
+/*
+ * Kick off a previously built SPI command by writing it to the control
+ * register with the start bit set.  Always returns 0; completion is
+ * polled separately via mvs_94xx_spi_waitdataready().
+ */
+int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
+
+	return 0;
+}
+
+/*
+ * Poll until the flash controller clears its start bit, sleeping 10 ms
+ * between polls.  Returns 0 once idle, -1 after @timeout attempts.
+ */
+int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	u32 tries;
+
+	for (tries = 0; tries < timeout; tries++) {
+		if (!(mr32(SPI_CTRL_REG_94XX) & SPI_CTRL_SpiStart_94XX))
+			return 0;
+		msleep(10);
+	}
+
+	return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+/*
+ * Point every PRD entry from @from to the end of the table at the
+ * single DMA buffer @buf_dma/@buf_len (hotplug DMA workaround).
+ */
+void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+	struct mvs_prd *entry = (struct mvs_prd *)prd + from;
+	int remaining = MAX_SG_ENTRY - from;
+
+	while (remaining-- > 0) {
+		entry->addr = cpu_to_le64(buf_dma);
+		entry->im_len.len = cpu_to_le32(buf_len);
+		entry++;
+	}
+}
+#endif
+
+/*
+ * 94xx chip operations.  Positional initializer of struct mvs_dispatch
+ * (declared in mv_sas.h); entry order must match the structure layout
+ * exactly.  NULL entries are hooks the 94xx does not implement (the
+ * 64xx table fills the same slots with mvs_64xx_phy_work_around and
+ * mvs_64xx_stp_reset respectively).
+ */
+const struct mvs_dispatch mvs_94xx_dispatch = {
+	"mv94xx",			/* name reported for this chip family */
+	mvs_94xx_init,
+	NULL,				/* slot unused, as in the 64xx table */
+	mvs_94xx_ioremap,
+	mvs_94xx_iounmap,
+	mvs_94xx_isr,
+	mvs_94xx_isr_status,
+	mvs_94xx_interrupt_enable,
+	mvs_94xx_interrupt_disable,
+	/* generic register accessors shared with other chip families */
+	mvs_read_phy_ctl,
+	mvs_write_phy_ctl,
+	mvs_read_port_cfg_data,
+	mvs_write_port_cfg_data,
+	mvs_write_port_cfg_addr,
+	mvs_read_port_vsr_data,
+	mvs_write_port_vsr_data,
+	mvs_write_port_vsr_addr,
+	mvs_read_port_irq_stat,
+	mvs_write_port_irq_stat,
+	mvs_read_port_irq_mask,
+	mvs_write_port_irq_mask,
+	mvs_get_sas_addr,
+	mvs_94xx_command_active,
+	mvs_94xx_issue_stop,
+	mvs_start_delivery,
+	mvs_rx_update,
+	mvs_int_full,
+	mvs_94xx_assign_reg_set,
+	mvs_94xx_free_reg_set,
+	mvs_get_prd_size,
+	mvs_get_prd_count,
+	mvs_94xx_make_prd,
+	mvs_94xx_detect_porttype,
+	mvs_94xx_oob_done,
+	mvs_94xx_fix_phy_info,
+	NULL,				/* phy work-around: not needed on 94xx */
+	mvs_94xx_phy_set_link_rate,
+	mvs_hw_max_link_rate,
+	mvs_94xx_phy_disable,
+	mvs_94xx_phy_enable,
+	mvs_94xx_phy_reset,
+	NULL,				/* STP reset: not implemented */
+#ifdef SUPPORT_TARGET
+	/* note the 91xx-named helpers above */
+	mvs_91xx_enable_ssp_target,
+	mvs_91xx_disable_ssp_target,
+#endif
+	mvs_94xx_clear_active_cmds,
+	mvs_94xx_spi_read_data,
+	mvs_94xx_spi_write_data,
+	mvs_94xx_spi_buildcmd,
+	mvs_94xx_spi_issuecmd,
+	mvs_94xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	mvs_94xx_fix_dma,
+#endif
+};
+
diff --git a/mvsas_tgt/mv_94xx.h b/mvsas_tgt/mv_94xx.h
new file mode 100644 (file)
index 0000000..23ed9b1
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Marvell 88SE94xx hardware specific head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS94XX_REG_H_
+#define _MVS94XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE          SAS_LINK_RATE_6_0_GBPS
+
+enum hw_registers {
+       MVS_GBL_CTL             = 0x04,  /* global control */
+       MVS_GBL_INT_STAT        = 0x00,  /* global irq status */
+       MVS_GBL_PI              = 0x0C,  /* ports implemented bitmask */
+
+       MVS_PHY_CTL             = 0x40,  /* SOC PHY Control */
+       MVS_PORTS_IMP           = 0x9C,  /* SOC Port Implemented */
+
+       MVS_GBL_PORT_TYPE       = 0xa0,  /* port type */
+
+       MVS_CTL                 = 0x100, /* SAS/SATA port configuration */
+       MVS_PCS                 = 0x104, /* SAS/SATA port control/status */
+       MVS_CMD_LIST_LO         = 0x108, /* cmd list addr */
+       MVS_CMD_LIST_HI         = 0x10C, /* cmd list addr, upper 32 bits */
+       MVS_RX_FIS_LO           = 0x110, /* RX FIS list addr */
+       MVS_RX_FIS_HI           = 0x114, /* RX FIS list addr, upper 32 bits */
+       MVS_STP_REG_SET_0       = 0x118, /* STP/SATA Register Set Enable,
+                                           sets 0-31 (see r_reg_set_enable) */
+       MVS_TX_CFG              = 0x120, /* TX configuration */
+       MVS_TX_LO               = 0x124, /* TX (delivery) ring addr */
+       MVS_TX_HI               = 0x128, /* TX ring addr, upper 32 bits */
+
+       MVS_TX_PROD_IDX         = 0x12C, /* TX producer pointer */
+       MVS_TX_CONS_IDX         = 0x130, /* TX consumer pointer (RO) */
+       MVS_RX_CFG              = 0x134, /* RX configuration */
+       MVS_RX_LO               = 0x138, /* RX (completion) ring addr */
+       MVS_RX_HI               = 0x13C, /* RX ring addr, upper 32 bits */
+       MVS_RX_CONS_IDX         = 0x140, /* RX consumer pointer (RO) */
+
+       MVS_INT_COAL            = 0x148, /* Int coalescing config */
+       MVS_INT_COAL_TMOUT      = 0x14C, /* Int coalescing timeout */
+       MVS_INT_STAT            = 0x150, /* Central int status */
+       MVS_INT_MASK            = 0x154, /* Central int enable */
+       MVS_INT_STAT_SRS_0      = 0x158, /* SATA register set status */
+       MVS_INT_MASK_SRS_0      = 0x15C,
+       MVS_INT_STAT_SRS_1      = 0x160,
+       MVS_INT_MASK_SRS_1      = 0x164,
+       MVS_NON_NCQ_ERR_0       = 0x168, /* SRS Non-specific NCQ Error */
+       MVS_NON_NCQ_ERR_1       = 0x16C,
+       MVS_CMD_ADDR            = 0x170, /* Command register port (addr) */
+       MVS_CMD_DATA            = 0x174, /* Command register port (data) */
+       MVS_MEM_PARITY_ERR      = 0x178, /* Memory parity error */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_INT_STAT         = 0x180, /* port0 interrupt status */
+       MVS_P0_INT_MASK         = 0x184, /* port0 interrupt mask */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_INT_STAT         = 0x1A0, /* Port4 interrupt status */
+       MVS_P4_INT_MASK         = 0x1A4, /* Port4 interrupt enable mask */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_SER_CTLSTAT      = 0x1D0, /* port0 serial control/status */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_SER_CTLSTAT      = 0x1E0, /* port4 serial control/status */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_CFG_ADDR         = 0x200, /* port0 phy register address */
+       MVS_P0_CFG_DATA         = 0x204, /* port0 phy register data */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_CFG_ADDR         = 0x220, /* Port4 config address */
+       MVS_P4_CFG_DATA         = 0x224, /* Port4 config data */
+
+                                        /* phys 1-3 follow after this */
+       MVS_P0_VSR_ADDR         = 0x250, /* phy0 VSR address */
+       MVS_P0_VSR_DATA         = 0x254, /* phy0 VSR data */
+                                        /* phys 1-3 follow after this */
+                                        /* multiplexing */
+       MVS_P4_VSR_ADDR         = 0x250, /* phy4 VSR address (same offset as
+                                           phy0 — banked via multiplexing) */
+       MVS_PA_VSR_ADDR         = 0x290, /* All port VSR addr */
+       MVS_PA_VSR_PORT         = 0x294, /* All port VSR data */
+};
+
+enum pci_cfg_registers {               /* PCI config-space offsets */
+       PCR_PHY_CTL             = 0x40,
+       PCR_PHY_CTL2            = 0x90,
+       PCR_DEV_CTRL            = 0x78,
+       PCR_LINK_STAT           = 0x82, /* PCIe link status; read by mvs_show_pcie_usage() */
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+/*  SAS/SATA Vendor Specific Port Registers (byte offsets = index * 4) */
+enum sas_sata_vsp_regs {
+       VSR_PHY_STAT            = 0x00 * 4, /* Phy Status */
+       VSR_PHY_MODE1           = 0x01 * 4, /* phy tx */
+       VSR_PHY_MODE2           = 0x02 * 4, /* tx scc */
+       VSR_PHY_MODE3           = 0x03 * 4, /* pll */
+       VSR_PHY_MODE4           = 0x04 * 4, /* VCO */
+       VSR_PHY_MODE5           = 0x05 * 4, /* Rx */
+       VSR_PHY_MODE6           = 0x06 * 4, /* CDR */
+       VSR_PHY_MODE7           = 0x07 * 4, /* Impedance */
+       VSR_PHY_MODE8           = 0x08 * 4, /* Voltage */
+       VSR_PHY_MODE9           = 0x09 * 4, /* Test */
+       VSR_PHY_MODE10          = 0x0A * 4, /* Power */
+       VSR_PHY_MODE11          = 0x0B * 4, /* Phy Mode */
+       VSR_PHY_VS0             = 0x0C * 4, /* Vendor Specific 0 */
+       VSR_PHY_VS1             = 0x0D * 4, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+       PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+       PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),   /* NOTE(review): identical to the MIN mask — confirm MAX is not at a different shift */
+       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+       PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+                       (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+enum pci_interrupt_cause {
+       /*  MAIN_IRQ_CAUSE (R10200) Bits*/
+       IRQ_COM_IN_I2O_IOP0            = (1 << 0),
+       IRQ_COM_IN_I2O_IOP1            = (1 << 1),
+       IRQ_COM_IN_I2O_IOP2            = (1 << 2),
+       IRQ_COM_IN_I2O_IOP3            = (1 << 3),
+       IRQ_COM_OUT_I2O_HOS0           = (1 << 4),
+       IRQ_COM_OUT_I2O_HOS1           = (1 << 5),
+       IRQ_COM_OUT_I2O_HOS2           = (1 << 6),
+       IRQ_COM_OUT_I2O_HOS3           = (1 << 7),
+       IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8),
+       IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9),
+       IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10),
+       IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11),
+       IRQ_PCIF_DRBL0                 = (1 << 12),
+       IRQ_PCIF_DRBL1                 = (1 << 13),
+       IRQ_PCIF_DRBL2                 = (1 << 14),
+       IRQ_PCIF_DRBL3                 = (1 << 15),
+       IRQ_XOR_A                      = (1 << 16),
+       IRQ_XOR_B                      = (1 << 17),
+       IRQ_SAS_A                      = (1 << 18),
+       IRQ_SAS_B                      = (1 << 19),
+       IRQ_CPU_CNTRL                  = (1 << 20),
+       IRQ_GPIO                       = (1 << 21),
+       IRQ_UART                       = (1 << 22),
+       IRQ_SPI                        = (1 << 23),
+       IRQ_I2C                        = (1 << 24),
+       IRQ_SGPIO                      = (1 << 25),
+       IRQ_COM_ERR                    = (1 << 29),
+       IRQ_I2O_ERR                    = (1 << 30),
+       IRQ_PCIE_ERR                   = (1 << 31), /* NOTE(review): (1 << 31) overflows signed int; (1U << 31) as elsewhere would be safer */
+};
+
+#define MAX_SG_ENTRY           255     /* max PRD entries per command (returned by mvs_get_prd_count) */
+
+/* PRD "interrupt mode / length" word.  NOTE(review): mixed __le32/u8
+ * bitfield base types make the packing compiler-dependent — confirm the
+ * resulting 32-bit layout against the 94xx hardware spec. */
+struct mvs_prd_imt {
+       __le32                  len:22;         /* buffer length (22-bit, matches mvs_prd comment) */
+       u8                      _r_a:2;         /* presumably reserved — verify */
+       u8                      misc_ctl:4;
+       u8                      inter_sel:4;
+};
+
+struct mvs_prd {
+       /* 64-bit buffer address */
+       __le64                  addr;
+       /* 22-bit length */
+       struct mvs_prd_imt      im_len;
+} __attribute__ ((packed));
+
+#define SPI_CTRL_REG_94XX              0xc800  /* SPI control register */
+#define SPI_ADDR_REG_94XX              0xc804  /* SPI address register */
+#define SPI_WR_DATA_REG_94XX         0xc808    /* SPI write data */
+#define SPI_RD_DATA_REG_94XX           0xc80c  /* SPI read data */
+#define SPI_CTRL_READ_94XX             (1U << 2)       /* control: read op */
+#define SPI_ADDR_VLD_94XX              (1U << 1)       /* address valid */
+#define SPI_CTRL_SpiStart_94XX         (1U << 0)       /* start transaction */
+
+#define mv_ffc(x)   ffz(x)
+
+/*
+ * Find the first cleared (0) bit in a 64-bit word; -1 if all 64 are set.
+ * Fixes two bugs in the original: a high-half ffz() result of 0 (i.e.
+ * bit 32 is the first free bit) was wrongly reported as -1, and ffz()
+ * was invoked on a possibly all-ones half, which is undefined.
+ */
+static inline int
+mv_ffc64(u64 v)
+{
+       if ((u32)v != 0xffffffffU)
+               return mv_ffc((u32)v);
+       if ((u32)(v >> 32) != 0xffffffffU)
+               return 32 + mv_ffc((u32)(v >> 32));
+       return -1;
+}
+
+#define r_reg_set_enable(i) /* read SATA register-set enable: sets 0-31 live in REG_SET_0, 32+ in REG_SET_1 */ \
+       (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
+       mr32(MVS_STP_REG_SET_0))
+
+#define w_reg_set_enable(i, tmp) /* write counterpart of r_reg_set_enable() */ \
+       (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
+       mw32(MVS_STP_REG_SET_0, tmp))
+
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+#endif
+
diff --git a/mvsas_tgt/mv_chips.h b/mvsas_tgt/mv_chips.h
new file mode 100644 (file)
index 0000000..a67e1c4
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Marvell 88SE64xx/88SE94xx register IO interface
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#ifndef _MV_CHIPS_H_
+#define _MV_CHIPS_H_
+
+#define mr32(reg)      readl(regs + reg)
+#define mw32(reg, val) writel((val), regs + reg)
+#define mw32_f(reg, val)       do {                    \
+                               mw32(reg, val); \
+                               mr32(reg);      \
+                       } while (0)
+
+#define iow32(reg, val)        outl(val, (unsigned long)(regs + reg))
+#define ior32(reg)             inl((unsigned long)(regs + reg))
+#define iow16(reg, val)        outw((unsigned long)(val, regs + reg))
+#define ior16(reg)             inw((unsigned long)(regs + reg))
+#define iow8(reg, val)                 outb((unsigned long)(val, regs + reg))
+#define ior8(reg)              inb((unsigned long)(regs + reg))
+
+static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
+{
+       void __iomem *regs = mvi->regs; /* base used by mr32()/mw32() */
+       mw32(MVS_CMD_ADDR, addr);       /* select indirect register... */
+       return mr32(MVS_CMD_DATA);      /* ...then read it; order matters */
+}
+
+static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
+{
+       void __iomem *regs = mvi->regs; /* base used by mw32() */
+       mw32(MVS_CMD_ADDR, addr);       /* select indirect register... */
+       mw32(MVS_CMD_DATA, val);        /* ...then write it; order matters */
+}
+
+static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+       void __iomem *regs = mvi->regs; /* serial ctl/status is banked: ports 0-3 vs 4-7 */
+       u32 bank = (port < 4) ? MVS_P0_SER_CTLSTAT + port * 4 : MVS_P4_SER_CTLSTAT + (port - 4) * 4;
+       return mr32(bank);
+}
+
+static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+       void __iomem *regs = mvi->regs;
+       /* ports 0-3 and 4-7 live in two separate register banks */
+       u32 off = (port < 4) ? MVS_P0_SER_CTLSTAT + port * 4
+                       : MVS_P4_SER_CTLSTAT + (port - 4) * 4;
+       mw32(off, val);
+}
+
+static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
+                               u32 off2, u32 port)
+{
+       /* off addresses ports 0-3, off2 ports 4-7; 8 bytes per port */
+       if (port < 4)
+               return readl(mvi->regs + off + port * 8);
+       return readl(mvi->regs + off2 + (port - 4) * 8);
+}
+
+static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
+                               u32 port, u32 val)
+{
+       /* mirror of mvs_read_port(): pick the bank, index by port */
+       void __iomem *dst;
+
+       dst = (port < 4) ? mvi->regs + off + port * 8
+                       : mvi->regs + off2 + (port - 4) * 8;
+       writel(val, dst);
+}
+
+static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
+{                                      /* per-port phy config data */
+       return mvs_read_port(mvi, MVS_P0_CFG_DATA,
+                       MVS_P4_CFG_DATA, port);
+}
+
+static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_CFG_DATA,
+                       MVS_P4_CFG_DATA, port, val);
+}
+
+static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
+                                               u32 port, u32 addr)
+{
+       mvs_write_port(mvi, MVS_P0_CFG_ADDR,
+                       MVS_P4_CFG_ADDR, port, addr);
+       mdelay(10);     /* NOTE(review): undocumented 10 ms settle after address select — confirm requirement */
+}
+
+static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
+{                                      /* vendor-specific register data */
+       return mvs_read_port(mvi, MVS_P0_VSR_DATA,
+                       MVS_P4_VSR_DATA, port);
+}
+
+static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_VSR_DATA,
+                       MVS_P4_VSR_DATA, port, val);
+}
+
+static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
+                                               u32 port, u32 addr)
+{
+       mvs_write_port(mvi, MVS_P0_VSR_ADDR,
+                       MVS_P4_VSR_ADDR, port, addr);
+       mdelay(10);     /* NOTE(review): same undocumented settle delay as mvs_write_port_cfg_addr() */
+}
+
+static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
+{                                      /* per-port interrupt status */
+       return mvs_read_port(mvi, MVS_P0_INT_STAT,
+                       MVS_P4_INT_STAT, port);
+}
+
+static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_INT_STAT,
+                       MVS_P4_INT_STAT, port, val);
+}
+
+static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
+{                                      /* per-port interrupt enable mask */
+       return mvs_read_port(mvi, MVS_P0_INT_MASK,
+                       MVS_P4_INT_MASK, port);
+
+}
+
+static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_INT_MASK,
+                       MVS_P4_INT_MASK, port, val);
+}
+
+/* One-time phy tuning/workaround writes via the indirect command port. */
+static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+{
+       u32 tmp;
+
+       /* workaround for SATA R-ERR, to ignore phy glitch */
+       tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+       tmp &= ~(1 << 9);
+       tmp |= (1 << 10);
+       mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+
+       /* enable retry 127 times */
+       mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
+
+       /* extend open frame timeout to max */
+       tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
+       tmp &= ~0xffff;
+       tmp |= 0x3fff;  /* NOTE(review): comment says "max" but only 0x3fff of the cleared 16 bits is set — confirm field width */
+       mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
+
+       /* workaround for WDTIMEOUT , set to 550 ms */
+       mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
+
+       /* not to halt for different port op during wideport link change */
+       mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+
+       /* workaround for Seagate disk not-found OOB sequence, recv
+        * COMINIT before sending out COMWAKE */
+       tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+       tmp &= 0x0000ffff;
+       tmp |= 0x00fa0000;
+       mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
+       tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+       tmp &= 0x1fffffff;
+       tmp |= (2U << 29);      /* 8 ms retry */
+       mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+}
+
+static inline void mvs_int_sata(struct mvs_info *mvi)
+{
+       u32 tmp;
+       void __iomem *regs = mvi->regs;
+       tmp = mr32(MVS_INT_STAT_SRS_0);
+       if (tmp)
+               mw32(MVS_INT_STAT_SRS_0, tmp);  /* write value back to ack — presumably write-1-to-clear; confirm */
+       MVS_CHIP_DISP->clear_active_cmds(mvi);  /* chip-specific SATA cmd cleanup */
+}
+
+/* Central interrupt service: RX ring, then per-port events, then SATA. */
+static inline void mvs_int_full(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp, stat;
+       int i;
+
+       stat = mr32(MVS_INT_STAT);      /* snapshot central status */
+       mvs_int_rx(mvi, false);
+
+       /* CINT_PORT/CINT_PORT_STOPPED are per-port bits, shifted by port */
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
+               if (tmp)
+                       mvs_int_port(mvi, i, tmp);
+       }
+
+       if (stat & CINT_SRS)
+               mvs_int_sata(mvi);
+
+       mw32(MVS_INT_STAT, stat);       /* ack the bits we observed */
+}
+
+static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
+{
+       void __iomem *regs = mvi->regs;
+       mw32(MVS_TX_PROD_IDX, tx);      /* publish new TX (delivery) ring producer index */
+}
+
+static inline u32 mvs_rx_update(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       return mr32(MVS_RX_CONS_IDX);   /* hardware's RX (completion) ring consumer index (RO) */
+}
+
+static inline u32 mvs_get_prd_size(void)
+{
+       return sizeof(struct mvs_prd);  /* per-entry PRD size for this chip family */
+}
+
+static inline u32 mvs_get_prd_count(void)
+{
+       return MAX_SG_ENTRY;    /* max PRD entries per command */
+}
+
+static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
+{
+       u16 link_stat, link_spd;
+       static const char *spd[] = {    /* static const: read-only table, no per-call rebuild */
+               "UnKnown",
+               "2.5",
+               "5.0",
+       };
+       if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
+               return;         /* skip SoC parts and all but the first HBA */
+
+       pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
+       link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
+       if (link_spd >= 3)
+               link_spd = 0;   /* clamp unknown speed codes to "UnKnown" */
+       dev_printk(KERN_INFO, mvi->dev,
+               "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
+              (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
+              spd[link_spd]);
+}
+
+static inline u32 mvs_hw_max_link_rate(void)
+{
+       return MAX_LINK_RATE;   /* compile-time value from the chip-specific header */
+}
+
+#endif  /* _MV_CHIPS_H_ */
+
diff --git a/mvsas_tgt/mv_defs.h b/mvsas_tgt/mv_defs.h
new file mode 100644 (file)
index 0000000..6f8e908
--- /dev/null
@@ -0,0 +1,514 @@
+/*
+ * Marvell 88SE64xx/88SE94xx const head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_DEFS_H_
+#define _MV_DEFS_H_
+
+
+enum chip_flavors {            /* supported 64xx/94xx controller variants */
+       chip_6320,
+       chip_6440,
+       chip_6485,
+       chip_9480,
+       chip_9180,
+};
+
+/* driver compile-time configuration */
+/* driver compile-time configuration */
+enum driver_configuration {
+       MVS_SLOTS               = 512,  /* command slots */
+       MVS_TX_RING_SZ          = 1024, /* TX ring size (12-bit) */
+       MVS_RX_RING_SZ          = 1024, /* RX ring size (12-bit) */
+                                       /* software requires power-of-2
+                                          ring size */
+       MVS_SOC_SLOTS           = 64,
+       MVS_SOC_TX_RING_SZ      = MVS_SOC_SLOTS * 2,
+       MVS_SOC_RX_RING_SZ      = MVS_SOC_SLOTS * 2,
+
+       MVS_SLOT_BUF_SZ         = 8192, /* cmd tbl + IU + status + PRD */
+       MVS_SSP_CMD_SZ          = 64,   /* SSP command table buffer size */
+       MVS_ATA_CMD_SZ          = 96,   /* SATA command table buffer size */
+       MVS_OAF_SZ              = 64,   /* Open address frame buffer size */
+#ifdef SUPPORT_TARGET
+       MVS_TARGET_QUEUE        = 32,
+#endif
+       MVS_QUEUE_SIZE  = 32,   /* Support Queue depth */
+       MVS_CAN_QUEUE           = MVS_SLOTS - 2,        /* SCSI Queue depth (2 slots reserved) */
+       MVS_SOC_CAN_QUEUE       = MVS_SOC_SLOTS - 2,
+#ifdef SUPPORT_TARGET
+       MVS_MAX_STP_FRAME               = 0x10,
+       MVS_MAX_SSP_FRAME               = 0x4D,
+       MVS_MAX_SMP_FRAME               = 0x101,
+#endif
+};
+
+/* unchangeable hardware details */
+/* unchangeable hardware details */
+enum hardware_details {
+       MVS_MAX_PHYS            = 8,    /* max. possible phys */
+       MVS_MAX_PORTS           = 8,    /* max. possible ports */
+       MVS_SOC_PHYS            = 4,    /* soc phys */
+       MVS_SOC_PORTS           = 4,    /* soc ports */
+       MVS_MAX_DEVICES = 1024, /* max supported device */
+};
+
+/* peripheral registers (BAR2) */
+/* peripheral registers (BAR2) */
+enum peripheral_registers {    /* offsets within BAR2 */
+       SPI_CTL                 = 0x10, /* EEPROM control */
+       SPI_CMD                 = 0x14, /* EEPROM command */
+       SPI_DATA                = 0x18, /* EEPROM data */
+};
+
+enum peripheral_register_bits {        /* flag bits used with the peripheral registers above */
+       TWSI_RDY                = (1U << 7),    /* EEPROM interface ready */
+       TWSI_RD                 = (1U << 4),    /* EEPROM read access */
+
+       SPI_ADDR_MASK           = 0x3ffff,      /* bits 17:0 */
+};
+
+enum hw_register_bits {
+       /* MVS_GBL_CTL */
+       INT_EN                  = (1U << 1),    /* Global int enable */
+       HBA_RST                 = (1U << 0),    /* HBA reset */
+
+       /* MVS_GBL_INT_STAT */
+       INT_XOR                 = (1U << 4),    /* XOR engine event */
+       INT_SAS_SATA            = (1U << 0),    /* SAS/SATA event */
+
+       /* MVS_GBL_PORT_TYPE */                 /* shl for ports 1-3 */
+       SATA_TARGET             = (1U << 16),   /* port0 SATA target enable */
+       MODE_AUTO_DET_PORT7 = (1U << 15),       /* SAS/SATA autodetect, one bit per port */
+       MODE_AUTO_DET_PORT6 = (1U << 14),
+       MODE_AUTO_DET_PORT5 = (1U << 13),
+       MODE_AUTO_DET_PORT4 = (1U << 12),
+       MODE_AUTO_DET_PORT3 = (1U << 11),
+       MODE_AUTO_DET_PORT2 = (1U << 10),
+       MODE_AUTO_DET_PORT1 = (1U << 9),
+       MODE_AUTO_DET_PORT0 = (1U << 8),
+       MODE_AUTO_DET_EN    =   MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+                               MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+                               MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+                               MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+       MODE_SAS_PORT7_MASK = (1U << 7),  /* SAS(1)/SATA(0) mode, one bit per port */
+       MODE_SAS_PORT6_MASK = (1U << 6),
+       MODE_SAS_PORT5_MASK = (1U << 5),
+       MODE_SAS_PORT4_MASK = (1U << 4),
+       MODE_SAS_PORT3_MASK = (1U << 3),
+       MODE_SAS_PORT2_MASK = (1U << 2),
+       MODE_SAS_PORT1_MASK = (1U << 1),
+       MODE_SAS_PORT0_MASK = (1U << 0),
+       MODE_SAS_SATA   =       MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+                               MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+                               MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+                               MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+                               /* SAS_MODE value may be
+                                * dictated (in hw) by values
+                                * of SATA_TARGET & AUTO_DET
+                                */
+
+       /* MVS_TX_CFG */
+       TX_EN                   = (1U << 16),   /* Enable TX */
+       TX_RING_SZ_MASK         = 0xfff,        /* TX ring size, bits 11:0 */
+
+       /* MVS_RX_CFG */
+       RX_EN                   = (1U << 16),   /* Enable RX */
+       RX_RING_SZ_MASK         = 0xfff,        /* RX ring size, bits 11:0 */
+
+       /* MVS_INT_COAL */
+       COAL_EN                 = (1U << 16),   /* Enable int coalescing */
+
+       /* MVS_INT_STAT, MVS_INT_MASK */
+       CINT_I2C                = (1U << 31),   /* I2C event */
+       CINT_SW0                = (1U << 30),   /* software event 0 */
+       CINT_SW1                = (1U << 29),   /* software event 1 */
+       CINT_PRD_BC             = (1U << 28),   /* PRD BC err for read cmd */
+       CINT_DMA_PCIE           = (1U << 27),   /* DMA to PCIE timeout */
+       CINT_MEM                = (1U << 26),   /* int mem parity err */
+       CINT_I2C_SLAVE          = (1U << 25),   /* slave I2C event */
+       CINT_SRS                = (1U << 3),    /* SRS event */
+       CINT_CI_STOP            = (1U << 1),    /* cmd issue stopped */
+       CINT_DONE               = (1U << 0),    /* cmd completion */
+
+                                               /* shl for ports 1-3 */
+       CINT_PORT_STOPPED       = (1U << 16),   /* port0 stopped */
+       CINT_PORT               = (1U << 8),    /* port0 event */
+       CINT_PORT_MASK_OFFSET   = 8,
+       CINT_PORT_MASK          = (0xFF << CINT_PORT_MASK_OFFSET),
+       CINT_PHY_MASK_OFFSET    = 4,
+       CINT_PHY_MASK           = (0x0F << CINT_PHY_MASK_OFFSET),
+
+       /* TX (delivery) ring bits */
+       TXQ_CMD_SHIFT           = 29,
+       TXQ_CMD_SSP             = 1,            /* SSP protocol */
+       TXQ_CMD_SMP             = 2,            /* SMP protocol */
+       TXQ_CMD_STP             = 3,            /* STP/SATA protocol */
+       TXQ_CMD_SSP_FREE_LIST   = 4,            /* add to SSP targ free list */
+       TXQ_CMD_SLOT_RESET      = 7,            /* reset command slot */
+       TXQ_MODE_I              = (1U << 28),   /* mode: 0=target,1=initiator */
+       TXQ_MODE_TARGET         = 0,
+       TXQ_MODE_INITIATOR      = 1,
+       TXQ_PRIO_HI             = (1U << 27),   /* priority: 0=normal, 1=high */
+       TXQ_PRI_NORMAL          = 0,
+       TXQ_PRI_HIGH            = 1,
+       TXQ_SRS_SHIFT           = 20,           /* SATA register set */
+       TXQ_SRS_MASK            = 0x7f,
+       TXQ_PHY_SHIFT           = 12,           /* PHY bitmap */
+       TXQ_PHY_MASK            = 0xff,
+       TXQ_SLOT_MASK           = 0xfff,        /* slot number */
+
+       /* RX (completion) ring bits */
+       RXQ_GOOD                = (1U << 23),   /* Response good */
+       RXQ_SLOT_RESET          = (1U << 21),   /* Slot reset complete */
+       RXQ_CMD_RX              = (1U << 20),   /* target cmd received */
+       RXQ_ATTN                = (1U << 19),   /* attention */
+       RXQ_RSP                 = (1U << 18),   /* response frame xfer'd */
+       RXQ_ERR                 = (1U << 17),   /* err info rec xfer'd */
+       RXQ_DONE                = (1U << 16),   /* cmd complete */
+       RXQ_SLOT_MASK           = 0xfff,        /* slot number */
+
+       /* mvs_cmd_hdr bits */
+       MCH_PRD_LEN_SHIFT       = 16,           /* 16-bit PRD table len */
+       MCH_SSP_FR_TYPE_SHIFT   = 13,           /* SSP frame type */
+
+                                               /* SSP initiator only */
+       MCH_SSP_FR_CMD          = 0x0,          /* COMMAND frame */
+
+                                               /* SSP initiator or target */
+       MCH_SSP_FR_TASK         = 0x1,          /* TASK frame */
+
+                                               /* SSP target only */
+       MCH_SSP_FR_XFER_RDY     = 0x4,          /* XFER_RDY frame */
+       MCH_SSP_FR_RESP         = 0x5,          /* RESPONSE frame */
+       MCH_SSP_FR_READ         = 0x6,          /* Read DATA frame(s) */
+       MCH_SSP_FR_READ_RESP    = 0x7,          /* ditto, plus RESPONSE */
+
+       MCH_SSP_MODE_PASSTHRU   = 1,
+       MCH_SSP_MODE_NORMAL     = 0,
+       MCH_PASSTHRU            = (1U << 12),   /* pass-through (SSP) */
+       MCH_FBURST              = (1U << 11),   /* first burst (SSP) */
+       MCH_CHK_LEN             = (1U << 10),   /* chk xfer len (SSP) */
+       MCH_RETRY               = (1U << 9),    /* tport layer retry (SSP) */
+       MCH_PROTECTION          = (1U << 8),    /* protection info rec (SSP) */
+       MCH_RESET               = (1U << 7),    /* Reset (STP/SATA) */
+       MCH_FPDMA               = (1U << 6),    /* First party DMA (STP/SATA) */
+       MCH_ATAPI               = (1U << 5),    /* ATAPI (STP/SATA) */
+       MCH_BIST                = (1U << 4),    /* BIST activate (STP/SATA) */
+       MCH_PMP_MASK            = 0xf,          /* PMP from cmd FIS (STP/SATA)*/
+
+       CCTL_RST                = (1U << 5),    /* port logic reset */
+
+                                               /* 0(LSB first), 1(MSB first) */
+       CCTL_ENDIAN_DATA        = (1U << 3),    /* PRD data */
+       CCTL_ENDIAN_RSP         = (1U << 2),    /* response frame */
+       CCTL_ENDIAN_OPEN        = (1U << 1),    /* open address frame */
+       CCTL_ENDIAN_CMD         = (1U << 0),    /* command table */
+
+       /* MVS_Px_SER_CTLSTAT (per-phy control) */
+       PHY_SSP_RST             = (1U << 3),    /* reset SSP link layer */
+       PHY_BCAST_CHG           = (1U << 2),    /* broadcast(change) notif */
+       PHY_RST_HARD            = (1U << 1),    /* hard reset + phy reset */
+       PHY_RST                 = (1U << 0),    /* phy reset */
+       PHY_READY_MASK          = (1U << 20),
+
+       /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+       PHYEV_DEC_ERR           = (1U << 24),   /* Phy Decoding Error */
+       PHYEV_DCDR_ERR          = (1U << 23),   /* STP Decoder Error */
+       PHYEV_CRC_ERR           = (1U << 22),   /* STP CRC Error */
+       PHYEV_UNASSOC_FIS       = (1U << 19),   /* unassociated FIS rx'd */
+       PHYEV_AN                = (1U << 18),   /* SATA async notification */
+       PHYEV_BIST_ACT          = (1U << 17),   /* BIST activate FIS */
+       PHYEV_SIG_FIS           = (1U << 16),   /* signature FIS */
+       PHYEV_POOF              = (1U << 12),   /* phy ready from 1 -> 0 */
+       PHYEV_IU_BIG            = (1U << 11),   /* IU too long err */
+       PHYEV_IU_SMALL          = (1U << 10),   /* IU too short err */
+       PHYEV_UNK_TAG           = (1U << 9),    /* unknown tag */
+       PHYEV_BROAD_CH          = (1U << 8),    /* broadcast(CHANGE) */
+       PHYEV_COMWAKE           = (1U << 7),    /* COMWAKE rx'd */
+       PHYEV_PORT_SEL          = (1U << 6),    /* port selector present */
+       PHYEV_HARD_RST          = (1U << 5),    /* hard reset rx'd */
+       PHYEV_ID_TMOUT          = (1U << 4),    /* identify timeout */
+       PHYEV_ID_FAIL           = (1U << 3),    /* identify failed */
+       PHYEV_ID_DONE           = (1U << 2),    /* identify done */
+       PHYEV_HARD_RST_DONE     = (1U << 1),    /* hard reset done */
+       PHYEV_RDY_CH            = (1U << 0),    /* phy ready changed state */
+
+       /* MVS_PCS */
+       PCS_EN_SATA_REG_SHIFT   = (16),         /* Enable SATA Register Set */
+       PCS_EN_PORT_XMT_SHIFT   = (12),         /* Enable Port Transmit */
+       PCS_EN_PORT_XMT_SHIFT2  = (8),          /* For 6485 */
+       PCS_SATA_RETRY          = (1U << 8),    /* retry ctl FIS on R_ERR */
+       PCS_RSP_RX_EN           = (1U << 7),    /* raw response rx */
+       PCS_SATA_RETRY_2        = (1U << 6),    /* For 9180 */
+       PCS_SELF_CLEAR          = (1U << 5),    /* self-clearing int mode */
+       PCS_FIS_RX_EN           = (1U << 4),    /* FIS rx enable */
+       PCS_CMD_STOP_ERR        = (1U << 3),    /* cmd stop-on-err enable */
+       PCS_CMD_RST             = (1U << 1),    /* reset cmd issue */
+       PCS_CMD_EN              = (1U << 0),    /* enable cmd issue */
+
+       /* Port n Attached Device Info */
+       PORT_DEV_SSP_TRGT       = (1U << 19),
+       PORT_DEV_SMP_TRGT       = (1U << 18),
+       PORT_DEV_STP_TRGT       = (1U << 17),
+       PORT_DEV_SSP_INIT       = (1U << 11),
+       PORT_DEV_SMP_INIT       = (1U << 10),
+       PORT_DEV_STP_INIT       = (1U << 9),
+       PORT_PHY_ID_MASK        = (0xFFU << 24),
+       PORT_SSP_TRGT_MASK      = (0x1U << 19),
+       PORT_SSP_INIT_MASK      = (0x1U << 11),
+       PORT_DEV_TRGT_MASK      = (0x7U << 17),
+       PORT_DEV_INIT_MASK      = (0x7U << 9),
+       PORT_DEV_TYPE_MASK      = (0x7U << 0),
+
+       /* Port n PHY Status */
+       PHY_RDY                 = (1U << 2),
+       PHY_DW_SYNC             = (1U << 1),
+       PHY_OOB_DTCTD           = (1U << 0),
+
+       /* VSR */
+       /* PHYMODE 6 (CDB) */
+       PHY_MODE6_LATECLK       = (1U << 29),   /* Lock Clock */
+       PHY_MODE6_DTL_SPEED     = (1U << 27),   /* Digital Loop Speed */
+       PHY_MODE6_FC_ORDER      = (1U << 26),   /* Fibre Channel Mode Order*/
+       PHY_MODE6_MUCNT_EN      = (1U << 24),   /* u Count Enable */
+       PHY_MODE6_SEL_MUCNT_LEN = (1U << 22),   /* Training Length Select */
+       PHY_MODE6_SELMUPI       = (1U << 20),   /* Phase Multi Select (init) */
+       PHY_MODE6_SELMUPF       = (1U << 18),   /* Phase Multi Select (final) */
+       PHY_MODE6_SELMUFF       = (1U << 16),   /* Freq Loop Multi Sel(final) */
+       PHY_MODE6_SELMUFI       = (1U << 14),   /* Freq Loop Multi Sel(init) */
+       PHY_MODE6_FREEZE_LOOP   = (1U << 12),   /* Freeze Rx CDR Loop */
+       PHY_MODE6_INT_RXFOFFS   = (1U << 3),    /* Rx CDR Freq Loop Enable */
+       PHY_MODE6_FRC_RXFOFFS   = (1U << 2),    /* Initial Rx CDR Offset */
+       PHY_MODE6_STAU_0D8      = (1U << 1),    /* Rx CDR Freq Loop Saturate */
+       PHY_MODE6_RXSAT_DIS     = (1U << 0),    /* Saturate Ctl */
+};
+
+/* SAS/SATA configuration port registers, aka phy registers */
+enum sas_sata_config_port_regs {
+       PHYR_IDENTIFY           = 0x00, /* info for IDENTIFY frame */
+       PHYR_ADDR_LO            = 0x04, /* my SAS address (low) */
+       PHYR_ADDR_HI            = 0x08, /* my SAS address (high) */
+       PHYR_ATT_DEV_INFO       = 0x0C, /* attached device info */
+       PHYR_ATT_ADDR_LO        = 0x10, /* attached dev SAS addr (low) */
+       PHYR_ATT_ADDR_HI        = 0x14, /* attached dev SAS addr (high) */
+       PHYR_SATA_CTL           = 0x18, /* SATA control */
+       PHYR_PHY_STAT           = 0x1C, /* PHY status */
+       PHYR_SATA_SIG0  = 0x20, /* port SATA signature FIS (bytes 0-3) */
+       PHYR_SATA_SIG1  = 0x24, /* port SATA signature FIS (bytes 4-7) */
+       PHYR_SATA_SIG2  = 0x28, /* port SATA signature FIS (bytes 8-11) */
+       PHYR_SATA_SIG3  = 0x2c, /* port SATA signature FIS (bytes 12-15) */
+       PHYR_R_ERR_COUNT        = 0x30, /* port R_ERR count register */
+       PHYR_CRC_ERR_COUNT      = 0x34, /* port CRC error count register */
+       PHYR_WIDE_PORT  = 0x38, /* wide port participating */
+       PHYR_CURRENT0           = 0x80, /* current connection info 0 */
+       PHYR_CURRENT1           = 0x84, /* current connection info 1 */
+       PHYR_CURRENT2           = 0x88, /* current connection info 2 */
+       CONFIG_ID_FRAME0       = 0x100, /* Port device ID frame register 0 */
+       CONFIG_ID_FRAME1       = 0x104, /* Port device ID frame register 1 */
+       CONFIG_ID_FRAME2       = 0x108, /* Port device ID frame register 2 */
+       CONFIG_ID_FRAME3       = 0x10c, /* Port device ID frame register 3 */
+       CONFIG_ID_FRAME4       = 0x110, /* Port device ID frame register 4 */
+       CONFIG_ID_FRAME5       = 0x114, /* Port device ID frame register 5 */
+       CONFIG_ID_FRAME6       = 0x118, /* Port device ID frame register 6 */
+       CONFIG_ATT_ID_FRAME0   = 0x11c, /* attached ID frame register 0 */
+       CONFIG_ATT_ID_FRAME1   = 0x120, /* attached ID frame register 1 */
+       CONFIG_ATT_ID_FRAME2   = 0x124, /* attached ID frame register 2 */
+       CONFIG_ATT_ID_FRAME3   = 0x128, /* attached ID frame register 3 */
+       CONFIG_ATT_ID_FRAME4   = 0x12c, /* attached ID frame register 4 */
+       CONFIG_ATT_ID_FRAME5   = 0x130, /* attached ID frame register 5 */
+       CONFIG_ATT_ID_FRAME6   = 0x134, /* attached ID frame register 6 */
+};
+
+enum sas_cmd_port_registers {
+       CMD_CMRST_OOB_DET       = 0x100, /* COMRESET OOB detect register */
+       CMD_CMWK_OOB_DET        = 0x104, /* COMWAKE OOB detect register */
+       CMD_CMSAS_OOB_DET       = 0x108, /* COMSAS OOB detect register */
+       CMD_BRST_OOB_DET        = 0x10c, /* burst OOB detect register */
+       CMD_OOB_SPACE   = 0x110, /* OOB space control register */
+       CMD_OOB_BURST   = 0x114, /* OOB burst control register */
+       CMD_PHY_TIMER           = 0x118, /* PHY timer control register */
+       CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
+       CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
+       CMD_SAS_CTL0            = 0x124, /* SAS control register 0 */
+       CMD_SAS_CTL1            = 0x128, /* SAS control register 1 */
+       CMD_SAS_CTL2            = 0x12c, /* SAS control register 2 */
+       CMD_SAS_CTL3            = 0x130, /* SAS control register 3 */
+       CMD_ID_TEST             = 0x134, /* ID test register */
+       CMD_PL_TIMER            = 0x138, /* PL timer register */
+       CMD_WD_TIMER            = 0x13c, /* WD timer register */
+       CMD_PORT_SEL_COUNT      = 0x140, /* port selector count register */
+       CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
+       CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
+       CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
+       CMD_PORT_MEM_CTL0       = 0x150, /* Port Memory Control 0 */
+       CMD_PORT_MEM_CTL1       = 0x154, /* Port Memory Control 1 */
+       CMD_SATA_PORT_MEM_CTL0  = 0x158, /* SATA Port Memory Control 0 */
+       CMD_SATA_PORT_MEM_CTL1  = 0x15c, /* SATA Port Memory Control 1 */
+       CMD_XOR_MEM_BIST_CTL    = 0x160, /* XOR Memory BIST Control */
+       CMD_XOR_MEM_BIST_STAT   = 0x164, /* XOR Memory BIST Status */
+       CMD_DMA_MEM_BIST_CTL    = 0x168, /* DMA Memory BIST Control */
+       CMD_DMA_MEM_BIST_STAT   = 0x16c, /* DMA Memory BIST Status */
+       CMD_PORT_MEM_BIST_CTL   = 0x170, /* Port Memory BIST Control */
+       CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
+       CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
+       CMD_STP_MEM_BIST_CTL    = 0x17c, /* STP Memory BIST Control */
+       CMD_STP_MEM_BIST_STAT0  = 0x180, /* STP Memory BIST Status 0 */
+       CMD_STP_MEM_BIST_STAT1  = 0x184, /* STP Memory BIST Status 1 */
+       CMD_RESET_COUNT         = 0x188, /* Reset Count */
+       CMD_MONTR_DATA_SEL      = 0x18C, /* Monitor Data/Select */
+       CMD_PLL_PHY_CONFIG      = 0x190, /* PLL/PHY Configuration */
+       CMD_PHY_CTL             = 0x194, /* PHY Control and Status */
+       CMD_PHY_TEST_COUNT0     = 0x198, /* Phy Test Count 0 */
+       CMD_PHY_TEST_COUNT1     = 0x19C, /* Phy Test Count 1 */
+       CMD_PHY_TEST_COUNT2     = 0x1A0, /* Phy Test Count 2 */
+       CMD_APP_ERR_CONFIG      = 0x1A4, /* Application Error Configuration */
+       CMD_PND_FIFO_CTL0       = 0x1A8, /* Pending FIFO Control 0 */
+       CMD_HOST_CTL            = 0x1AC, /* Host Control Status */
+       CMD_HOST_WR_DATA        = 0x1B0, /* Host Write Data */
+       CMD_HOST_RD_DATA        = 0x1B4, /* Host Read Data */
+       CMD_PHY_MODE_21         = 0x1B8, /* Phy Mode 21 */
+       CMD_SL_MODE0            = 0x1BC, /* SL Mode 0 */
+       CMD_SL_MODE1            = 0x1C0, /* SL Mode 1 */
+       CMD_PND_FIFO_CTL1       = 0x1C4, /* Pending FIFO Control 1 */
+};
+
+enum mvs_info_flags {
+       MVF_MSI         = (1U << 0),    /* MSI is enabled */
+       MVF_PHY_PWR_FIX = (1U << 1),    /* bug workaround */
+       MVF_FLAG_SOC            = (1U << 2),    /* SoC integrated controllers */
+#ifdef SUPPORT_TARGET
+       MVF_TARGET_MODE_ENABLE  = (1U << 3),    /* Target Mode Enable */
+       MVF_HOST_SHUTTING_DOWN  = (1U << 4),    /* Shutting down HBA */
+#endif
+};
+
+enum mvs_event_flags {
+       PHY_PLUG_EVENT  = (3U),         /* mask: PHY_PLUG_IN | PHY_PLUG_OUT */
+       PHY_PLUG_IN             = (1U << 0),    /* phy plug in */
+       PHY_PLUG_OUT            = (1U << 1),    /* phy plug out */
+};
+
+/* role/protocol bits for a port; SAS/SATA and initiator/target combine */
+enum mvs_port_type {
+       PORT_TGT_MASK   =  (1U << 5),
+       PORT_INIT_PORT  =  (1U << 4),
+       PORT_TGT_PORT   =  (1U << 3),
+       PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
+       PORT_TYPE_SAS   =  (1U << 1),
+       PORT_TYPE_SATA  =  (1U << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+       /* SSP */
+       SSP_F_H         =  0x00,
+       SSP_F_IU        =  0x18,
+       SSP_F_MAX       =  0x4D,
+       /* STP */
+       STP_CMD_FIS     =  0x00,
+       STP_ATAPI_CMD   =  0x40,
+       STP_F_MAX       =  0x10,
+       /* SMP */
+       SMP_F_T         =  0x00,
+       SMP_F_DEP       =  0x01,
+       SMP_F_MAX       =  0x101,
+};
+
+/* layout of the per-slot status buffer */
+enum status_buffer {
+       SB_EIR_OFF      =  0x00,        /* Error Information Record */
+       SB_RFB_OFF      =  0x08,        /* Response Frame Buffer */
+       SB_RFB_MAX      =  0x400,       /* RFB size */
+};
+
+enum error_info_rec {
+       CMD_ISS_STPD    = (1U << 31),   /* Cmd Issue Stopped */
+       CMD_PI_ERR      = (1U << 30),   /* Protection info error; see
+                                          error_info_rec_2 below */
+       RSP_OVER        = (1U << 29),   /* rsp buffer overflow */
+       RETRY_LIM       = (1U << 28),   /* FIS/frame retry limit exceeded */
+       UNK_FIS         = (1U << 27),   /* unknown FIS */
+       DMA_TERM        = (1U << 26),   /* DMA terminate primitive rx'd */
+       SYNC_ERR        = (1U << 25),   /* SYNC rx'd during frame xmit */
+       TFILE_ERR       = (1U << 24),   /* SATA taskfile Error bit set */
+       R_ERR           = (1U << 23),   /* SATA returned R_ERR prim */
+       RD_OFS          = (1U << 20),   /* Read DATA frame invalid offset */
+       XFER_RDY_OFS    = (1U << 19),   /* XFER_RDY offset error */
+       UNEXP_XFER_RDY  = (1U << 18),   /* unexpected XFER_RDY error */
+       DATA_OVER_UNDER = (1U << 16),   /* data overflow/underflow */
+       INTERLOCK       = (1U << 15),   /* interlock error */
+       NAK             = (1U << 14),   /* NAK rx'd */
+       ACK_NAK_TO      = (1U << 13),   /* ACK/NAK timeout */
+       CXN_CLOSED      = (1U << 12),   /* cxn closed w/out ack/nak */
+       OPEN_TO         = (1U << 11),   /* I_T nexus lost, open cxn timeout */
+       PATH_BLOCKED    = (1U << 10),   /* I_T nexus lost, pathway blocked */
+       NO_DEST         = (1U << 9),    /* I_T nexus lost, no destination */
+       STP_RES_BSY     = (1U << 8),    /* STP resources busy */
+       BREAK           = (1U << 7),    /* break received */
+       BAD_DEST        = (1U << 6),    /* bad destination */
+       BAD_PROTO       = (1U << 5),    /* protocol not supported */
+       BAD_RATE        = (1U << 4),    /* cxn rate not supported */
+       WRONG_DEST      = (1U << 3),    /* wrong destination error */
+       CREDIT_TO       = (1U << 2),    /* credit timeout */
+       WDOG_TO         = (1U << 1),    /* watchdog timeout */
+       BUF_PAR         = (1U << 0),    /* buffer parity error */
+};
+
+/* second word of the error information record (protection-info errors) */
+enum error_info_rec_2 {
+       SLOT_BSY_ERR    = (1U << 31),   /* Slot Busy Error */
+       GRD_CHK_ERR     = (1U << 14),   /* Guard Check Error */
+       APP_CHK_ERR     = (1U << 13),   /* Application Check error */
+       REF_CHK_ERR     = (1U << 12),   /* Reference Check Error */
+       USR_BLK_NM      = (1U << 0),    /* User Block Number */
+};
+
+enum pci_cfg_register_bits {
+       PCTL_PWR_OFF    = (0xFU << 24),
+       PCTL_COM_ON     = (0xFU << 20),
+       PCTL_LINK_RST   = (0xFU << 16),
+       PCTL_LINK_OFFS  = (16),
+       PCTL_PHY_DSBL   = (0xFU << 12),
+       PCTL_PHY_DSBL_OFFS      = (12),
+       PRD_REQ_SIZE    = (0x4000),
+       PRD_REQ_MASK    = (0x00007000),
+       PLS_NEG_LINK_WD         = (0x3FU << 4),
+       PLS_NEG_LINK_WD_OFFS    = 4,
+       PLS_LINK_SPD            = (0x0FU << 0),
+       PLS_LINK_SPD_OFFS       = 0,
+};
+
+/* PROTOCOL field values of the OPEN address frame */
+enum open_frame_protocol {
+       PROTOCOL_SMP    = 0x0,
+       PROTOCOL_SSP    = 0x1,
+       PROTOCOL_STP    = 0x2,
+};
+
+/* define for response frame datapres field */
+enum datapres_field {
+       NO_DATA         = 0,
+       RESPONSE_DATA   = 1,
+       SENSE_DATA      = 2,
+};
+
+/* define task management IU */
+struct mvs_tmf_task{
+       u8 tmf;
+       u16 tag_of_task_to_be_managed;
+};
+#endif
diff --git a/mvsas_tgt/mv_init.c b/mvsas_tgt/mv_init.c
new file mode 100644 (file)
index 0000000..e81c955
--- /dev/null
@@ -0,0 +1,982 @@
+/*
+ * Marvell 88SE64xx/88SE94xx pci init
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#include "mv_sas.h"
+#include "mv_spi.h"
+
+/* SAS transport template; assigned to shost->transportt in
+ * mvs_prep_sas_ha_init() (initialized at module load, not visible here) */
+static struct scsi_transport_template *mvs_stt;
+/*
+ * Per-chip parameters, indexed by the chip_XXXX id carried in
+ * pci_device_id.driver_data.  The first two columns are n_host and n_phy
+ * (used during probe); the last is the chip dispatch table.  The middle
+ * columns follow struct mvs_chip_info -- TODO confirm against mv_sas.h.
+ */
+static const struct mvs_chip_info mvs_chips[] = {
+       [chip_6320] =   { 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
+       [chip_6440] =   { 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
+       [chip_6485] =   { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
+       [chip_9180] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
+       [chip_9480] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
+};
+
+#ifdef SUPPORT_TARGET
+#include <scst.h>
+#include <scst_debug.h>
+#include "mv_tgt.h"
+/* per-core mvs_info pointers for target mode; allocated and filled in
+ * mvs_pci_init().  NOTE(review): declared as a single pointer but used
+ * as an array of pointers via a cast in mvs_pci_init() -- consider
+ * declaring it struct mvs_info **. */
+struct mvs_info *tgt_mvi;
+/* target-mode hooks into the SCST glue; definition not visible here */
+struct mvs_tgt_initiator mvs_tgt;
+/* sysfs host attributes referenced by mvs_sht.shost_attrs; presumably
+ * defined in mv_tgt.c -- TODO confirm */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
+struct class_device_attribute *mvst_host_attrs[];
+#else
+struct device_attribute *mvst_host_attrs[];
+#endif
+#endif
+
+#define SOC_SAS_NUM 2
+
+/*
+ * SCSI host template: mostly generic libsas handlers plus the
+ * mvsas-specific slave/scan hooks.  can_queue and cmd_per_lun are
+ * raised to their real values later in mvs_post_sas_ha_init().
+ */
+static struct scsi_host_template mvs_sht = {
+       .module                 = THIS_MODULE,
+       .name                   = DRV_NAME,
+       .queuecommand           = sas_queuecommand,
+       .target_alloc           = sas_target_alloc,
+       .slave_configure        = mvs_slave_configure,
+       .slave_destroy          = sas_slave_destroy,
+       .scan_finished          = mvs_scan_finished,
+       .scan_start             = mvs_scan_start,
+       .change_queue_depth     = sas_change_queue_depth,
+       .change_queue_type      = sas_change_queue_type,
+       .bios_param             = sas_bios_param,
+       .can_queue              = 1,
+       .cmd_per_lun            = 1,
+       .this_id                = -1,
+       .sg_tablesize           = SG_ALL,
+       .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .eh_device_reset_handler        = sas_eh_device_reset_handler,
+       .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
+       .slave_alloc            = mvs_slave_alloc,
+       .target_destroy         = sas_target_destroy,
+       .ioctl                  = sas_ioctl,
+#ifdef SUPPORT_TARGET
+       .shost_attrs            = mvst_host_attrs,
+#endif
+};
+
+/* libsas -> LLDD callbacks: discovery, task execution, phy control,
+ * task management / resets, and port formation events */
+static struct sas_domain_function_template mvs_transport_ops = {
+       .lldd_dev_found         = mvs_dev_found,
+       .lldd_dev_gone  = mvs_dev_gone,
+
+       .lldd_execute_task      = mvs_queue_command,
+       .lldd_control_phy       = mvs_phy_control,
+
+       .lldd_abort_task        = mvs_abort_task,
+       .lldd_abort_task_set    = mvs_abort_task_set,
+       .lldd_clear_aca         = mvs_clear_aca,
+       .lldd_clear_task_set    = mvs_clear_task_set,
+       .lldd_I_T_nexus_reset   = mvs_I_T_nexus_reset,
+       .lldd_lu_reset          = mvs_lu_reset,
+       .lldd_query_task        = mvs_query_task,
+
+       .lldd_port_formed       = mvs_port_formed,
+       .lldd_port_deformed     = mvs_port_deformed,
+
+};
+
+/*
+ * mvs_phy_init - set the initial software state of one phy of @mvi
+ * @mvi:    owning controller instance
+ * @phy_id: index into mvi->phy[], also used as the asd_sas_phy id
+ *
+ * Fills in the libsas asd_sas_phy embedded in our mvs_phy.  Phys beyond
+ * the chip's real phy count start out disabled.
+ */
+static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+{
+       struct mvs_phy *phy = &mvi->phy[phy_id];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+       phy->mvi = mvi;
+       init_timer(&phy->timer);
+       /* only the chip's real phys are enabled */
+       sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
+       sas_phy->class = SAS;
+       sas_phy->iproto = SAS_PROTOCOL_ALL;
+       sas_phy->tproto = 0;
+       sas_phy->type = PHY_TYPE_PHYSICAL;
+       sas_phy->role = PHY_ROLE_INITIATOR;
+       sas_phy->oob_mode = OOB_NOT_CONNECTED;
+       sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+       sas_phy->id = phy_id;
+       sas_phy->sas_addr = &mvi->sas_addr[0];
+       sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+       /* back-pointer to the SAS HA area stored in shost->hostdata */
+       sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
+       sas_phy->lldd_phy = phy;
+}
+
+/*
+ * mvs_free - release everything mvs_alloc()/mvs_pci_alloc() set up for @mvi
+ * @mvi: core instance to tear down; NULL is a no-op
+ *
+ * Safe on a partially constructed instance: each DMA area is freed only
+ * if its pointer is set, and only tags_num slot buffers (the count that
+ * actually got allocated) are released.
+ */
+static void mvs_free(struct mvs_info *mvi)
+{
+       int i;
+       struct mvs_wq *mwq;
+       int slot_nr;
+
+       if (!mvi)
+               return;
+
+       /* SoC parts use a different slot ring size */
+       if (mvi->flags & MVF_FLAG_SOC)
+               slot_nr = MVS_SOC_SLOTS;
+       else
+               slot_nr = MVS_SLOTS;
+
+       for (i = 0; i < mvi->tags_num; i++) {
+               struct mvs_slot_info *slot = &mvi->slot_info[i];
+               if (slot->buf)
+                       dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+                                         slot->buf, slot->buf_dma);
+       }
+
+       if (mvi->tx)
+               dma_free_coherent(mvi->dev,
+                                 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+                                 mvi->tx, mvi->tx_dma);
+       if (mvi->rx_fis)
+               dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
+                                 mvi->rx_fis, mvi->rx_fis_dma);
+       if (mvi->rx)
+               dma_free_coherent(mvi->dev,
+                                 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+                                 mvi->rx, mvi->rx_dma);
+       if (mvi->slot)
+               dma_free_coherent(mvi->dev,
+                                 sizeof(*mvi->slot) * slot_nr,
+                                 mvi->slot, mvi->slot_dma);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       if (mvi->bulk_buffer)
+               dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+                                 mvi->bulk_buffer, mvi->bulk_buffer_dma);
+#endif
+
+       MVS_CHIP_DISP->chip_iounmap(mvi);
+       if (mvi->shost)
+               scsi_host_put(mvi->shost);
+       /* stop any delayed work still queued before freeing its owner */
+       list_for_each_entry(mwq, &mvi->wq_list, entry)
+               cancel_delayed_work(&mwq->work_q);
+       kfree(mvi);
+}
+
+#ifdef MVS_USE_TASKLET
+/* single tasklet shared by all cores behind one sas_ha; (re)armed from
+ * mvs_interrupt() and bound to the sas_ha in mvs_pci_alloc() */
+struct tasklet_struct  mv_tasklet;
+/*
+ * mvs_tasklet - bottom half: poll every core's interrupt status and run
+ * its ISR for any core that reports work.
+ * @opaque: the sas_ha_struct the tasklet was initialized with
+ */
+static void mvs_tasklet(unsigned long opaque)
+{
+       unsigned long flags;
+       u32 stat;
+       u16 core_nr, i = 0;
+
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+       /* lldd_ha must have been populated before interrupts fire */
+       if (unlikely(!mvi))
+               BUG_ON(1);
+
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
+               if (stat)
+                       MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
+       }
+
+}
+#endif
+
+/*
+ * mvs_interrupt - shared IRQ handler for all cores behind one sas_ha
+ * @irq:    interrupt line
+ * @opaque: the sas_ha_struct passed to request_irq() in mvs_pci_init()
+ *
+ * Interrupt status is sampled from core 0 only; if it reports work,
+ * either the shared tasklet is scheduled (MVS_USE_TASKLET) or every
+ * core's ISR is invoked directly.
+ * NOTE(review): with MVS_USE_TASKLET defined, 'i' and core_nr are set
+ * but unused (compiler warning).
+ */
+static irqreturn_t mvs_interrupt(int irq, void *opaque)
+{
+       u32 core_nr, i = 0;
+       u32 stat;
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = opaque;
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+       if (unlikely(!mvi))
+               return IRQ_NONE;
+
+       /* only core 0's status decides whether this IRQ is ours */
+       stat = MVS_CHIP_DISP->isr_status(mvi, irq);
+       if (!stat)
+               return IRQ_NONE;
+
+#ifdef MVS_USE_TASKLET
+       tasklet_schedule(&mv_tasklet);
+#else
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               MVS_CHIP_DISP->isr(mvi, irq, stat);
+       }
+#endif
+       return IRQ_HANDLED;
+}
+
+/*
+ * mvs_alloc - allocate per-core DMA rings, slot buffers and soft state
+ * @mvi:   core instance to populate
+ * @shost: associated SCSI host (not used directly here)
+ *
+ * Returns 0 on success, 1 on failure.  The error path frees nothing
+ * itself; the caller (mvs_pci_alloc) invokes mvs_free(), and tags_num
+ * records how many slot buffers were successfully allocated so the
+ * teardown frees exactly those.
+ */
+static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+{
+       int i, slot_nr;
+
+       if (mvi->flags & MVF_FLAG_SOC)
+               slot_nr = MVS_SOC_SLOTS;
+       else
+               slot_nr = MVS_SLOTS;
+
+       spin_lock_init(&mvi->lock);
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               mvs_phy_init(mvi, i);
+               mvi->port[i].wide_port_phymap = 0;
+               mvi->port[i].port_attached = 0;
+               INIT_LIST_HEAD(&mvi->port[i].list);
+       }
+       for (i = 0; i < MVS_MAX_DEVICES; i++) {
+               mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
+               mvi->devices[i].dev_type = NO_DEVICE;
+               mvi->devices[i].device_id = i;
+       }
+
+       /*
+        * alloc and init our DMA areas
+        */
+       mvi->tx = dma_alloc_coherent(mvi->dev,
+                                    sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+                                    &mvi->tx_dma, GFP_KERNEL);
+       if (!mvi->tx)
+               goto err_out;
+       memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
+       mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
+                                        &mvi->rx_fis_dma, GFP_KERNEL);
+       if (!mvi->rx_fis)
+               goto err_out;
+       memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
+
+       /* rx ring has one extra element; element 0 mirrors the consumer
+        * index (see rx_cons below) */
+       mvi->rx = dma_alloc_coherent(mvi->dev,
+                                    sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+                                    &mvi->rx_dma, GFP_KERNEL);
+       if (!mvi->rx)
+               goto err_out;
+       memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
+       mvi->rx[0] = cpu_to_le32(0xfff);        /* initial consumer index */
+       mvi->rx_cons = 0xfff;
+
+       mvi->slot = dma_alloc_coherent(mvi->dev,
+                                      sizeof(*mvi->slot) * slot_nr,
+                                      &mvi->slot_dma, GFP_KERNEL);
+       if (!mvi->slot)
+               goto err_out;
+       memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       /* scratch ("trash bucket") buffer for the hotplug DMA fix */
+       mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
+                                      TRASH_BUCKET_SIZE,
+                                      &mvi->bulk_buffer_dma, GFP_KERNEL);
+       if (!mvi->bulk_buffer)
+               goto err_out;
+#endif
+       for (i = 0; i < slot_nr; i++) {
+               struct mvs_slot_info *slot = &mvi->slot_info[i];
+
+               slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+                                              &slot->buf_dma, GFP_KERNEL);
+               if (!slot->buf) {
+                       printk(KERN_DEBUG"failed to allocate slot->buf.\n");
+                       goto err_out;
+               }
+               memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+               ++mvi->tags_num;        /* count buffers actually allocated */
+       }
+       /* Initialize tags */
+       mvs_tag_init(mvi);
+       return 0;
+err_out:
+       return 1;
+}
+
+
+/*
+ * mvs_ioremap - map the chip's register BARs into mvi->regs/regs_ex
+ * @mvi:    controller instance
+ * @bar:    main register BAR index
+ * @bar_ex: peripheral register BAR index, or -1 if the chip has none
+ *
+ * Returns 0 on success, -1 on failure.  If mapping the main BAR fails,
+ * any mapping already made for bar_ex is undone.
+ */
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
+{
+       unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
+       struct pci_dev *pdev = mvi->pdev;
+       if (bar_ex != -1) {
+               /*
+                * ioremap main and peripheral registers
+                */
+               res_start = pci_resource_start(pdev, bar_ex);
+               res_len = pci_resource_len(pdev, bar_ex);
+               if (!res_start || !res_len)
+                       goto err_out;
+
+               res_flag_ex = pci_resource_flags(pdev, bar_ex);
+               if (res_flag_ex & IORESOURCE_MEM) {
+                       if (res_flag_ex & IORESOURCE_CACHEABLE)
+                               mvi->regs_ex = ioremap(res_start, res_len);
+                       else
+                               mvi->regs_ex = ioremap_nocache(res_start,
+                                               res_len);
+               } else
+                       /* I/O-port BAR: keep the raw port base */
+                       mvi->regs_ex = (void *)res_start;
+               if (!mvi->regs_ex)
+                       goto err_out;
+       }
+
+       res_start = pci_resource_start(pdev, bar);
+       res_len = pci_resource_len(pdev, bar);
+       if (!res_start || !res_len)
+               goto err_out;
+
+       res_flag = pci_resource_flags(pdev, bar);
+       if (res_flag & IORESOURCE_CACHEABLE)
+               mvi->regs = ioremap(res_start, res_len);
+       else
+               mvi->regs = ioremap_nocache(res_start, res_len);
+
+       if (!mvi->regs) {
+               /* undo the bar_ex mapping (only if it was an ioremap) */
+               if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
+                       iounmap(mvi->regs_ex);
+               mvi->regs_ex = NULL;
+               goto err_out;
+       }
+
+       return 0;
+err_out:
+       return -1;
+}
+
+/* thin wrapper around iounmap() (used through the chip dispatch table) */
+void mvs_iounmap(void __iomem *regs)
+{
+       iounmap(regs);
+}
+
+/*
+ * mvs_pci_alloc - allocate and initialize one mvs_info (one core)
+ * @pdev:  PCI device
+ * @ent:   matched id table entry; driver_data indexes mvs_chips[]
+ * @shost: SCSI host shared by all cores of the adapter
+ * @id:    core index within the host
+ *
+ * Returns the new mvs_info, or NULL on failure (anything allocated so
+ * far is released via mvs_free()).
+ */
+static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
+                               const struct pci_device_id *ent,
+                               struct Scsi_Host *shost, unsigned int id)
+{
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+       /* slot_info[] is sized for MVS_SLOTS even on SoC parts */
+       mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
+                       GFP_KERNEL);
+       if (!mvi)
+               return NULL;
+
+       mvi->pdev = pdev;
+       mvi->dev = &pdev->dev;
+       mvi->chip_id = ent->driver_data;
+       mvi->chip = &mvs_chips[mvi->chip_id];
+       INIT_LIST_HEAD(&mvi->wq_list);
+       mvi->irq = pdev->irq;
+
+       ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
+       ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
+
+       mvi->id = id;
+       mvi->sas = sha;
+       mvi->shost = shost;
+#ifdef MVS_USE_TASKLET
+       /* NOTE(review): the shared tasklet is re-initialized once per
+        * core; harmless since the (func, data) pair is the same */
+       tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
+#endif
+
+       if (MVS_CHIP_DISP->chip_ioremap(mvi))
+               goto err_out;
+       if (!mvs_alloc(mvi, shost))     /* 0 == success */
+               return mvi;
+err_out:
+       mvs_free(mvi);
+       return NULL;
+}
+
+/* move to PCI layer or libata core? */
+/* move to PCI layer or libata core? */
+/*
+ * pci_go_64 - select the widest usable DMA masks for @pdev
+ *
+ * Prefers 64-bit streaming + 64-bit consistent DMA.  If only the
+ * consistent mask fails, streaming stays 64-bit while consistent drops
+ * to 32-bit; otherwise both fall back to 32-bit.  Returns 0 on success
+ * or the negative errno of the failing mask call.
+ */
+static int pci_go_64(struct pci_dev *pdev)
+{
+       int rc;
+
+       if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+               rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               if (rc) {
+                       /* 64-bit streaming with 32-bit coherent buffers */
+                       rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                       if (rc) {
+                               dev_printk(KERN_ERR, &pdev->dev,
+                                          "64-bit DMA enable failed\n");
+                               return rc;
+                       }
+               }
+       } else {
+               rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               if (rc) {
+                       dev_printk(KERN_ERR, &pdev->dev,
+                                  "32-bit DMA enable failed\n");
+                       return rc;
+               }
+               rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+               if (rc) {
+                       dev_printk(KERN_ERR, &pdev->dev,
+                                  "32-bit consistent DMA enable failed\n");
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+/*
+ * mvs_prep_sas_ha_init - first-stage setup of the sas_ha bound to @shost
+ * @shost:     SCSI host shared by all cores of the adapter
+ * @chip_info: per-chip parameters (core and phy counts)
+ *
+ * Allocates the flat phy/port pointer arrays (filled in later by
+ * mvs_post_sas_ha_init()) and the driver-private mvs_prv_info, then
+ * sets the generic shost limits.  Returns 0 on success, -1 on failure;
+ * on failure everything allocated here is released again and the sha
+ * pointers are cleared so no dangling references remain.
+ */
+static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+                               const struct mvs_chip_info *chip_info)
+{
+       int phy_nr, port_nr;
+       unsigned short core_nr;
+       struct asd_sas_phy **arr_phy;
+       struct asd_sas_port **arr_port;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+       core_nr = chip_info->n_host;
+       phy_nr  = core_nr * chip_info->n_phy;
+       port_nr = phy_nr;       /* one port slot per phy */
+
+       memset(sha, 0x00, sizeof(struct sas_ha_struct));
+       arr_phy  = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+       arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+       if (!arr_phy || !arr_port)
+               goto exit_free;
+
+       sha->sas_phy = arr_phy;
+       sha->sas_port = arr_port;
+
+       sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
+       if (!sha->lldd_ha)
+               goto exit_free;
+
+       ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
+
+       shost->transportt = mvs_stt;
+       shost->max_id = 128;
+       shost->max_lun = ~0;
+       shost->max_channel = 1;
+       shost->max_cmd_len = 16;
+
+       return 0;
+exit_free:
+       kfree(arr_phy);
+       kfree(arr_port);
+       /* don't leave sha pointing at the arrays we just freed */
+       sha->sas_phy = NULL;
+       sha->sas_port = NULL;
+       return -1;
+
+}
+
+/*
+ * mvs_post_sas_ha_init - finish sas_ha setup once every core exists
+ * @shost:     SCSI host shared by all cores
+ * @chip_info: per-chip parameters
+ *
+ * Wires each core's phys/ports into the flat sha->sas_phy/sas_port
+ * arrays allocated by mvs_prep_sas_ha_init() and sizes the queues.
+ * Per-host fields (dev, sas_addr, core.shost) are taken from the last
+ * core initialized in the loop.
+ */
+static void  __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
+                       const struct mvs_chip_info *chip_info)
+{
+       int can_queue, i = 0, j = 0;
+       struct mvs_info *mvi = NULL;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+       for (j = 0; j < nr_core; j++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+               for (i = 0; i < chip_info->n_phy; i++) {
+                       sha->sas_phy[j * chip_info->n_phy  + i] =
+                               &mvi->phy[i].sas_phy;
+                       sha->sas_port[j * chip_info->n_phy + i] =
+                               &mvi->port[i].sas_port;
+               }
+       }
+
+       sha->sas_ha_name = DRV_NAME;
+       sha->dev = mvi->dev;
+       sha->lldd_module = THIS_MODULE;
+       sha->sas_addr = &mvi->sas_addr[0];
+
+       sha->num_phys = nr_core * chip_info->n_phy;
+
+       sha->lldd_max_execute_num = 1;
+
+       if (mvi->flags & MVF_FLAG_SOC)
+               can_queue = MVS_SOC_CAN_QUEUE;
+       else
+               can_queue = MVS_CAN_QUEUE;
+
+       sha->lldd_queue_size = can_queue;
+       shost->can_queue = can_queue;
+       mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
+       sha->core.shost = mvi->shost;
+}
+
+#ifndef SUPPORT_TARGET
+/*
+ * mvs_init_sas_add - assign a fixed default SAS address to every phy
+ * @mvi: core instance
+ *
+ * Used only when target mode is compiled out (the SUPPORT_TARGET build
+ * obtains addresses via mvs_spi_init() instead -- see mvs_pci_init()).
+ * Every phy gets the same hard-coded WWN, stored big-endian; the HA
+ * address is then copied from phy 0.
+ */
+static void mvs_init_sas_add(struct mvs_info *mvi)
+{
+       u8 i;
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
+               /* byte-swap the value just stored into wire (BE) order */
+               mvi->phy[i].dev_sas_addr =
+                       cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
+       }
+
+       memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
+}
+#endif
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+                                 const struct pci_device_id *ent)
+{
+       unsigned int rc, nhost = 0;
+       struct mvs_info *mvi;
+       irq_handler_t irq_handler = mvs_interrupt;
+       struct Scsi_Host *shost = NULL;
+       const struct mvs_chip_info *chip;
+
+       dev_printk(KERN_INFO, &pdev->dev,
+               "mvsas: driver version %s\n", DRV_VERSION);
+       rc = pci_enable_device(pdev);
+       if (rc)
+               goto err_out_enable;
+
+       pci_set_master(pdev);
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc)
+               goto err_out_disable;
+
+       rc = pci_go_64(pdev);
+       if (rc)
+               goto err_out_regions;
+
+       shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
+       if (!shost) {
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       chip = &mvs_chips[ent->driver_data];
+       SHOST_TO_SAS_HA(shost) =
+               kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
+       if (!SHOST_TO_SAS_HA(shost)) {
+               kfree(shost);
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       rc = mvs_prep_sas_ha_init(shost, chip);
+       if (rc) {
+               kfree(shost);
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+#ifdef SUPPORT_TARGET
+       tgt_mvi = kcalloc(chip->n_host, sizeof(struct mvs_info *), GFP_KERNEL);
+       if (!tgt_mvi)
+               PRINT_ERROR("%s:allocate tgt_mvi failed", __func__);
+#endif
+
+       do {
+               mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
+               if (!mvi) {
+                       rc = -ENOMEM;
+                       goto err_out_regions;
+               }
+
+#ifdef SUPPORT_TARGET
+               if (mvs_spi_init(mvi)) {
+                       mvs_free(mvi);
+                       rc = -EFAULT;
+                       goto err_out_regions;
+               }
+#else
+               mvs_init_sas_add(mvi);
+#endif
+
+               mvi->instance = nhost;
+
+#ifdef SUPPORT_TARGET
+               ((struct mvs_info **)tgt_mvi)[nhost] = mvi;
+               mvst_init_tgt_port(mvi);
+#endif
+
+               rc = MVS_CHIP_DISP->chip_init(mvi);
+               if (rc) {
+                       mvs_free(mvi);
+                       goto err_out_regions;
+               }
+               nhost++;
+       } while (nhost < chip->n_host);
+
+       mvs_post_sas_ha_init(shost, chip);
+
+       rc = scsi_add_host(shost, &pdev->dev);
+       if (rc)
+               goto err_out_shost;
+
+       rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+       if (rc)
+               goto err_out_shost;
+       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
+               DRV_NAME, SHOST_TO_SAS_HA(shost));
+       if (rc)
+               goto err_not_sas;
+
+#ifdef SUPPORT_TARGET
+       rc = mvst_init();
+       if (rc)
+               goto err_out_shost;
+#endif
+
+       MVS_CHIP_DISP->interrupt_enable(mvi);
+
+       scsi_scan_host(mvi->shost);
+
+       return 0;
+
+err_not_sas:
+       sas_unregister_ha(SHOST_TO_SAS_HA(shost));
+err_out_shost:
+       scsi_remove_host(mvi->shost);
+err_out_regions:
+       pci_release_regions(pdev);
+err_out_disable:
+       pci_disable_device(pdev);
+err_out_enable:
+       return rc;
+}
+
+/* PCI remove: unwind probe in reverse — unregister from libsas/SCSI,
+ * let the target layer exit target mode per core, then free IRQ,
+ * per-core state, the HA struct, and the PCI resources. */
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+       unsigned short core_nr, i = 0;
+       struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+       struct mvs_info *mvi = NULL;
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+#ifdef MVS_USE_TASKLET
+       tasklet_kill(&mv_tasklet);
+#endif
+
+       pci_set_drvdata(pdev, NULL);
+       sas_unregister_ha(sha);
+       sas_remove_host(mvi->shost);
+       scsi_remove_host(mvi->shost);
+
+#ifdef SUPPORT_TARGET
+       /* Ask the target layer to leave target mode on every core before
+        * the hardware goes away. */
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               mvi->flags |= MVF_HOST_SHUTTING_DOWN;
+               if ((mvs_tgt.tgt_host_action != NULL)
+                       && (mvi->flags & MVF_TARGET_MODE_ENABLE)) {
+                       mv_dprintk("start disable target mode of host%d\n", i);
+                       mvs_tgt.tgt_host_action(mvi, EXIT_TARGET_MODE, 0);
+               }
+       }
+       mvst_exit();
+#endif
+
+       /* NOTE(review): after the loop above, mvi points at the last
+        * core; presumably interrupt_disable() acts chip-wide — confirm. */
+       MVS_CHIP_DISP->interrupt_disable(mvi);
+       free_irq(mvi->irq, sha);
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               mvs_free(mvi);
+       }
+       kfree(sha->sas_phy);
+       kfree(sha->sas_port);
+       kfree(sha);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       return;
+}
+
+/* PCI IDs served by this driver.  Device 0x6340 is driven as chip_6440;
+ * the subdevice-0x6480 variant of device 0x6440 is listed before the
+ * generic 0x6440 entry so the more specific match wins. */
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+       { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+       { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+       {
+               .vendor         = PCI_VENDOR_ID_MARVELL,
+               .device         = 0x6440,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = 0x6480,
+               .class          = 0,
+               .class_mask     = 0,
+               .driver_data    = chip_6485,
+       },
+       { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+       { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
+       { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
+       { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
+
+       { }     /* terminate list */
+};
+
+/* PCI driver hooks; no suspend/resume — power management not implemented. */
+static struct pci_driver mvs_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = mvs_pci_table,
+       .probe          = mvs_pci_init,
+       .remove         = __devexit_p(mvs_pci_remove),
+};
+
+#ifdef SUPPORT_TARGET
+
+/* Fixed-width column strings for the sysfs "target_mode" table below. */
+#define SATA_STR               "SATA "
+#define SAS_STR                "SAS "
+#define NA_STR         " "
+
+#define END_DEV_STR            "END DEVICE"
+#define EXPANDER_STR   "EXPANDER"
+
+/* Return the protocol label ("SAS "/"SATA "/" ") for sysfs display. */
+static char *mvs_get_phy_type_string(struct mvs_phy *phy)
+{
+       if (phy->phy_attached) {
+               if (phy->phy_type & PORT_TYPE_SAS)
+                       return SAS_STR;
+               if (phy->phy_type & PORT_TYPE_SATA)
+                       return SATA_STR;
+       }
+       return NA_STR;
+}
+
+/* Return the attached-device label for sysfs display: the low three
+ * bits of att_dev_info encode the device type (1 = end device,
+ * 2/3 = expander); any SATA attachment counts as an end device. */
+static char *mvs_get_dev_type_string(struct mvs_phy *phy)
+{
+       unsigned int dev_type;
+
+       if (!phy->phy_attached)
+               return NA_STR;
+
+       dev_type = phy->att_dev_info & 0x7;
+       if (dev_type == 1 || (phy->phy_type & PORT_TYPE_SATA))
+               return END_DEV_STR;
+       if (dev_type == 2 || dev_type == 3)
+               return EXPANDER_STR;
+       return NA_STR;
+}
+
+#if  LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
+static ssize_t
+mvs_show_tgt_enabled(struct class_device *cdev, char *buffer)
+#else
+static ssize_t
+mvs_show_tgt_enabled(struct device *cdev,
+               struct device_attribute *attr, char *buffer)
+#endif
+{
+       /* sysfs show: one row per phy across all cores — address,
+        * attached device type/address, and target-mode state. */
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(class_to_shost(cdev));
+       struct mvs_info *mvi = NULL;
+       struct mvs_phy *phy;
+       char *phy_type, *dev_type;
+       ulong max_size = PAGE_SIZE;
+       ssize_t size = 0;
+       u8 phyid = 0, core_id, hn;
+       size = scnprintf(buffer, max_size, "%-5s%-20s%-18s%-20s%-12s\n",
+                       "phy", "dev sas address", "attach dev type",
+                       "attach sas address", "target mode");
+       hn = ((struct mvs_prv_info *)sas_ha->lldd_ha)->n_host;
+       for (core_id = 0; core_id < hn; core_id++) {
+               mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[core_id];
+               for (phyid = 0; phyid < mvi->chip->n_phy; phyid++) {
+                       phy = &mvi->phy[phyid];
+                       phy_type = mvs_get_phy_type_string(phy);
+                       dev_type = mvs_get_dev_type_string(phy);
+                       /* scnprintf() with the *remaining* space: the old
+                        * snprintf(buffer + size, max_size, ...) ignored
+                        * what was already written and could run past the
+                        * end of the PAGE_SIZE sysfs buffer. */
+                       size += scnprintf(buffer + size, max_size - size,
+                               "%-5d%-20llx%-5s%-13s%-20llx%-15d\n",
+                               phyid+core_id*mvi->chip->n_phy,
+                               SAS_ADDR(&phy->dev_sas_addr),
+                               phy_type, dev_type,
+                               SAS_ADDR(&phy->att_dev_sas_addr),
+                               PHY_IN_TARGET_MODE(phy->dev_info));
+               }
+       }
+       return size;
+}
+
+/*
+ * Parse a leading unsigned number (decimal or 0x-hex) from @str into
+ * @valp.  Returns the number of characters consumed plus one (the
+ * convention this driver's callers rely on), or -1 if @str does not
+ * start with a digit or the value exceeds 0xffff.
+ */
+static int mvsas_parse_ushort(const char *str, unsigned short *valp)
+{
+       unsigned long long val;
+       char *end;
+       int ret = 0;
+
+       if (!isdigit(str[0])) {
+               ret = -1;
+               goto bail;
+       }
+
+       /* Keep the full 64-bit result: storing simple_strtoull() in an
+        * unsigned long truncated it on 32-bit builds, so e.g.
+        * 0x100000000 wrapped to 0 and slipped past the range check. */
+       val = simple_strtoull(str, &end, 0);
+       if (val > 0xffff) {
+               ret = -1;
+               goto bail;
+       }
+
+       *valp = val;
+
+       /* str[0] is a digit, so end > str and ret >= 2 always; the old
+        * "if (ret == 0) ret = -1" guard was unreachable and is gone. */
+       ret = end + 1 - str;
+
+bail:
+       return ret;
+}
+
+/*
+ * Enable or disable target mode on the phy whose *global* index is
+ * encoded in @buffer.  The index is validated, mapped to the owning
+ * core's mvs_info, reduced to a per-core phy number, and handed to the
+ * target layer; each action is followed by a settling sleep.
+ */
+static void mvs_target_mode_setting(struct sas_ha_struct *sas_ha,
+                               int action, const char *buffer)
+{
+       struct mvs_prv_info *prv = (struct mvs_prv_info *)sas_ha->lldd_ha;
+       struct mvs_info *mvi;
+       unsigned short val = 0;
+       u8 host_no = 0;
+
+       if (mvsas_parse_ushort(buffer, &val) < 0)
+               return;
+       /* Validate *before* indexing prv->mvi[]: the old code computed
+        * mvi[val / n_phy] first (out-of-bounds read for large values),
+        * and its ">" comparison also let the one-past-the-end index
+        * through. */
+       if (val >= prv->n_host * prv->n_phy)
+               return;
+       mvi = prv->mvi[val / prv->n_phy];
+       if (!mvi) {
+               mv_dprintk("failed to get root pointer\n");
+               return;
+       }
+       /* Fold the global phy index into a per-core phy number. */
+       while (val + 1 > mvi->chip->n_phy) {
+               val -= mvi->chip->n_phy;
+               host_no++;
+       }
+       switch (action) {
+       case MVSAS_ENABLE_TGT:
+               mv_dprintk("Enable phy%d\n", val+host_no*mvi->chip->n_phy);
+               mvs_tgt.tgt_host_action(mvi, ENABLE_TARGET_MODE, val);
+               msleep_interruptible(10*1000);
+               break;
+       case MVSAS_DISABLE_TGT:
+               mv_dprintk("Disable phy%d\n", val+host_no*mvi->chip->n_phy);
+               mvs_tgt.tgt_host_action(mvi, DISABLE_TARGET_MODE, val);
+               msleep_interruptible(10*1000);
+               break;
+       default:
+               break;
+       }
+}
+
+
+#if   LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
+static ssize_t
+mvs_store_tgt_enabled(struct class_device *cdev,
+                         const char *buffer, size_t size)
+#else
+static ssize_t
+mvs_store_tgt_enabled(struct device *cdev, struct device_attribute *attr,
+                         const char *buffer, size_t size)
+#endif
+{
+       /* sysfs store: "enable <phy>" / "disable <phy>" toggles target
+        * mode on a phy.  Always returns @size, so the write appears to
+        * succeed even when the command is rejected. */
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(class_to_shost(cdev));
+       char *p, *e;
+       int force = 0, action = 0;
+
+       if (buffer == NULL)
+               return size;
+
+       if (mvs_tgt.tgt_host_action == NULL) {
+               mv_printk("not acting for lack of target driver\n");
+               return size;
+       }
+
+       /* NOTE(review): 'force' is parsed but never acted upon. */
+       if ((size > 1) && (buffer[1] == 'f')) {
+               force = 1;
+               mv_dprintk("forcing the matter\n");
+       }
+       /* NOTE(review): casts away const and edits the sysfs buffer in
+        * place (newline strip here, '*e = 0' below). */
+       p = (char *)buffer;
+       if (p[strlen(p) - 1] == '\n')
+               p[strlen(p) - 1] = '\0';
+       if (!strncasecmp("enable", p, 6)) {
+               mv_printk("get enable\n");
+               p += 6;
+               action = MVSAS_ENABLE_TGT;
+       } else if (!strncasecmp("disable ", p, 7)) {
+               mv_printk("get disable\n");
+               p += 7;
+               action = MVSAS_DISABLE_TGT;
+       } else {
+               mv_printk("Unknown action \"%s\"", p);
+               return size;
+       }
+
+       /* Isolate the phy-number token: skip leading spaces, then
+        * NUL-terminate at the next whitespace. */
+       switch (action) {
+       case MVSAS_ENABLE_TGT:
+       case MVSAS_DISABLE_TGT:
+               while (isspace(*p) && *p != '\0')
+                       p++;
+               e = p;
+               while (!isspace(*e) && *e != '\0')
+                       e++;
+               *e = 0;
+               break;
+       }
+
+       mvs_target_mode_setting(sas_ha, action, p);
+
+       return size;
+}
+
+#if  LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
+
+/* sysfs attribute "target_mode" (pre-2.6.26 class_device flavour). */
+static CLASS_DEVICE_ATTR(target_mode,
+                        S_IRUGO|S_IWUSR,
+                        mvs_show_tgt_enabled,
+                        mvs_store_tgt_enabled);
+
+struct class_device_attribute *mvst_host_attrs[] = {
+       &class_device_attr_target_mode,
+       NULL,
+};
+#else
+/* sysfs attribute "target_mode" (struct device flavour, 2.6.26+). */
+static DEVICE_ATTR(target_mode,
+                        S_IRUGO|S_IWUSR,
+                        mvs_show_tgt_enabled,
+                        mvs_store_tgt_enabled);
+
+struct device_attribute *mvst_host_attrs[] = {
+       &dev_attr_target_mode,
+       NULL,
+};
+
+#endif
+
+#endif /* #ifdef SUPPORT_TARGET */
+
+
+
+/* Module init: attach the SAS transport template first, then register
+ * the PCI driver; undo the attach if registration fails. */
+static int __init mvs_init(void)
+{
+       int rc;
+
+       mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+       if (!mvs_stt)
+               return -ENOMEM;
+
+       rc = pci_register_driver(&mvs_pci_driver);
+       if (rc != 0)
+               sas_release_transport(mvs_stt);
+
+       return rc;
+}
+
+/* Module exit: unregister PCI first so no probe can race the
+ * transport template being released. */
+static void __exit mvs_exit(void)
+{
+       pci_unregister_driver(&mvs_pci_driver);
+       sas_release_transport(mvs_stt);
+}
+
+module_init(mvs_init);
+module_exit(mvs_exit);
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+#ifdef CONFIG_PCI
+/* Export the ID table so modpost/udev can autoload this module. */
+MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+#endif
diff --git a/mvsas_tgt/mv_sas.c b/mvsas_tgt/mv_sas.c
new file mode 100644 (file)
index 0000000..3808d3e
--- /dev/null
@@ -0,0 +1,2237 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
+#include "sas_task.c"
+#endif
+
+/* Recover the slot tag stashed on @task->lldd_task; returns 1 and
+ * stores it in *@tag on success, 0 if the task has no slot. */
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+       struct mvs_slot_info *slot = (struct mvs_slot_info *)task->lldd_task;
+
+       if (!slot)
+               return 0;
+       *tag = slot->slot_tag;
+       return 1;
+}
+
+/* Return @tag to the free pool in the host's tag bitmap. */
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
+{
+       clear_bit(tag, (void *)&mvi->tags);
+}
+
+/* Alias of mvs_tag_clear(); kept for call-site readability. */
+void mvs_tag_free(struct mvs_info *mvi, u32 tag)
+{
+       mvs_tag_clear(mvi, tag);
+}
+
+/* Mark @tag as allocated in the host's tag bitmap. */
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+       set_bit(tag, (void *)&mvi->tags);
+}
+
+/* Allocate the lowest free slot tag; stores it in *@tag_out and
+ * returns 0, or returns -SAS_QUEUE_FULL when no tag is free. */
+inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
+{
+       void *bitmap = (void *)&mvi->tags;
+       unsigned int tag = find_first_zero_bit(bitmap, mvi->tags_num);
+
+       if (tag >= mvi->tags_num)
+               return -SAS_QUEUE_FULL;
+
+       mvs_tag_set(mvi, tag);
+       *tag_out = tag;
+       return 0;
+}
+
+/* Start with every slot tag free. */
+void mvs_tag_init(struct mvs_info *mvi)
+{
+       int tag;
+
+       for (tag = 0; tag < mvi->tags_num; tag++)
+               mvs_tag_clear(mvi, tag);
+}
+
+/*
+ * Dump @size bytes at @data as 16-byte hex+ASCII rows, labelling each
+ * row with @baseaddr plus the running offset.
+ * NOTE(review): every fragment is its own printk(KERN_DEBUG ...), so
+ * the level prefix can split rows in the log — cosmetic only.
+ */
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
+{
+       u32 i;
+       u32 run;
+       u32 offset;
+
+       offset = 0;
+       while (size) {
+               printk(KERN_DEBUG"%08X : ", baseaddr + offset);
+               /* 'run' is how many bytes this row actually holds. */
+               if (size >= 16)
+                       run = 16;
+               else
+                       run = size;
+               size -= run;
+               for (i = 0; i < 16; i++) {
+                       if (i < run)
+                               printk(KERN_DEBUG"%02X ", (u32)data[i]);
+                       else
+                               printk(KERN_DEBUG"   ");
+               }
+               printk(KERN_DEBUG": ");
+               for (i = 0; i < run; i++)
+                       printk(KERN_DEBUG"%c",
+                               isalnum(data[i]) ? data[i] : '.');
+               printk(KERN_DEBUG"\n");
+               data = &data[16];
+               offset += run;
+       }
+       printk(KERN_DEBUG"\n");
+}
+
+#if (_MV_DUMP > 1)
+/* Dump the 32-byte status/response buffer of slot @tag (debug builds). */
+static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
+                                  enum sas_protocol proto)
+{
+       u32 offset;
+       struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+       /* The status buffer follows the command table, the open address
+        * frame, and the PRD entries inside the slot's DMA buffer. */
+       offset = slot->cmd_size + MVS_OAF_SZ +
+           MVS_CHIP_DISP->prd_size() * slot->n_elem;
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
+                       tag);
+       mvs_hexdump(32, (u8 *) slot->response,
+                   (u32) slot->buf_dma + offset);
+}
+#endif
+
+/* Debug dump (_MV_DUMP > 1) of everything attached to slot @tag: the
+ * delivery-queue entry, command header, command table, open address
+ * frame, status buffer, and PRD table.  Compiles to a no-op otherwise. */
+static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
+                               enum sas_protocol proto)
+{
+#if (_MV_DUMP > 1)
+       u32 sz, w_ptr;
+       u64 addr;
+       struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+       /*Delivery Queue */
+       sz = MVS_CHIP_SLOT_SZ;
+       w_ptr = slot->tx;
+       addr = mvi->tx_dma;
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Delivery Queue Base Address=0x%llX (PA)"
+               "(tx_dma=0x%llX), Entry=%04d\n",
+               addr, (unsigned long long)mvi->tx_dma, w_ptr);
+       mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
+                       (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
+       /*Command List */
+       addr = mvi->slot_dma;
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Command List Base Address=0x%llX (PA)"
+               "(slot_dma=0x%llX), Header=%03d\n",
+               addr, (unsigned long long)slot->buf_dma, tag);
+       dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
+       /*mvs_cmd_hdr */
+       mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
+               (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
+       /*1.command table area */
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
+       mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
+       /*2.open address frame area */
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
+       mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
+                               (u32) slot->buf_dma + slot->cmd_size);
+       /*3.status buffer */
+       mvs_hba_sb_dump(mvi, tag, proto);
+       /*4.PRD table */
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
+       mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
+               (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
+               (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
+#endif
+}
+
+/* Debug dump (_MV_DUMP > 2) of the next completion-queue entry and the
+ * task it completes.  Compiles to a no-op otherwise. */
+static void mvs_hba_cq_dump(struct mvs_info *mvi)
+{
+#if (_MV_DUMP > 2)
+       u64 addr;
+       void __iomem *regs = mvi->regs;
+       /* NOTE(review): rx_cons + 1 is used without a ring-size mask;
+        * presumably callers guarantee it stays in range — confirm. */
+       u32 entry = mvi->rx_cons + 1;
+       u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
+
+       /*Completion Queue */
+       addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
+       dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
+                  mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Completion List Base Address=0x%llX (PA), "
+               "CQ_Entry=%04d, CQ_WP=0x%08X\n",
+               addr, entry - 1, mvi->rx[0]);
+       mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
+                   mvi->rx_dma + sizeof(u32) * entry);
+#endif
+}
+
+/* Hook for supplying a SAS address; currently a no-op — the hardcoded
+ * address in the commented-out memcpy is kept for reference only. */
+void mvs_get_sas_addr(void *buf, u32 buflen)
+{
+       /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
+}
+
+/*
+ * Map a libsas domain_device to the mvs_info (core) that owns it:
+ * find the device's port in sha->sas_port[], take the first phy on
+ * that port, locate that phy's index in sha->sas_phy[], then divide
+ * by the per-core phy count to select the core.
+ */
+struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+{
+       unsigned long i = 0, j = 0, hi = 0;
+       struct sas_ha_struct *sha = dev->port->ha;
+       struct mvs_info *mvi = NULL;
+       struct asd_sas_phy *phy;
+
+       while (sha->sas_port[i]) {
+               if (sha->sas_port[i] == dev->port) {
+                       phy =  container_of(sha->sas_port[i]->phy_list.next,
+                               struct asd_sas_phy, port_phy_el);
+                       j = 0;
+                       while (sha->sas_phy[j]) {
+                               if (sha->sas_phy[j] == phy)
+                                       break;
+                               j++;
+                       }
+                       break;
+               }
+               i++;
+       }
+       /* j is the phy's global index; n_phy phys per core. */
+       hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+       return mvi;
+
+}
+
+/* FIXME */
+/*
+ * Collect the per-core phy numbers of every phy on @dev's port into
+ * @phyno[]; returns how many entries were written.  Global indexes at
+ * or above n_phy are folded back into the second core's numbering.
+ */
+int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+{
+       unsigned long i = 0, j = 0, n = 0, num = 0;
+       struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+       struct sas_ha_struct *sha = dev->port->ha;
+
+       while (sha->sas_port[i]) {
+               if (sha->sas_port[i] == dev->port) {
+                       struct asd_sas_phy *phy;
+                       list_for_each_entry(phy,
+                               &sha->sas_port[i]->phy_list, port_phy_el) {
+                               /* Find this phy's global index in sas_phy[]. */
+                               j = 0;
+                               while (sha->sas_phy[j]) {
+                                       if (sha->sas_phy[j] == phy)
+                                               break;
+                                       j++;
+                               }
+                               phyno[n] = (j >= mvi->chip->n_phy) ?
+                                       (j - mvi->chip->n_phy) : j;
+                               num++;
+                               n++;
+                       }
+                       break;
+               }
+               i++;
+       }
+       return num;
+}
+
+/* Release @dev's SATA register set back to the chip, but only once the
+ * device has no requests in flight and actually holds a mapping. */
+static inline void mvs_free_reg_set(struct mvs_info *mvi,
+                               struct mvs_device *dev)
+{
+       if (!dev) {
+               mv_printk("device has been free.\n");
+               return;
+       }
+       if (dev->runing_req == 0 && dev->taskfileset != MVS_ID_NOT_MAPPED)
+               MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
+}
+
+/* Ensure @dev has a SATA register set; no-op (success) if one is
+ * already mapped, otherwise ask the chip layer for one. */
+static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
+                               struct mvs_device *dev)
+{
+       u8 rc = 0;
+
+       if (dev->taskfileset == MVS_ID_NOT_MAPPED)
+               rc = MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
+       return rc;
+}
+
+/* Reset every phy whose bit is set in @phy_mask; @hard selects hard
+ * vs. link reset in the chip layer. */
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
+{
+       u32 no;
+       for_each_phy(phy_mask, phy_mask, no) {
+               if (!(phy_mask & 1))
+                       continue;
+               MVS_CHIP_DISP->phy_reset(mvi, no, hard);
+       }
+}
+
+/* FIXME: locking? */
+/*
+ * libsas phy-control entry point.  Locates the owning core by scanning
+ * sha->sas_phy[] for @sas_phy, then dispatches @func to the chip layer.
+ * Returns -EOPNOTSUPP for unimplemented functions (e.g. spinup hold).
+ */
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+                       void *funcdata)
+{
+       int rc = 0, phy_id = sas_phy->id;
+       u32 tmp, i = 0, hi;
+       struct sas_ha_struct *sha = sas_phy->ha;
+       struct mvs_info *mvi = NULL;
+
+       while (sha->sas_phy[i]) {
+               if (sha->sas_phy[i] == sas_phy)
+                       break;
+               i++;
+       }
+       hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+       switch (func) {
+       case PHY_FUNC_SET_LINK_RATE:
+               MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
+               break;
+
+       case PHY_FUNC_HARD_RESET:
+               /* Skip if a hard reset is already in progress. */
+               tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
+               if (tmp & PHY_RST_HARD)
+                       break;
+               MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
+               break;
+
+       case PHY_FUNC_LINK_RESET:
+               MVS_CHIP_DISP->phy_enable(mvi, phy_id);
+               MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
+               break;
+
+       case PHY_FUNC_DISABLE:
+               MVS_CHIP_DISP->phy_disable(mvi, phy_id);
+               break;
+       case PHY_FUNC_RELEASE_SPINUP_HOLD:
+       default:
+               rc = -EOPNOTSUPP;
+       }
+       /* Unconditional settle time after any phy operation. */
+       msleep(200);
+       return rc;
+}
+
+/* Program a 64-bit SAS address into a port's config space as two
+ * 32-bit writes at register offsets @off_lo and @off_hi. */
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+                               u32 off_lo, u32 off_hi, u64 sas_addr)
+{
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
+       MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, (u32)sas_addr);
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
+       MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id,
+                                          (u32)(sas_addr >> 32));
+}
+
+/*
+ * Report phy @i's received identify/signature frame up to libsas:
+ * fires PHYE_OOB_DONE, refreshes the sas_phy link-rate fields, fills
+ * the identify frame for SAS phys, then signals PORTE_BYTES_DMAED.
+ */
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       struct sas_ha_struct *sas_ha;
+       if (!phy->phy_attached)
+               return;
+
+       /* SAS phys whose attached device advertises no target bits are
+        * skipped — nothing to report. */
+       if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
+               && phy->phy_type & PORT_TYPE_SAS) {
+               return;
+       }
+
+       sas_ha = mvi->sas;
+       sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+
+       if (sas_phy->phy) {
+               struct sas_phy *sphy = sas_phy->phy;
+
+               sphy->negotiated_linkrate = sas_phy->linkrate;
+               sphy->minimum_linkrate = phy->minimum_linkrate;
+               sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+               sphy->maximum_linkrate = phy->maximum_linkrate;
+               sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
+       }
+
+       if (phy->phy_type & PORT_TYPE_SAS) {
+               struct sas_identify_frame *id;
+
+               id = (struct sas_identify_frame *)phy->frame_rcvd;
+               id->dev_type = phy->identify.device_type;
+               id->initiator_bits = SAS_PROTOCOL_ALL;
+               id->target_bits = phy->identify.target_port_protocols;
+       } else if (phy->phy_type & PORT_TYPE_SATA) {
+               /*Nothing*/
+       }
+       mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);
+
+       sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+
+       mvi->sas->notify_port_event(sas_phy,
+                                  PORTE_BYTES_DMAED);
+}
+
+/* SCSI slave_alloc hook.  For SATA devices only LUN 0 exists, so any
+ * other LUN is rejected with -ENXIO (avoids target rescans when a
+ * REPORT_LUNS request fails); tagged queueing is flagged as supported. */
+int mvs_slave_alloc(struct scsi_device *scsi_dev)
+{
+       struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+
+       if (dev_is_sata(dev)) {
+               if (scsi_dev->lun > 0)
+                       return -ENXIO;
+               scsi_dev->tagged_supported = 1;
+       }
+       return sas_slave_alloc(scsi_dev);
+}
+
+/* SCSI slave_configure hook: defer to libsas, then (only when built
+ * with MV_DISABLE_NCQ) force NCQ off and queue depth 1 on SATA. */
+int mvs_slave_configure(struct scsi_device *sdev)
+{
+       struct domain_device *dev = sdev_to_domain_dev(sdev);
+       int ret = sas_slave_configure(sdev);
+
+       if (ret)
+               return ret;
+       if (dev_is_sata(dev)) {
+               /* may set PIO mode */
+       #if MV_DISABLE_NCQ
+               struct ata_port *ap = dev->sata_dev.ap;
+               struct ata_device *adev = ap->link.device;
+               adev->flags |= ATA_DFLAG_NCQ_OFF;
+               scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+       #endif
+       }
+       return 0;
+}
+
+/* SCSI scan_start hook: replay the received-frame event for every phy
+ * of every core so libsas discovers what is already attached. */
+void mvs_scan_start(struct Scsi_Host *shost)
+{
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       struct mvs_prv_info *prv = (struct mvs_prv_info *)sha->lldd_ha;
+       unsigned short host;
+       int phy;
+
+       for (host = 0; host < prv->n_host; host++) {
+               struct mvs_info *mvi = prv->mvi[host];
+
+               for (phy = 0; phy < mvi->chip->n_phy; phy++)
+                       mvs_bytes_dmaed(mvi, phy);
+       }
+}
+
+/* SCSI scan_finished hook: not done before one second has elapsed
+ * (phy-up interrupts empirically take about that long to arrive);
+ * after that, flush queued discovery work and report completion. */
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       if (time < HZ)
+               return 0;
+
+       scsi_flush_work(shost);
+       return 1;
+}
+
+/*
+ * Build the delivery-queue entry and command slot for an SMP request.
+ * DMA-maps the request/response SGs, then lays out the slot's DMA
+ * buffer as: [command table — debug builds only] [open address frame]
+ * [PRD table] [status buffer] (see region comments below).
+ * Returns 0 on success or a negative errno, unmapping the SGs on
+ * failure.
+ */
+static int mvs_task_prep_smp(struct mvs_info *mvi,
+                            struct mvs_task_exec_info *tei)
+{
+       int elem, rc, i;
+       struct sas_task *task = tei->task;
+       struct mvs_cmd_hdr *hdr = tei->hdr;
+       struct domain_device *dev = task->dev;
+       struct asd_sas_port *sas_port = dev->port;
+       struct scatterlist *sg_req, *sg_resp;
+       u32 req_len, resp_len, tag = tei->tag;
+       void *buf_tmp;
+       u8 *buf_oaf;
+       dma_addr_t buf_tmp_dma;
+       void *buf_prd;
+       struct mvs_slot_info *slot = &mvi->slot_info[tag];
+       u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#if _MV_DUMP
+       u8 *buf_cmd;
+       void *from;
+#endif
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+       /* Response length is fixed at SB_RFB_MAX, not the SG length. */
+       resp_len = SB_RFB_MAX;
+
+       /* must be in dwords */
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_2;
+       }
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
+       buf_tmp = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+
+#if _MV_DUMP
+       /* Debug builds copy the command into the slot buffer so it can
+        * be dumped; otherwise the mapped SG is used directly. */
+       buf_cmd = buf_tmp;
+       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+       buf_tmp += req_len;
+       buf_tmp_dma += req_len;
+       slot->cmd_size = req_len;
+#else
+       hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
+#endif
+
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       buf_oaf = buf_tmp;
+       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table *********************************** */
+       buf_prd = buf_tmp;
+       if (tei->n_elem)
+               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               hdr->prd_tbl = 0;
+
+       i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+       buf_tmp += i;
+       buf_tmp_dma += i;
+
+       /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+       slot->response = buf_tmp;
+       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+       if (mvi->flags & MVF_FLAG_SOC)
+               hdr->reserved[0] = 0;
+
+       /*
+        * Fill in TX ring and command slot header
+        */
+       slot->tx = mvi->tx_prod;
+       mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
+                                       TXQ_MODE_I | tag |
+                                       (sas_port->phy_mask << TXQ_PHY_SHIFT));
+
+       hdr->flags |= flags;
+       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
+       hdr->tags = cpu_to_le32(tag);
+       hdr->data_len = 0;
+
+       /* generate open address frame hdr (first 12 bytes) */
+       /* initiator, SMP, ftype 1h */
+       buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
+       buf_oaf[1] = dev->linkrate & 0xf;
+       *(u16 *)(buf_oaf + 2) = 0xFFFF;         /* SAS SPEC */
+       memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+       /* fill in PRD (scatter/gather) table, if any */
+       MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+
+#if _MV_DUMP
+       /* copy cmd table */
+       from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
+       memcpy(buf_cmd, from + sg_req->offset, req_len);
+       kunmap_atomic(from, KM_IRQ0);
+#endif
+       return 0;
+
+err_out_2:
+       dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
+                    PCI_DMA_FROMDEVICE);
+err_out:
+       dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
+                    PCI_DMA_TODEVICE);
+       return rc;
+}
+
+/* If @task is an NCQ (FPDMA read/write) ATA command, store its queued
+ * command tag in *@tag and return 1; otherwise return 0. */
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+       struct ata_queued_cmd *qc = task->uldd_task;
+
+       if (!qc)
+               return 0;
+       if (qc->tf.command != ATA_CMD_FPDMA_WRITE &&
+           qc->tf.command != ATA_CMD_FPDMA_READ)
+               return 0;
+
+       *tag = qc->tag;
+       return 1;
+}
+
+/*
+ * mvs_task_prep_ata() - build the hardware delivery entry, command header
+ * and per-slot DMA buffer for a SATA/STP request.
+ *
+ * The slot's MVS_SLOT_BUF_SZ buffer is carved into four regions, in
+ * order: the ATA command table (host-to-device FIS plus optional ATAPI
+ * CDB), the open address frame, the PRD scatter/gather table and the
+ * status/response buffer.  The command header is pointed at each region's
+ * DMA address as the carving proceeds.
+ *
+ * Returns 0 on success, or -EBUSY when no SATA register set could be
+ * assigned to the target device.
+ */
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+                            struct mvs_task_exec_info *tei)
+{
+       struct sas_task *task = tei->task;
+       struct domain_device *dev = task->dev;
+       struct mvs_device *mvi_dev =
+               (struct mvs_device *)dev->lldd_dev;
+       struct mvs_cmd_hdr *hdr = tei->hdr;
+       struct asd_sas_port *sas_port = dev->port;
+       struct mvs_slot_info *slot;
+       void *buf_prd;
+       u32 tag = tei->tag, hdr_tag;
+       u32 flags, del_q;
+       void *buf_tmp;
+       u8 *buf_cmd, *buf_oaf;
+       dma_addr_t buf_tmp_dma;
+       u32 i, req_len, resp_len;
+       const u32 max_resp_len = SB_RFB_MAX;
+
+       /* Each SATA device needs a hardware register set for its taskfile. */
+       if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
+               mv_dprintk("Have not enough regiset for dev %d.\n",
+                       mvi_dev->device_id);
+               return -EBUSY;
+       }
+
+       /* Queue a STP delivery entry carrying tag, phy mask and reg set. */
+       slot = &mvi->slot_info[tag];
+       slot->tx = mvi->tx_prod;
+       del_q = TXQ_MODE_I | tag |
+               (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+               (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+               (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+       mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       /* Reads advertise the full PRD count; dma_fix() pads the table. */
+       if (task->data_dir == DMA_FROM_DEVICE)
+               flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
+       else
+               flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#else
+       flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#endif
+       if (task->ata_task.use_ncq)
+               flags |= MCH_FPDMA;
+       if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+               if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+                       flags |= MCH_ATAPI;
+       }
+
+       /* FIXME: fill in port multiplier number */
+
+       hdr->flags = cpu_to_le32(flags);
+
+       /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
+       if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+       else
+               hdr_tag = tag;
+
+       hdr->tags = cpu_to_le32(hdr_tag);
+
+       hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+
+       /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+       buf_cmd = buf_tmp = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+
+       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_ATA_CMD_SZ;
+       buf_tmp_dma += MVS_ATA_CMD_SZ;
+#if _MV_DUMP
+       slot->cmd_size = MVS_ATA_CMD_SZ;
+#endif
+
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       /* used for STP.  unused for SATA? */
+       buf_oaf = buf_tmp;
+       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table ********************************************* */
+       buf_prd = buf_tmp;
+
+       if (tei->n_elem)
+               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               hdr->prd_tbl = 0;
+       /* reserve space for the chip's full PRD table, not just n_elem */
+       i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
+
+       buf_tmp += i;
+       buf_tmp_dma += i;
+
+       /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+       /* FIXME: probably unused, for SATA.  kept here just in case
+        * we get a STP/SATA error information record
+        */
+       slot->response = buf_tmp;
+       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+       if (mvi->flags & MVF_FLAG_SOC)
+               hdr->reserved[0] = 0;
+
+       req_len = sizeof(struct host_to_dev_fis);
+       resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
+           sizeof(struct mvs_err_info) - i;
+
+       /* request, response lengths (in dwords, packed into one word) */
+       resp_len = min(resp_len, max_resp_len);
+       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+       if (likely(!task->ata_task.device_control_reg_update))
+               task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+       /* fill in command FIS and ATAPI CDB */
+       memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+       if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+               memcpy(buf_cmd + STP_ATAPI_CMD,
+                       task->ata_task.atapi_packet, 16);
+
+       /* generate open address frame hdr (first 12 bytes) */
+       /* initiator, STP, ftype 1h */
+       buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
+       buf_oaf[1] = dev->linkrate & 0xf;
+       *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+       memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+       /* fill in PRD (scatter/gather) table, if any */
+       MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       /* point unused read PRD entries at a trash buffer (hotplug fix) */
+       if (task->data_dir == DMA_FROM_DEVICE)
+               MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
+                               TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
+#endif
+       return 0;
+}
+
+/*
+ * mvs_task_prep_ssp() - build the delivery entry, command header and
+ * per-slot DMA buffer for an SSP COMMAND or TASK (TMF) frame.
+ *
+ * The slot buffer is carved into: SSP command table (frame header + IU),
+ * open address frame, PRD scatter/gather table and status/response
+ * buffer.  When @is_tmf is set, @tmf supplies the task-management
+ * function code and (for ABORT/QUERY TASK) the tag of the task to manage.
+ *
+ * Always returns 0.
+ */
+static int mvs_task_prep_ssp(struct mvs_info *mvi,
+                            struct mvs_task_exec_info *tei, int is_tmf,
+                            struct mvs_tmf_task *tmf)
+{
+       struct sas_task *task = tei->task;
+       struct mvs_cmd_hdr *hdr = tei->hdr;
+       struct mvs_port *port = tei->port;
+       struct domain_device *dev = task->dev;
+       struct mvs_device *mvi_dev =
+               (struct mvs_device *)dev->lldd_dev;
+       struct asd_sas_port *sas_port = dev->port;
+       struct mvs_slot_info *slot;
+       void *buf_prd;
+       struct ssp_frame_hdr *ssp_hdr;
+       void *buf_tmp;
+       u8 *buf_cmd, *buf_oaf, fburst = 0;
+       dma_addr_t buf_tmp_dma;
+       u32 flags;
+       u32 resp_len, req_len, i, tag = tei->tag;
+       const u32 max_resp_len = SB_RFB_MAX;
+       u32 phy_mask;
+
+       slot = &mvi->slot_info[tag];
+
+       /* prefer the wide-port map when formed; else the sas_port's mask */
+       phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
+               sas_port->phy_mask) & TXQ_PHY_MASK;
+
+       /* queue the SSP delivery entry for this tag */
+       slot->tx = mvi->tx_prod;
+       mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+                               (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
+                               (phy_mask << TXQ_PHY_SHIFT));
+
+       flags = MCH_RETRY;
+       if (task->ssp_task.enable_first_burst) {
+               flags |= MCH_FBURST;
+               fburst = (1 << 7);
+       }
+       hdr->flags = cpu_to_le32(flags |
+                                (tei->n_elem << MCH_PRD_LEN_SHIFT) |
+                                (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
+       hdr->tags = cpu_to_le32(tag);
+       hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+       buf_cmd = buf_tmp = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+
+       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_SSP_CMD_SZ;
+       buf_tmp_dma += MVS_SSP_CMD_SZ;
+#if _MV_DUMP
+       slot->cmd_size = MVS_SSP_CMD_SZ;
+#endif
+
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       buf_oaf = buf_tmp;
+       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table ********************************************* */
+       buf_prd = buf_tmp;
+       if (tei->n_elem)
+               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               hdr->prd_tbl = 0;
+
+       i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+       buf_tmp += i;
+       buf_tmp_dma += i;
+
+       /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+       slot->response = buf_tmp;
+       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+       if (mvi->flags & MVF_FLAG_SOC)
+               hdr->reserved[0] = 0;
+
+       resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
+           sizeof(struct mvs_err_info) - i;
+       resp_len = min(resp_len, max_resp_len);
+
+       /* SSP frame header plus a 28-byte COMMAND/TASK IU */
+       req_len = sizeof(struct ssp_frame_hdr) + 28;
+
+       /* request, response lengths (in dwords, packed into one word) */
+       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+       /* generate open address frame hdr (first 12 bytes) */
+       /* initiator, SSP, ftype 1h */
+       buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
+       buf_oaf[1] = dev->linkrate & 0xf;
+       *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+       memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+       /* fill in SSP frame header (Command Table.SSP frame header) */
+       ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
+
+       if (is_tmf)
+               ssp_hdr->frame_type = SSP_TASK;
+       else
+               ssp_hdr->frame_type = SSP_COMMAND;
+
+       memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
+              HASHED_SAS_ADDR_SIZE);
+       memcpy(ssp_hdr->hashed_src_addr,
+              dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+       ssp_hdr->tag = cpu_to_be16(tag);
+
+       /* fill in IU for TASK and Command Frame */
+       buf_cmd += sizeof(*ssp_hdr);
+       memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+
+       if (ssp_hdr->frame_type != SSP_TASK) {
+               /* COMMAND IU: attributes byte then 16-byte CDB */
+               buf_cmd[9] = fburst | task->ssp_task.task_attr |
+                               (task->ssp_task.task_prio << 3);
+               memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
+       } else{
+               /* TASK IU: function code and, where needed, managed tag */
+               buf_cmd[10] = tmf->tmf;
+               switch (tmf->tmf) {
+               case TMF_ABORT_TASK:
+               case TMF_QUERY_TASK:
+                       buf_cmd[12] =
+                               (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+                       buf_cmd[13] =
+                               tmf->tag_of_task_to_be_managed & 0xff;
+                       break;
+               default:
+                       break;
+               }
+       }
+       /* fill in PRD (scatter/gather) table, if any */
+       MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+       return 0;
+}
+
+#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+/*
+ * mvs_task_exec() - deliver a chain of @num linked sas_tasks to hardware.
+ *
+ * For each task: validate the device/port, map the scatter/gather list
+ * (non-ATA only), allocate a slot tag, dispatch to the per-protocol prep
+ * routine, and advance the TX producer index.  Delivery is kicked once at
+ * the end for all successfully prepared slots.
+ *
+ * @lock:       take mvi->lock around the whole operation when non-zero.
+ * @is_tmf/@tmf: forwarded to mvs_task_prep_ssp() for task management.
+ * @completion and @gfp_flags are currently unused here.
+ *
+ * Returns 0 on success, or a negative errno / SAS_PHY_DOWN on failure.
+ */
+static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
+                               struct completion *completion, int lock,
+                               int is_tmf, struct mvs_tmf_task *tmf)
+{
+       struct domain_device *dev = task->dev;
+       struct mvs_info *mvi;
+       struct mvs_device *mvi_dev;
+       struct mvs_task_exec_info tei;
+       struct sas_task *t = task;
+       struct mvs_slot_info *slot;
+       u32 tag = 0xdeadbeef, rc, n_elem = 0;
+       u32 n = num, pass = 0;
+       unsigned long flags = 0;
+
+       /* No port at all: complete the task as undelivered, phy down. */
+       if (!dev->port) {
+               struct task_status_struct *tsm = &t->task_status;
+
+               tsm->resp = SAS_TASK_UNDELIVERED;
+               tsm->stat = SAS_PHY_DOWN;
+               t->task_done(t);
+               return 0;
+       }
+
+       mvi = mvs_find_dev_mvi(task->dev);
+
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+       do {
+               dev = t->dev;
+               mvi_dev = (struct mvs_device *)dev->lldd_dev;
+               if (DEV_IS_GONE(mvi_dev)) {
+                       if (mvi_dev)
+                               mv_dprintk("device %d not ready.\n",
+                                       mvi_dev->device_id);
+                       else
+                               mv_dprintk("device %016llx not ready.\n",
+                                       SAS_ADDR(dev->sas_addr));
+
+                       rc = SAS_PHY_DOWN;
+                       goto out_done;
+               }
+
+               /* map the libsas port id onto this host's port array */
+               if (dev->port->id >= mvi->chip->n_phy)
+                       tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
+               else
+                       tei.port = &mvi->port[dev->port->id];
+
+               if (!tei.port->port_attached) {
+                       if (sas_protocol_ata(t->task_proto)) {
+                               mv_dprintk("port %d has no attached device.\n",
+                                       dev->port->id);
+                               rc = SAS_PHY_DOWN;
+                               goto out_done;
+                       } else {
+                               struct task_status_struct *ts = &t->task_status;
+                               ts->resp = SAS_TASK_UNDELIVERED;
+                               ts->stat = SAS_PHY_DOWN;
+                               t->task_done(t);
+                               if (n > 1)
+                                       t = list_entry(t->list.next,
+                                                       struct sas_task, list);
+                               continue;
+                       }
+               }
+
+               /* ATA S/G lists are already mapped; map the rest ourselves */
+               if (!sas_protocol_ata(t->task_proto)) {
+                       if (t->num_scatter) {
+                               n_elem = dma_map_sg(mvi->dev,
+                                                   t->scatter,
+                                                   t->num_scatter,
+                                                   t->data_dir);
+                               if (!n_elem) {
+                                       rc = -ENOMEM;
+                                       goto err_out;
+                               }
+                       }
+               } else {
+                       n_elem = t->num_scatter;
+               }
+
+               rc = mvs_tag_alloc(mvi, &tag);
+               if (rc)
+                       goto err_out;
+
+               slot = &mvi->slot_info[tag];
+
+
+               t->lldd_task = NULL;
+               slot->n_elem = n_elem;
+               slot->slot_tag = tag;
+               memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+
+               tei.task = t;
+               tei.hdr = &mvi->slot[tag];
+               tei.tag = tag;
+               tei.n_elem = n_elem;
+               switch (t->task_proto) {
+               case SAS_PROTOCOL_SMP:
+                       rc = mvs_task_prep_smp(mvi, &tei);
+                       break;
+               case SAS_PROTOCOL_SSP:
+                       rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
+                       break;
+               case SAS_PROTOCOL_SATA:
+               case SAS_PROTOCOL_STP:
+               case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+                       rc = mvs_task_prep_ata(mvi, &tei);
+                       break;
+               default:
+                       dev_printk(KERN_ERR, mvi->dev,
+                               "unknown sas_task proto: 0x%x\n",
+                               t->task_proto);
+                       rc = -EINVAL;
+                       break;
+               }
+
+               if (rc) {
+                       mv_dprintk("rc is %x\n", rc);
+                       goto err_out_tag;
+               }
+               slot->task = t;
+               slot->port = tei.port;
+               t->lldd_task = (void *) slot;
+               list_add_tail(&slot->entry, &tei.port->list);
+               /* TODO: select normal or high priority */
+               spin_lock(&t->task_state_lock);
+               t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+               spin_unlock(&t->task_state_lock);
+
+               mvs_hba_memory_dump(mvi, tag, t->task_proto);
+               mvi_dev->runing_req++;
+               ++pass;
+               mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+               if (n > 1)
+                       t = list_entry(t->list.next, struct sas_task, list);
+       } while (--n);
+       rc = 0;
+       goto out_done;
+
+err_out_tag:
+       mvs_tag_free(mvi, tag);
+err_out:
+
+       dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
+       if (!sas_protocol_ata(t->task_proto))
+               if (n_elem)
+                       dma_unmap_sg(mvi->dev, t->scatter, n_elem,
+                                    t->data_dir);
+out_done:
+       /* kick hardware delivery once for everything queued this call */
+       if (likely(pass)) {
+               MVS_CHIP_DISP->start_delivery(mvi,
+                       (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+       }
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+       return rc;
+}
+
+/*
+ * mvs_queue_command() - libsas entry point for queuing I/O.
+ * Executes as an externally locked, non-TMF request.
+ */
+int mvs_queue_command(struct sas_task *task, const int num,
+                       gfp_t gfp_flags)
+{
+       int ret;
+
+       ret = mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL);
+       return ret;
+}
+
+/*
+ * mvs_slot_free() - return a slot's tag to the allocator.
+ * The low bits of an RX descriptor hold the slot index.
+ */
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+       mvs_tag_clear(mvi, rx_desc & RXQ_SLOT_MASK);
+}
+
+/*
+ * mvs_slot_task_free() - release all per-task resources held by a slot.
+ *
+ * Unmaps the scatter/gather list (non-ATA only; ATA lists were never
+ * mapped by this driver), unmaps the SMP request/response buffers for
+ * SMP tasks, unlinks the slot from its port list and finally returns the
+ * tag via mvs_slot_free().  Idempotent: a slot with no attached task is
+ * left untouched.
+ */
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+                         struct mvs_slot_info *slot, u32 slot_idx)
+{
+       /* nothing attached (or already freed) - nothing to undo */
+       if (!slot->task)
+               return;
+       if (!sas_protocol_ata(task->task_proto))
+               if (slot->n_elem)
+                       dma_unmap_sg(mvi->dev, task->scatter,
+                                    slot->n_elem, task->data_dir);
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SMP:
+               dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
+                            PCI_DMA_FROMDEVICE);
+               dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
+                            PCI_DMA_TODEVICE);
+               break;
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SSP:
+       default:
+               /* do nothing */
+               break;
+       }
+       /* detach slot from task and port before recycling the tag */
+       list_del_init(&slot->entry);
+       task->lldd_task = NULL;
+       slot->task = NULL;
+       slot->port = NULL;
+       slot->slot_tag = 0xFFFFFFFF;
+       mvs_slot_free(mvi, slot_idx);
+}
+
+/*
+ * mvs_update_wideport() - program the wide-port membership registers for
+ * the port that phy @i belongs to.
+ *
+ * Phys that are members of the wide port get the current phy bitmap
+ * written to their PHYR_WIDE_PORT register; non-members get it cleared.
+ *
+ * NOTE(review): assumes for_each_phy() iterates with @j as a shifting
+ * copy of the bitmap (bit 0 = current phy) and @no as the phy number -
+ * confirm against the macro's definition.
+ */
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct mvs_port *port = phy->port;
+       int j, no;
+
+       for_each_phy(port->wide_port_phymap, j, no) {
+               if (j & 1) {
+                       MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+                                               PHYR_WIDE_PORT);
+                       MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+                                               port->wide_port_phymap);
+               } else {
+                       MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+                                               PHYR_WIDE_PORT);
+                       MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+                                               0);
+               }
+       }
+}
+
+/*
+ * mvs_is_phy_ready() - test phy @i's readiness and tidy up on loss.
+ *
+ * Returns the raw phy control value when the phy is ready and no
+ * plug-out (POOF) event is pending; marks a portless-but-ready phy as
+ * attached.  When the phy is down, detaches it from its port: a SAS phy
+ * is removed from the wide-port bitmap (reprogramming the hardware via
+ * mvs_update_wideport()), a SATA port is simply marked unattached.
+ * Returns 0 when the phy is not ready.
+ */
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+       u32 tmp;
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct mvs_port *port = phy->port;
+
+       tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
+       if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+               if (!port)
+                       phy->phy_attached = 1;
+               return tmp;
+       }
+
+#if 0/*def SUPPORT_TARGET*/
+       if ((phy->irq_status & (PHYEV_ID_DONE | PHYEV_RDY_CH))
+               && (tmp & PHY_READY_MASK)) {
+               mv_dprintk("phy[%d] reset and check device.\n", i);
+               if (!port)
+                       phy->phy_attached = 1;
+               return tmp;
+       }
+#endif
+       /* phy went down: detach it from its port and clear its type */
+       if (port) {
+               if (phy->phy_type & PORT_TYPE_SAS) {
+                       port->wide_port_phymap &= ~(1U << i);
+                       if (!port->wide_port_phymap)
+                               port->port_attached = 0;
+                       mvs_update_wideport(mvi, i);
+               } else if (phy->phy_type & PORT_TYPE_SATA)
+                       port->port_attached = 0;
+               phy->port = NULL;
+               phy->phy_attached = 0;
+               phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       }
+       return 0;
+}
+
+/*
+ * mvs_get_d2h_reg() - read phy @i's captured SATA signature (D2H FIS)
+ * registers into @buf as four 32-bit words (SIG0..SIG3 in s[0]..s[3]).
+ *
+ * Returns @buf, or NULL when @buf is NULL.
+ */
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+       u32 *s = (u32 *) buf;
+
+       if (!s)
+               return NULL;
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+       s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+       s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+       s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+       s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       /* Workaround: take some ATAPI devices for ATA */
+       if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
+               s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
+
+       return (void *)s;
+}
+
+/* Non-zero iff a SATA signature FIS event is pending in @irq_status. */
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+       u32 sig_fis = irq_status & PHYEV_SIG_FIS;
+
+       return sig_fis;
+}
+
+/*
+ * mvs_update_phyinfo() - refresh attachment state for phy @i after a
+ * phy event.
+ *
+ * When @get_st is set, the phy's interrupt status is read (and written
+ * back at the end to acknowledge it) and readiness re-evaluated.  For a
+ * ready phy: a SATA phy is accepted only if a signature FIS has arrived
+ * (otherwise the SIG FIS interrupt is unmasked and the phy reset); a SAS
+ * phy has its identify data filled from the attached-device info.  With
+ * SUPPORT_TARGET, target-mode port attributes are also refreshed.
+ */
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct sas_identify_frame *id;
+
+       id = (struct sas_identify_frame *)phy->frame_rcvd;
+
+       if (get_st) {
+               phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
+               phy->phy_status = mvs_is_phy_ready(mvi, i);
+       }
+
+       if (phy->phy_status) {
+               int oob_done = 0;
+               struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
+
+               oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
+
+               MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
+               if (phy->phy_type & PORT_TYPE_SATA) {
+                       phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
+                       if (mvs_is_sig_fis_received(phy->irq_status)) {
+                               phy->phy_attached = 1;
+                               /* synthesize a SAS address for the SATA dev */
+                               phy->att_dev_sas_addr =
+                                       i + mvi->id * mvi->chip->n_phy;
+                               if (oob_done)
+                                       sas_phy->oob_mode = SATA_OOB_MODE;
+                               phy->frame_rcvd_size =
+                                   sizeof(struct dev_to_host_fis);
+                               mvs_get_d2h_reg(mvi, i, (void *)id);
+                       } else {
+                               u32 tmp;
+                               dev_printk(KERN_DEBUG, mvi->dev,
+                                       "Phy%d : No sig fis\n", i);
+                               /* unmask SIG FIS irq and retry via reset */
+                               tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
+                               MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
+                                               tmp | PHYEV_SIG_FIS);
+                               phy->phy_attached = 0;
+                               phy->phy_type &= ~PORT_TYPE_SATA;
+                               MVS_CHIP_DISP->phy_reset(mvi, i, 0);
+                               goto out_done;
+                       }
+               }               else if (phy->phy_type & PORT_TYPE_SAS
+                       || phy->att_dev_info & PORT_SSP_INIT_MASK) {
+                       phy->phy_attached = 1;
+                       phy->identify.device_type =
+                               phy->att_dev_info & PORT_DEV_TYPE_MASK;
+
+                       if (phy->identify.device_type == SAS_END_DEV)
+                               phy->identify.target_port_protocols =
+                                                       SAS_PROTOCOL_SSP;
+                       else if (phy->identify.device_type != NO_DEVICE)
+                               phy->identify.target_port_protocols =
+                                                       SAS_PROTOCOL_SMP;
+                       if (oob_done)
+                               sas_phy->oob_mode = SAS_OOB_MODE;
+                       phy->frame_rcvd_size =
+                           sizeof(struct sas_identify_frame);
+#ifdef SUPPORT_TARGET
+                       mvi->tgt_port[i].port_attr = mvst_check_port(mvi, i);
+                       mv_dprintk("get port %d attr %x\n", i,
+                               mvi->tgt_port[i].port_attr);
+                       if (mvi->tgt_port[i].port_attr == MVST_TGT_PORT
+                               || mvi->tgt_port[i].port_attr ==
+                               MVST_INIT_TGT_PORT) {
+                               mv_dprintk("port %d is to be tgt port.\n", i);
+                               mvi->tgt_port[i].port_attached = 1;
+                               mvst_update_wideport(mvi, i);
+                       }
+#endif
+               }
+               memcpy(sas_phy->attached_sas_addr,
+                       &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
+
+               if (MVS_CHIP_DISP->phy_work_around)
+                       MVS_CHIP_DISP->phy_work_around(mvi, i);
+       }
+       mv_dprintk("port %d attach dev info is %x\n",
+               i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
+       mv_dprintk("port %d attach sas addr is %llx\n",
+               i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
+out_done:
+       /* acknowledge the events we consumed */
+       if (get_st)
+               MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
+}
+
+/*
+ * mvs_port_notify_formed() - libsas "port formed" handler.
+ *
+ * Locates the mvs_info host owning @sas_phy by scanning the HA's phy
+ * array (phy index / phys-per-host selects the host), maps the libsas
+ * port id onto that host's port array, attaches the phy to the port and,
+ * for SAS phys, records and programs the wide-port phy bitmap.
+ *
+ * @lock: take mvi->lock around the update when non-zero.
+ */
+static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
+{
+       struct sas_ha_struct *sas_ha = sas_phy->ha;
+       struct mvs_info *mvi = NULL; int i = 0, hi;
+       struct mvs_phy *phy = sas_phy->lldd_phy;
+       struct asd_sas_port *sas_port = sas_phy->port;
+       struct mvs_port *port;
+       unsigned long flags = 0;
+       if (!sas_port)
+               return;
+
+       /* find this phy's index within the HA's phy table */
+       while (sas_ha->sas_phy[i]) {
+               if (sas_ha->sas_phy[i] == sas_phy)
+                       break;
+               i++;
+       }
+       hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
+       mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
+       if (sas_port->id >= mvi->chip->n_phy)
+               port = &mvi->port[sas_port->id - mvi->chip->n_phy];
+       else
+               port = &mvi->port[sas_port->id];
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+       port->port_attached = 1;
+       phy->port = port;
+       if (phy->phy_type & PORT_TYPE_SAS) {
+               port->wide_port_phymap = sas_port->phy_mask;
+               mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
+               mvs_update_wideport(mvi, sas_phy->id);
+       }
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+/* Port deformation needs no driver-side cleanup; intentionally empty. */
+static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
+{
+}
+
+
+/* libsas callback: a port has been formed on @sas_phy (locked path). */
+void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+       mvs_port_notify_formed(sas_phy, /*lock=*/1);
+}
+
+/* libsas callback: the port on @sas_phy has gone away (locked path). */
+void mvs_port_deformed(struct asd_sas_phy *sas_phy)
+{
+       mvs_port_notify_deformed(sas_phy, /*lock=*/1);
+}
+
+/*
+ * mvs_alloc_dev() - claim the first free device descriptor on @mvi.
+ *
+ * Scans the fixed-size device table for an entry whose dev_type is
+ * NO_DEVICE, stamps its slot index as device_id and returns it.
+ * Returns NULL (after logging) when all MVS_MAX_DEVICES are in use.
+ */
+struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+{
+       u32 idx;
+
+       for (idx = 0; idx < MVS_MAX_DEVICES; idx++) {
+               struct mvs_device *d = &mvi->devices[idx];
+
+               if (d->dev_type != NO_DEVICE)
+                       continue;
+               d->device_id = idx;
+               return d;
+       }
+
+       /* table exhausted */
+       mv_printk("max support %d devices, ignore ..\n",
+               MVS_MAX_DEVICES);
+
+       return NULL;
+}
+
+/*
+ * mvs_free_dev() - return a device descriptor to the free pool.
+ * Wipes the descriptor while preserving its slot identity, then marks
+ * it NO_DEVICE with no SATA register set mapped.
+ */
+void mvs_free_dev(struct mvs_device *mvi_dev)
+{
+       u32 saved_id = mvi_dev->device_id;
+
+       memset(mvi_dev, 0, sizeof(*mvi_dev));
+       mvi_dev->device_id = saved_id;
+       mvi_dev->dev_type = NO_DEVICE;
+       mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
+}
+
+#define DEV_IS_EXPANDER(type)  ((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+/*
+ * mvs_dev_found_notify() - libsas "device found" handler.
+ *
+ * Allocates an mvs_device descriptor for @dev and links it via
+ * dev->lldd_dev.  For devices behind an expander, the expander's phys
+ * are searched for the one whose attached SAS address matches @dev so
+ * the attached_phy can be recorded.
+ *
+ * @lock: take mvi->lock around the update when non-zero.
+ * Returns 0 on success, -1 when no descriptor is free or the attached
+ * expander phy cannot be found.
+ */
+int mvs_dev_found_notify(struct domain_device *dev, int lock)
+{
+       unsigned long flags = 0;
+       int res = 0;
+       struct mvs_info *mvi = NULL;
+       struct domain_device *parent_dev = dev->parent;
+       struct mvs_device *mvi_device;
+
+       mvi = mvs_find_dev_mvi(dev);
+
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+
+       mvi_device = mvs_alloc_dev(mvi);
+       if (!mvi_device) {
+               res = -1;
+               goto found_out;
+       }
+       dev->lldd_dev = (void *)mvi_device;
+       mvi_device->dev_type = dev->dev_type;
+
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+               int phy_id;
+               u8 phy_num = parent_dev->ex_dev.num_phys;
+               struct ex_phy *phy;
+               /* find which expander phy this device hangs off */
+               for (phy_id = 0; phy_id < phy_num; phy_id++) {
+                       phy = &parent_dev->ex_dev.ex_phy[phy_id];
+                       if (SAS_ADDR(phy->attached_sas_addr) ==
+                               SAS_ADDR(dev->sas_addr)) {
+                               mvi_device->attached_phy = phy_id;
+                               break;
+                       }
+               }
+
+               if (phy_id == phy_num) {
+                       mv_printk("Error: no attached dev:%016llx"
+                               "at ex:%016llx.\n",
+                               SAS_ADDR(dev->sas_addr),
+                               SAS_ADDR(parent_dev->sas_addr));
+                       res = -1;
+               }
+       }
+
+found_out:
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+       return res;
+}
+
+/* libsas callback: new domain device discovered (locked path). */
+int mvs_dev_found(struct domain_device *dev)
+{
+       return mvs_dev_found_notify(dev, /*lock=*/1);
+}
+
+/*
+ * mvs_dev_gone_notify() - libsas "device gone" handler.
+ *
+ * Releases the device's SATA register set and its mvs_device descriptor,
+ * then clears dev->lldd_dev.  Safe to call when no descriptor was ever
+ * attached (only logs).
+ *
+ * @lock: take mvi->lock around the teardown when non-zero.
+ */
+void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+{
+       unsigned long flags = 0;
+       struct mvs_info *mvi;
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+
+       mvi = mvs_find_dev_mvi(dev);
+
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+
+       if (mvi_dev) {
+               mv_dprintk("found dev[%d:%x] is gone.\n",
+                       mvi_dev->device_id, mvi_dev->dev_type);
+               mvs_free_reg_set(mvi, mvi_dev);
+               mvs_free_dev(mvi_dev);
+       } else {
+               mv_dprintk("found dev has gone.\n");
+       }
+       dev->lldd_dev = NULL;
+
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+
+/* libsas callback: domain device departed (locked path). */
+void mvs_dev_gone(struct domain_device *dev)
+{
+       mvs_dev_gone_notify(dev, /*lock=*/1);
+}
+
+/*
+ * mvs_alloc_task() - allocate a zeroed sas_task for internal use, with
+ * its list head, state lock, timer and completion initialized.
+ * Returns NULL on allocation failure.
+ */
+static  struct sas_task *mvs_alloc_task(void)
+{
+       struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+       if (!task)
+               return NULL;
+
+       INIT_LIST_HEAD(&task->list);
+       spin_lock_init(&task->task_state_lock);
+       task->task_state_flags = SAS_TASK_STATE_PENDING;
+       init_timer(&task->timer);
+       init_completion(&task->completion);
+       return task;
+}
+
+/* Free an internally allocated sas_task; NULL is a no-op. */
+static  void mvs_free_task(struct sas_task *task)
+{
+       if (!task)
+               return;
+       /* freeing a task still linked on a list would corrupt the list */
+       BUG_ON(!list_empty(&task->list));
+       kfree(task);
+}
+
+/*
+ * mvs_task_done() - completion callback for internal tasks.
+ * Wakes the waiter only if we cancel the timeout timer first; if the
+ * timer already fired, the timeout path owns the completion.
+ */
+static void mvs_task_done(struct sas_task *task)
+{
+       if (del_timer(&task->timer))
+               complete(&task->completion);
+}
+
+/*
+ * mvs_tmf_timedout() - timer expiry for an internal TMF task: mark it
+ * aborted and wake whoever is waiting on its completion.
+ */
+static void mvs_tmf_timedout(unsigned long data)
+{
+       struct sas_task *task;
+
+       task = (struct sas_task *)data;
+       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+       complete(&task->completion);
+}
+
+/* Timeout (seconds) for an internally issued TMF task. */
+#define MVS_TASK_TIMEOUT 20
+/*
+ * Build and execute an internal (driver-generated) task-management task
+ * against @dev, retrying up to three times on soft failures.
+ *
+ * @parameter/@para_len: SSP task template copied into the new task.
+ * @tmf: task-management function descriptor handed to mvs_task_exec().
+ *
+ * Returns TMF_RESP_FUNC_COMPLETE on success, the residual byte count on
+ * a data underrun, or a negative value on error/timeout.
+ */
+static int mvs_exec_internal_tmf_task(struct domain_device *dev,
+                       void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
+{
+       int res, retry;
+       struct sas_task *task = NULL;
+
+       for (retry = 0; retry < 3; retry++) {
+               task = mvs_alloc_task();
+               if (!task)
+                       return -ENOMEM;
+
+               task->dev = dev;
+               task->task_proto = dev->tproto;
+
+               memcpy(&task->ssp_task, parameter, para_len);
+               task->task_done = mvs_task_done;
+
+               /* arm the TMF timeout before issuing the task */
+               task->timer.data = (unsigned long) task;
+               task->timer.function = mvs_tmf_timedout;
+               task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
+               add_timer(&task->timer);
+
+               res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf);
+
+               if (res) {
+                       del_timer(&task->timer);
+                       mv_printk("executing internal task failed:%d\n", res);
+                       goto ex_err;
+               }
+
+               wait_for_completion(&task->completion);
+               /* NOTE(review): negated enum matches the driver's existing
+                * convention for a failed-TMF sentinel */
+               res = -TMF_RESP_FUNC_FAILED;
+               /* Even TMF timed out, return direct. */
+               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+                               mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
+                               goto ex_err;
+                       }
+               }
+
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                   task->task_status.stat == SAM_GOOD) {
+                       res = TMF_RESP_FUNC_COMPLETE;
+                       break;
+               }
+
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_UNDERRUN) {
+                       /* no error, but return the number of bytes of
+                        * underrun */
+                       res = task->task_status.residual;
+                       break;
+               }
+
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_OVERRUN) {
+                       mv_dprintk("blocked task error.\n");
+                       res = -EMSGSIZE;
+                       break;
+               } else {
+                       /* unexpected response: log, free the task and retry */
+                       mv_dprintk(" task to dev %016llx response: 0x%x "
+                                   "status 0x%x\n",
+                                   SAS_ADDR(dev->sas_addr),
+                                   task->task_status.resp,
+                                   task->task_status.stat);
+                       mvs_free_task(task);
+                       task = NULL;
+
+               }
+       }
+ex_err:
+       BUG_ON(retry == 3 && task != NULL);
+       if (task != NULL)
+               mvs_free_task(task);
+       return res;
+}
+
+/*
+ * Issue an SSP task-management function @tmf to @dev addressed at the
+ * 8-byte @lun.  Returns TMF_RESP_FUNC_ESUPP for non-SSP devices,
+ * otherwise whatever mvs_exec_internal_tmf_task() reports.
+ */
+static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
+                               u8 *lun, struct mvs_tmf_task *tmf)
+{
+       struct sas_ssp_task ssp_task;
+       if (!(dev->tproto & SAS_PROTOCOL_SSP))
+               return TMF_RESP_FUNC_ESUPP;
+
+       /* zero the whole template: it is copied verbatim into the
+        * outgoing frame and only the LUN field is filled in here */
+       memset(&ssp_task, 0, sizeof(ssp_task));
+       /* memcpy, not strncpy: a LUN is binary data and routinely
+        * contains zero bytes that strncpy would stop at */
+       memcpy((u8 *)&ssp_task.LUN, lun, 8);
+
+       return mvs_exec_internal_tmf_task(dev, &ssp_task,
+                               sizeof(ssp_task), tmf);
+}
+
+
+/*
+ * RECOVERY-only I_T nexus reset.  The standard mandates a link reset
+ * for ATA/STP ends (type 0) and a hard reset for SSP (type 1).
+ */
+static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
+{
+       struct sas_phy *phy = sas_find_local_phy(dev);
+       int reset_type;
+       int rc;
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+               reset_type = 0;         /* link reset */
+       else
+               reset_type = 1;         /* hard reset */
+
+       rc = sas_phy_reset(phy, reset_type);
+       /* give the link time to renegotiate before callers retry I/O */
+       msleep(2000);
+       return rc;
+}
+
+/* mandatory SAM-3 */
+/*
+ * Logical-unit reset: send TMF_LU_RESET to @dev/@lun and, on success,
+ * reap every slot this device still owns on each phy of its port.
+ * On failure libsas falls through to the I_T nexus reset handler.
+ */
+int mvs_lu_reset(struct domain_device *dev, u8 *lun)
+{
+       unsigned long flags;
+       int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_tmf_task tmf_task;
+       struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+       struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+
+       tmf_task.tmf = TMF_LU_RESET;
+
+       rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+       if (rc == TMF_RESP_FUNC_COMPLETE) {
+               num = mvs_find_dev_phyno(dev, phyno);
+               /* mvi->lock protects the per-port slot lists while we
+                * complete outstanding tasks for this device */
+               spin_lock_irqsave(&mvi->lock, flags);
+               for (i = 0; i < num; i++)
+                       mvs_release_task(mvi, phyno[i], dev);
+               spin_unlock_irqrestore(&mvi->lock, flags);
+       }
+       /* If failed, fall-through I_T_Nexus reset */
+       mv_printk("%s for device[%x]:rc= %d\n", __func__,
+                       mvi_dev->device_id, rc);
+       return rc;
+}
+
+/*
+ * I_T nexus reset: phy-level reset of the link to @dev, then reap all
+ * slots the device still owns (done unconditionally, even if the reset
+ * itself failed, so no task is left dangling on a dead nexus).
+ */
+int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+       unsigned long flags;
+       int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+
+       rc = mvs_debug_I_T_nexus_reset(dev);
+       mv_printk("%s for device[%x]:rc= %d\n",
+               __func__, mvi_dev->device_id, rc);
+
+       /* housekeeper */
+       num = mvs_find_dev_phyno(dev, phyno);
+       spin_lock_irqsave(&mvi->lock, flags);
+       for (i = 0; i < num; i++)
+               mvs_release_task(mvi, phyno[i], dev);
+       spin_unlock_irqrestore(&mvi->lock, flags);
+
+       return rc;
+}
+/* optional SAM-3 */
+/*
+ * Query whether @task is still present in the target's task set, via
+ * TMF_QUERY_TASK addressed by the task's hardware tag.  Only SSP tasks
+ * with LLDD state are queried; anything else returns FAILED.
+ */
+int mvs_query_task(struct sas_task *task)
+{
+       u32 tag;
+       struct scsi_lun lun;
+       struct mvs_tmf_task tmf_task;
+       int rc = TMF_RESP_FUNC_FAILED;
+
+       if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+               struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+               struct domain_device *dev = task->dev;
+               struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+
+               int_to_scsilun(cmnd->device->lun, &lun);
+               /* mvs_find_tag() returns 0 when the task has no tag */
+               rc = mvs_find_tag(mvi, task, &tag);
+               if (rc == 0) {
+                       rc = TMF_RESP_FUNC_FAILED;
+                       return rc;
+               }
+
+               tmf_task.tmf =  TMF_QUERY_TASK;
+               tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+               rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+               /* NOTE(review): the switch below is a deliberate no-op --
+                * every outcome is simply passed back to libsas, which
+                * decides whether to escalate */
+               switch (rc) {
+               /* The task is still in Lun, release it then */
+               case TMF_RESP_FUNC_SUCC:
+               /* The task is not in Lun or failed, reset the phy */
+               case TMF_RESP_FUNC_FAILED:
+               case TMF_RESP_FUNC_COMPLETE:
+                       break;
+               }
+       }
+       mv_printk("%s:rc= %d\n", __func__, rc);
+       return rc;
+}
+
+/*  mandatory SAM-3, still need free task/slot info */
+/*
+ * Abort a single outstanding @task.  For SSP: look up the hardware tag,
+ * send TMF_ABORT_TASK, and on success complete the slot locally so the
+ * resources are reclaimed.  SATA/STP and SMP paths are placeholders.
+ */
+int mvs_abort_task(struct sas_task *task)
+{
+       unsigned long flags;
+       u32 tag;
+       int i;
+       struct scsi_lun lun;
+       struct mvs_tmf_task tmf_task;
+       int rc = TMF_RESP_FUNC_FAILED;
+
+       /* if the task already completed there is nothing to abort */
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+               rc = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+       if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+               struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+               struct domain_device *dev = task->dev;
+               struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+
+               int_to_scsilun(cmnd->device->lun, &lun);
+               rc = mvs_find_tag(mvi, task, &tag);
+               if (rc == 0) {
+                       mv_printk("No such tag in %s\n", __func__);
+                       rc = TMF_RESP_FUNC_FAILED;
+                       /* NOTE(review): returns directly, bypassing the
+                        * logging at the out: label */
+                       return rc;
+               }
+               /* dump the CDB of the command being aborted */
+               mv_printk("Abort tag[%x]:[", tag);
+               for (i = 0; i < 16; i++)
+                       mv_printk("%02x ", cmnd->cmnd[i]);
+               mv_printk("]\n");
+
+               tmf_task.tmf =  TMF_ABORT_TASK;
+               tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+               rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+
+               /* if successful, clear the task and callback forwards.*/
+               if (rc == TMF_RESP_FUNC_COMPLETE) {
+                       u32 slot_no;
+                       struct mvs_slot_info *slot;
+                       struct mvs_info *mvi = mvs_find_dev_mvi(dev);
+
+                       if (task->lldd_task) {
+                               slot = (struct mvs_slot_info *)task->lldd_task;
+                               /* slot number = offset into the slot array */
+                               slot_no = (u32) (slot - mvi->slot_info);
+                               mvs_slot_complete(mvi, slot_no, 1);
+                       }
+               }
+       } else if (task->task_proto & SAS_PROTOCOL_SATA ||
+               task->task_proto & SAS_PROTOCOL_STP) {
+               /* to do free register_set */
+       } else {
+               /* SMP */
+
+       }
+out:
+       if (rc != TMF_RESP_FUNC_COMPLETE)
+               mv_printk("%s:rc= %d\n", __func__, rc);
+       return rc;
+}
+
+/* SAM-3 ABORT TASK SET: abort all tasks for @dev at @lun. */
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+       struct mvs_tmf_task tmf_task;
+
+       tmf_task.tmf = TMF_ABORT_TASK_SET;
+
+       return mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+}
+
+/* SAM-3 CLEAR ACA: clear the auto-contingent-allegiance condition. */
+int mvs_clear_aca(struct domain_device *dev, u8 *lun)
+{
+       struct mvs_tmf_task tmf_task;
+
+       tmf_task.tmf = TMF_CLEAR_ACA;
+
+       return mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+}
+
+/* SAM-3 CLEAR TASK SET: drop every task in the set for @dev/@lun. */
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+       struct mvs_tmf_task tmf_task;
+
+       tmf_task.tmf = TMF_CLEAR_TASK_SET;
+
+       return mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+}
+
+/*
+ * Fill the libsas ATA response buffer from the D2H FIS the chip
+ * captured for this device's register set.  Returns SAM_GOOD, or
+ * SAS_PROTO_RESPONSE when @err is set so libsas parses the FIS for
+ * the error details.
+ */
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+                       u32 slot_idx, int err)
+{
+       struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev;
+       struct task_status_struct *tstat = &task->task_status;
+       struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+       int stat = SAM_GOOD;
+
+
+       resp->frame_len = sizeof(struct dev_to_host_fis);
+       /* copy the received device-to-host FIS out of the chip buffer */
+       memcpy(&resp->ending_fis[0],
+              SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
+              sizeof(struct dev_to_host_fis));
+       tstat->buf_valid_size = sizeof(*resp);
+       if (unlikely(err))
+               stat = SAS_PROTO_RESPONSE;
+       return stat;
+}
+
+/*
+ * Decode the error-info record of a failed slot and translate it to a
+ * libsas status.  Also kicks the hardware: stops an issue-stopped
+ * command queue and re-activates the slot's command.
+ */
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+                        u32 slot_idx)
+{
+       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+       int stat;
+       /* first dword of the response buffer holds the error flags */
+       u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+       u32 tfs = 0;
+       enum mvs_port_type type = PORT_TYPE_SAS;
+
+       if (err_dw0 & CMD_ISS_STPD)
+               MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
+
+       MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+       stat = SAM_CHECK_COND;
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+               stat = SAS_ABORTED_TASK;
+               break;
+       case SAS_PROTOCOL_SMP:
+               stat = SAM_CHECK_COND;
+               break;
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+       {
+               /* NOTE(review): meaning of this magic value is unknown
+                * even to the original author */
+               if (err_dw0 == 0x80400002)
+                       mv_printk("find reserved error, why?\n");
+
+               task->ata_task.use_ncq = 0;
+               stat = SAS_PROTO_RESPONSE;
+               /* err=1: hand libsas the captured FIS for diagnosis */
+               mvs_sata_done(mvi, task, slot_idx, 1);
+
+       }
+               break;
+       default:
+               break;
+       }
+
+       return stat;
+}
+
+/*
+ * Complete the slot named by RX descriptor @rx_desc: translate the
+ * hardware completion into a libsas task status, free the slot, and
+ * invoke the task's done callback.
+ *
+ * Called with mvi->lock held; the lock is dropped around task_done()
+ * because the callback may re-enter the driver.
+ *
+ * @flags: non-zero forces the "port gone" path (used when reaping
+ *         tasks on unplug/abort).
+ * Returns the resulting status, or -1 for aborted/stale slots.
+ */
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+       struct sas_task *task = slot->task;
+       struct mvs_device *mvi_dev = NULL;
+       struct task_status_struct *tstat;
+
+       bool aborted;
+       void *to;
+       enum exec_status sts;
+
+       /* slot already reaped or never owned a task */
+       if (unlikely(!task || !task->lldd_task))
+               return -1;
+
+       tstat = &task->task_status;
+       mvi_dev = (struct mvs_device *)task->dev->lldd_dev;
+
+       mvs_hba_cq_dump(mvi);
+
+       spin_lock(&task->task_state_lock);
+       task->task_state_flags &=
+               ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+       task->task_state_flags |= SAS_TASK_STATE_DONE;
+       /* race condition*/
+       aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+       spin_unlock(&task->task_state_lock);
+
+       memset(tstat, 0, sizeof(*tstat));
+       tstat->resp = SAS_TASK_COMPLETE;
+
+       /* task was aborted upstream: free resources, no callback */
+       if (unlikely(aborted)) {
+               tstat->stat = SAS_ABORTED_TASK;
+               if (mvi_dev)
+                       mvi_dev->runing_req--;
+               if (sas_protocol_ata(task->task_proto))
+                       mvs_free_reg_set(mvi, mvi_dev);
+
+               mvs_slot_task_free(mvi, task, slot, slot_idx);
+               return -1;
+       }
+
+       if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
+               mv_dprintk("port has not device.\n");
+               tstat->stat = SAS_PHY_DOWN;
+               goto out;
+       }
+
+       /*
+       if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
+                mv_dprintk("Find device[%016llx] RXQ_ERR %X,
+                err info:%016llx\n",
+                SAS_ADDR(task->dev->sas_addr),
+                rx_desc, (u64)(*(u64 *) slot->response));
+       }
+       */
+
+       /* error info record present */
+       if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+               tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+               goto out;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+               /* hw says status == 0, datapres == 0 */
+               if (rx_desc & RXQ_GOOD) {
+                       tstat->stat = SAM_GOOD;
+                       tstat->resp = SAS_TASK_COMPLETE;
+               }
+               /* response frame present */
+               else if (rx_desc & RXQ_RSP) {
+                       struct ssp_response_iu *iu = slot->response +
+                                               sizeof(struct mvs_err_info);
+                       sas_ssp_task_response(mvi->dev, task, iu);
+               } else
+                       tstat->stat = SAM_CHECK_COND;
+               break;
+
+       case SAS_PROTOCOL_SMP: {
+                       /* copy the SMP response into the caller's sg buffer */
+                       struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+                       tstat->stat = SAM_GOOD;
+                       to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+                       memcpy(to + sg_resp->offset,
+                               slot->response + sizeof(struct mvs_err_info),
+                               sg_dma_len(sg_resp));
+                       kunmap_atomic(to, KM_IRQ0);
+                       break;
+               }
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+                       tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+                       break;
+               }
+
+       default:
+               tstat->stat = SAM_CHECK_COND;
+               break;
+       }
+
+out:
+       if (mvi_dev)
+               mvi_dev->runing_req--;
+       if (sas_protocol_ata(task->task_proto))
+               mvs_free_reg_set(mvi, mvi_dev);
+
+       mvs_slot_task_free(mvi, task, slot, slot_idx);
+       sts = tstat->stat;
+
+       /* drop mvi->lock (held by caller) around the libsas callback */
+       spin_unlock(&mvi->lock);
+       if (task->task_done)
+               task->task_done(task);
+       else
+               mv_dprintk("why has not task_done.\n");
+       spin_lock(&mvi->lock);
+
+       return sts;
+}
+
+/*
+ * Force-complete every slot on the port of phy @phy_no.  If @dev is
+ * non-NULL only that device's tasks are reaped; NULL reaps them all.
+ * Caller holds mvi->lock (see mvs_lu_reset/mvs_I_T_nexus_reset).
+ */
+void mvs_release_task(struct mvs_info *mvi,
+               int phy_no, struct domain_device *dev)
+{
+       int i = 0; u32 slot_idx;
+       struct mvs_phy *phy;
+       struct mvs_port *port;
+       struct mvs_slot_info *slot, *slot2;
+
+       phy = &mvi->phy[phy_no];
+       port = phy->port;
+       if (!port)
+               return;
+
+       /* _safe variant: mvs_slot_complete() unlinks the entry */
+       list_for_each_entry_safe(slot, slot2, &port->list, entry) {
+               struct sas_task *task;
+               slot_idx = (u32) (slot - mvi->slot_info);
+               task = slot->task;
+
+               if (dev && task->dev != dev)
+                       continue;
+
+               mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
+                       slot_idx, slot->slot_tag, task);
+
+               if (task->task_proto & SAS_PROTOCOL_SSP) {
+                       mv_printk("attached with SSP task CDB[");
+                       for (i = 0; i < 16; i++)
+                               mv_printk(" %02x", task->ssp_task.cdb[i]);
+                       mv_printk(" ]\n");
+               }
+
+               /* flags=1 forces the SAS_PHY_DOWN completion path */
+               mvs_slot_complete(mvi, slot_idx, 1);
+       }
+}
+
+/* Clear cached attachment state for a phy whose device went away. */
+static void mvs_phy_disconnected(struct mvs_phy *phy)
+{
+       phy->att_dev_sas_addr = 0;
+       phy->att_dev_info = 0;
+       phy->phy_attached = 0;
+}
+
+/*
+ * Deferred-work handler queued by mvs_handle_event().  The #if selects
+ * the workqueue callback signature: kernels > 2.6.20 pass the
+ * work_struct, older ones pass an opaque pointer.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
+static void mvs_work_queue(struct work_struct *work)
+{
+       struct delayed_work *dw = container_of(work, struct delayed_work, work);
+#else
+static void mvs_work_queue(void *arg)
+{
+       struct delayed_work *dw = arg;
+#endif
+       struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
+       struct mvs_info *mvi = mwq->mvi;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mvi->lock, flags);
+       if (mwq->handler & PHY_PLUG_EVENT) {
+               u32 phy_no = (unsigned long) mwq->data;
+               struct sas_ha_struct *sas_ha = mvi->sas;
+               struct mvs_phy *phy = &mvi->phy[phy_no];
+               struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+               if (phy->phy_event & PHY_PLUG_OUT) {
+                       u32 tmp;
+                       struct sas_identify_frame *id;
+                       id = (struct sas_identify_frame *)phy->frame_rcvd;
+                       /* re-read the phy state: the device may have come
+                        * back during the 2 s delay */
+                       tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
+                       phy->phy_event &= ~PHY_PLUG_OUT;
+                       if (!(tmp & PHY_READY_MASK)) {
+                               /* still gone: report loss of signal */
+                               sas_phy_disconnected(sas_phy);
+                               mvs_phy_disconnected(phy);
+                               sas_ha->notify_phy_event(sas_phy,
+                                       PHYE_LOSS_OF_SIGNAL);
+                               mv_dprintk("phy%d Removed Device\n", phy_no);
+                       } else {
+                               /* came back: rediscover and re-form port */
+                               MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+                               mvs_update_phyinfo(mvi, phy_no, 1);
+                               mvs_bytes_dmaed(mvi, phy_no);
+                               mvs_port_notify_formed(sas_phy, 0);
+                               mv_dprintk("phy%d Attached Device\n", phy_no);
+                       }
+               }
+       }
+       list_del(&mwq->entry);
+       spin_unlock_irqrestore(&mvi->lock, flags);
+       kfree(mwq);
+}
+
+/*
+ * Queue a delayed (2 s) work item so @handler/@data are processed by
+ * mvs_work_queue() outside interrupt context.
+ * Returns 0 on success, -ENOMEM if the work item cannot be allocated.
+ */
+static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
+{
+       struct mvs_wq *mwq;
+
+       /* GFP_ATOMIC: may be called from interrupt context */
+       mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
+       if (!mwq)
+               return -ENOMEM;
+
+       mwq->mvi = mvi;
+       mwq->data = data;
+       mwq->handler = handler;
+       MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
+       list_add_tail(&mwq->entry, &mvi->wq_list);
+       schedule_delayed_work(&mwq->work_q, HZ * 2);
+
+       return 0;
+}
+
+/*
+ * Timer callback armed in mvs_int_port() while waiting for a SATA
+ * signature FIS: if it never arrived, hard-reset the phy.  The loop
+ * recovers the phy index from the pointer.
+ */
+static void mvs_sig_time_out(unsigned long tphy)
+{
+       struct mvs_phy *phy = (struct mvs_phy *)tphy;
+       struct mvs_info *mvi = phy->mvi;
+       u8 phy_no;
+
+       for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
+               if (&mvi->phy[phy_no] == phy) {
+                       mv_dprintk("Get signature time out, reset phy %d\n",
+                               phy_no+mvi->id*mvi->chip->n_phy);
+                       /* 1 = hard reset */
+                       MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+               }
+       }
+}
+
+/* Cancel the signature timeout timer; a NULL function field doubles
+ * as the "timer not armed" marker (see mvs_int_port). */
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+       if (phy->timer.function)
+               del_timer(&phy->timer);
+       phy->timer.function = NULL;
+}
+
+/*
+ * Per-port interrupt service: read and decode the port's interrupt
+ * status, handle unplug (PHYEV_POOF), COMWAKE, signature-FIS/ID-done
+ * plug-in, and broadcast-change events, then acknowledge the status.
+ */
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+       u32 tmp;
+       struct sas_ha_struct *sas_ha = mvi->sas;
+       struct mvs_phy *phy = &mvi->phy[phy_no];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+       phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
+       mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+               MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
+       mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+               phy->irq_status);
+
+       /*
+       * events is port event now ,
+       * we need check the interrupt status which belongs to per port.
+       */
+
+       if (phy->irq_status & PHYEV_DCDR_ERR)
+               mv_dprintk("port %d STP decoding error.\n",
+               phy_no+mvi->id*mvi->chip->n_phy);
+
+       /* PHYEV_POOF: the link partner disappeared (unplug) */
+       if (phy->irq_status & PHYEV_POOF) {
+#ifdef SUPPORT_TARGET
+               /*if initiator plug out*/
+               mv_dprintk("port %d attr is %x\n", phy_no,
+                       mvi->tgt_port[phy_no].port_attr);
+               if (mvi->tgt_port[phy_no].port_attr == MVST_TGT_PORT
+                       || mvi->tgt_port[phy_no].port_attr ==
+                       MVST_INIT_TGT_PORT) {
+                       /* target-mode port: report loss of signal now */
+                       phy->phy_event |= PHY_PLUG_OUT;
+                       mvs_release_task(mvi, phy_no, NULL);
+                       sas_phy_disconnected(sas_phy);
+                       mvs_phy_disconnected(phy);
+                       mv_dprintk("notify plug out on phy[%d]\n", phy_no +
+                               mvi->id*mvi->chip->n_phy);
+                       sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+               } else
+#endif
+               {
+                       /* initiator mode: defer the decision to the delayed
+                        * work item so a quick replug is not reported as a
+                        * device loss */
+                       if (!(phy->phy_event & PHY_PLUG_OUT)) {
+                               int dev_sata = phy->phy_type & PORT_TYPE_SATA;
+                               int ready;
+                               mvs_release_task(mvi, phy_no, NULL);
+                               phy->phy_event |= PHY_PLUG_OUT;
+                               mvs_handle_event(mvi,
+                                       (void *)(unsigned long)phy_no,
+                                       PHY_PLUG_EVENT);
+                               ready = mvs_is_phy_ready(mvi, phy_no);
+                               if (!ready)
+                                       mv_dprintk("phy%d Unplug Notice\n",
+                                               phy_no +
+                                               mvi->id * mvi->chip->n_phy);
+                               if (ready || dev_sata) {
+                                       /* reset the phy/STP link and bail;
+                                        * remaining events are stale */
+                                       if (MVS_CHIP_DISP->stp_reset)
+                                               MVS_CHIP_DISP->stp_reset(mvi,
+                                                               phy_no);
+                                       else
+                                               MVS_CHIP_DISP->phy_reset(mvi,
+                                                               phy_no, 0);
+                                       return;
+                               }
+                       }
+               }
+       }
+
+       /* COMWAKE: SATA device waking up -- unmask the signature-FIS
+        * interrupt and arm a 10 s timeout for it */
+       if (phy->irq_status & PHYEV_COMWAKE) {
+               tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
+               MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
+                                       tmp | PHYEV_SIG_FIS);
+               if (phy->timer.function == NULL) {
+                       phy->timer.data = (unsigned long)phy;
+                       phy->timer.function = mvs_sig_time_out;
+                       phy->timer.expires = jiffies + 10*HZ;
+                       add_timer(&phy->timer);
+               }
+       }
+       if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
+               /* plug-in: signature FIS (SATA) or identify done (SAS) */
+               phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
+               mvs_sig_remove_timer(phy);
+               mv_dprintk("notify plug in on phy[%d]\n", phy_no);
+               if (phy->phy_status) {
+                       mdelay(10);
+                       MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+                       if (phy->phy_type & PORT_TYPE_SATA) {
+                               /* mask the signature-FIS irq again */
+                               tmp = MVS_CHIP_DISP->read_port_irq_mask(
+                                               mvi, phy_no);
+                               tmp &= ~PHYEV_SIG_FIS;
+                               MVS_CHIP_DISP->write_port_irq_mask(mvi,
+                                                       phy_no, tmp);
+                       }
+                       mvs_update_phyinfo(mvi, phy_no, 0);
+                       mvs_bytes_dmaed(mvi, phy_no);
+                       /* whether driver is going to handle hot plug */
+                       if (phy->phy_event & PHY_PLUG_OUT) {
+                               mvs_port_notify_formed(sas_phy, 0);
+                               phy->phy_event &= ~PHY_PLUG_OUT;
+                       }
+               } else {
+                       mv_dprintk("plugin interrupt but phy%d is gone\n",
+                               phy_no + mvi->id*mvi->chip->n_phy);
+               }
+       } else if (phy->irq_status & PHYEV_BROAD_CH) {
+               mv_dprintk("port %d broadcast change.\n",
+                       phy_no + mvi->id*mvi->chip->n_phy);
+               /* exception for Samsung disk drive*/
+               mdelay(1000);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+       }
+       /* acknowledge everything we handled */
+       MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+}
+
+/*
+ * Drain the RX (completion) ring: walk from the cached consumer index
+ * to the hardware producer index, dispatching each descriptor to slot
+ * completion, target-mode handlers, or slot free.  @self_clear asks
+ * the chip-specific handler to service attention conditions here.
+ * Returns 0 in all cases.
+ */
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
+{
+       u32 rx_prod_idx, rx_desc;
+       bool attn = false;
+
+       /* the first dword in the RX ring is special: it contains
+        * a mirror of the hardware's RX producer index, so that
+        * we don't have to stall the CPU reading that register.
+        * The actual RX ring is offset by one dword, due to this.
+        */
+       rx_prod_idx = mvi->rx_cons;
+       mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+       if (mvi->rx_cons == 0xfff)      /* h/w hasn't touched RX ring yet */
+               return 0;
+
+       /* The CMPL_Q may come late, read from register and try again
+       * note: if coalescing is enabled,
+       * it will need to read from register every time for sure
+       */
+       if (unlikely(mvi->rx_cons == rx_prod_idx))
+               mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
+
+       if (mvi->rx_cons == rx_prod_idx)
+               return 0;
+
+       while (mvi->rx_cons != rx_prod_idx) {
+               /* increment our internal RX consumer pointer */
+               rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
+               /* +1 skips the producer-index mirror dword */
+               rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
+#ifdef SUPPORT_TARGET
+               /* target mode: new incoming SSP commands are routed to
+                * the SCST target handler instead of slot completion */
+               if ((mvs_tgt.tgt_rsp_ssp_cmd) && MVST_IN_TARGET_MODE(mvi)) {
+                       u8 rc = 0;
+                       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+                       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+                       if ((!slot->slot_scst_cmd) && (rx_desc & RXQ_CMD_RX)) {
+                               /*  target command */
+                               mvi->rx[rx_prod_idx + 1] = 0;
+                               rc = mvs_tgt.tgt_rsp_ssp_cmd(mvi, rx_desc);
+                               if (!rc)
+                                       continue;
+                       }
+               }
+#endif
+
+
+               if (likely(rx_desc & RXQ_DONE)) {
+#ifdef SUPPORT_TARGET
+                       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+                       struct mvs_cmd_header  *cmd_hdr =
+                               (struct mvs_cmd_header  *)&mvi->slot[slot_idx];
+                       /* check complete command type */
+                       if (cmd_hdr->ssp_frame_type == MCH_SSP_FR_CMD) {
+                               mvs_slot_complete(mvi, rx_desc, 0);
+                       } else  {
+                               if (mvs_tgt.tgt_cmd_cmpl)
+                                       mvs_tgt.tgt_cmd_cmpl(mvi, rx_desc);
+                       }
+#else
+                       mvs_slot_complete(mvi, rx_desc, 0);
+#endif
+               }
+               if (rx_desc & RXQ_ATTN) {
+                       /* attention condition: serviced after the drain */
+                       attn = true;
+               } else if (rx_desc & RXQ_ERR) {
+                       if (!(rx_desc & RXQ_DONE))
+                               mvs_slot_complete(mvi, rx_desc, 0);
+               } else if (rx_desc & RXQ_SLOT_RESET) {
+                       mvs_slot_free(mvi, rx_desc);
+               }
+       }
+
+       if (attn && self_clear)
+               MVS_CHIP_DISP->int_full(mvi);
+       return 0;
+}
+
diff --git a/mvsas_tgt/mv_sas.h b/mvsas_tgt/mv_sas.h
new file mode 100644 (file)
index 0000000..66b598b
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_SAS_H_
+#define _MV_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <linux/version.h>
+#include "mv_defs.h"
+#ifdef SUPPORT_TARGET
+#include "mv_tgt.h"
+#endif
+
+#define DRV_NAME               "mvsas"
+#define DRV_VERSION            "0.8.2"
+#define _MV_DUMP               0
+#define MVS_ID_NOT_MAPPED      0x7f
+/* #define DISABLE_HOTPLUG_DMA_FIX */
+
+#define WIDE_PORT_MAX_PHY              4
+#define        MV_DISABLE_NCQ  0
+#define mv_printk(fmt, arg ...)        \
+       printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
+#ifdef MV_DEBUG
+#define mv_dprintk(format, arg...)     \
+       printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
+#else
+#define mv_dprintk(format, arg...)
+#endif
+#define MV_MAX_U32                     0xffffffff
+
+extern struct mvs_tgt_initiator mvs_tgt;
+extern struct mvs_info *tgt_mvi;
+extern const struct mvs_dispatch mvs_64xx_dispatch;
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+
+#define bit(n) ((u32)1 << n)
+
+#define for_each_phy(__lseq_mask, __mc, __lseq)                        \
+       for ((__mc) = (__lseq_mask), (__lseq) = 0;              \
+                                       (__mc) != 0 ;           \
+                                       (++__lseq), (__mc) >>= 1)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
+#include <scsi.h>
+#define sg_page(sg)    (sg->page)
+#define for_each_sg(sglist, sg, nr, __i)       \
+       for (__i = 0, sg = (sglist); __i < (nr); __i++, sg++)
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
+#define SAS_PROTOCOL_ALL SAS_PROTO_ALL
+#endif
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
+#define MV_INIT_DELAYED_WORK(w, f, d)  INIT_DELAYED_WORK(w, f)
+#else
+#define MV_INIT_DELAYED_WORK(w, f, d)  INIT_DELAYED_WORK(w, f, (void *) d)
+#endif
+
+#define UNASSOC_D2H_FIS(id)            \
+       ((void *) mvi->rx_fis + 0x100 * id)
+#define SATA_RECEIVED_FIS_LIST(reg_set)        \
+       ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
+#define SATA_RECEIVED_SDB_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
+#define SATA_RECEIVED_D2H_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
+#define SATA_RECEIVED_PIO_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
+#define SATA_RECEIVED_DMA_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
+
+struct mvs_info;
+
+/*
+ * Chip-family dispatch table: function pointers abstracting the
+ * register-level differences between 88SE64xx (3G) and 88SE94xx (6G)
+ * controllers.  Concrete instances are mvs_64xx_dispatch and
+ * mvs_94xx_dispatch (declared above as extern).
+ */
+struct mvs_dispatch {
+       char *name;
+       int (*chip_init)(struct mvs_info *mvi);
+       int (*spi_init)(struct mvs_info *mvi);
+       int (*chip_ioremap)(struct mvs_info *mvi);
+       void (*chip_iounmap)(struct mvs_info *mvi);
+       irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
+       u32 (*isr_status)(struct mvs_info *mvi, int irq);
+       void (*interrupt_enable)(struct mvs_info *mvi);
+       void (*interrupt_disable)(struct mvs_info *mvi);
+
+       /* PHY control register accessors */
+       u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
+       void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
+
+       /* Indirect port-configuration register access (addr, then data) */
+       u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
+       void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
+       void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+       /* Indirect vendor-specific register access (addr, then data) */
+       u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
+       void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
+       void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+       u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
+       void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
+
+       u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
+       void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
+
+       void (*get_sas_addr)(void *buf, u32 buflen);
+       void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+       void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
+                               u32 tfs);
+       void (*start_delivery)(struct mvs_info *mvi, u32 tx);
+       u32 (*rx_update)(struct mvs_info *mvi);
+       void (*int_full)(struct mvs_info *mvi);
+       /* SATA register-set (tag resource) allocation/release */
+       u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
+       void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
+       u32 (*prd_size)(void);
+       u32 (*prd_count)(void);
+       void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+       void (*detect_porttype)(struct mvs_info *mvi, int i);
+       int (*oob_done)(struct mvs_info *mvi, int i);
+       void (*fix_phy_info)(struct mvs_info *mvi, int i,
+                               struct sas_identify_frame *id);
+       void (*phy_work_around)(struct mvs_info *mvi, int i);
+       void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
+                               struct sas_phy_linkrates *rates);
+       u32 (*phy_max_link_rate)(void);
+       void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
+       void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
+       void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
+       void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
+#ifdef SUPPORT_TARGET
+       /* Toggle SCST target mode on a port (target build only) */
+       void (*enable_target_mode)(struct mvs_info *mvi, u32 port);
+       void (*disable_target_mode)(struct mvs_info *mvi, u32 port);
+#endif
+       void (*clear_active_cmds)(struct mvs_info *mvi);
+       /* Low-level SPI flash engine access, used by mv_spi.c */
+       u32 (*spi_read_data)(struct mvs_info *mvi);
+       void (*spi_write_data)(struct mvs_info *mvi, u32 data);
+       int (*spi_buildcmd)(struct mvs_info *mvi,
+                                               u32      *dwCmd,
+                                               u8       cmd,
+                                               u8       read,
+                                               u8       length,
+                                               u32      addr
+                                               );
+       int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
+       int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
+#endif
+
+};
+
+/*
+ * Static per-chip geometry: host/PHY counts, received-FIS area layout,
+ * SATA register-set count and command-slot width.  One instance per
+ * supported chip model; referenced through mvi->chip.
+ */
+struct mvs_chip_info {
+       u32             n_host;
+       u32             n_phy;
+       u32             fis_offs;
+       u32             fis_count;
+       u32             srs_sz;
+       u32             slot_width;
+       const struct mvs_dispatch *dispatch;
+};
+/* Number of command slots: 2^slot_width */
+#define MVS_CHIP_SLOT_SZ       (1U << mvi->chip->slot_width)
+/* Total size of the received-FIS DMA area (0x100 bytes per FIS entry) */
+#define MVS_RX_FISL_SZ         \
+       (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
+#define MVS_CHIP_DISP          (mvi->chip->dispatch)
+
+/* Error-information record written by the hardware (little-endian). */
+struct mvs_err_info {
+       __le32                  flags;
+       __le32                  flags2;
+};
+
+/*
+ * Hardware command header, one per delivery-queue slot.  All fields are
+ * little-endian as consumed by the controller DMA engine.
+ */
+struct mvs_cmd_hdr {
+       __le32                  flags;  /* PRD tbl len; SAS, SATA ctl */
+       __le32                  lens;   /* cmd, max resp frame len */
+       __le32                  tags;   /* targ port xfer tag; tag */
+       __le32                  data_len;       /* data xfer len */
+       __le64                  cmd_tbl;        /* command table address */
+       __le64                  open_frame;     /* open addr frame address */
+       __le64                  status_buf;     /* status buffer address */
+       __le64                  prd_tbl;                /* PRD tbl address */
+       __le32                  reserved[4];
+};
+
+/* Driver view of a (possibly wide) SAS port; embeds the libsas port. */
+struct mvs_port {
+       struct asd_sas_port     sas_port;
+       u8                      port_attached;
+       u8                      wide_port_phymap;       /* bitmap of member PHYs */
+       struct list_head        list;
+};
+
+/*
+ * Per-PHY state: libsas PHY glue, local and attached-device SAS
+ * addresses, link status and the last received identify/FIS frame.
+ */
+struct mvs_phy {
+       struct mvs_info                 *mvi;
+       struct mvs_port         *port;  /* port this PHY belongs to, if any */
+       struct asd_sas_phy      sas_phy;
+       struct sas_identify     identify;
+       struct scsi_device      *sdev;
+       struct timer_list timer;
+       u64             dev_sas_addr;           /* our address on this PHY */
+       u64             att_dev_sas_addr;       /* attached device's address */
+       u32             att_dev_info;
+       u32             dev_info;
+       u32             phy_type;
+       u32             phy_status;
+       u32             irq_status;
+       u32             frame_rcvd_size;
+       u8              frame_rcvd[32];         /* raw identify frame / FIS */
+       u8              phy_attached;
+       u8              phy_mode;
+       u8              reserved[2];
+       u32             phy_event;
+       enum sas_linkrate       minimum_linkrate;
+       enum sas_linkrate       maximum_linkrate;
+};
+
+/* Per-attached-device bookkeeping (taskfileset = SATA register set). */
+struct mvs_device {
+       enum sas_dev_type dev_type;
+       struct domain_device *sas_device;
+       u32 attached_phy;
+       u32 device_id;
+       u32 runing_req;         /* count of in-flight requests (sic: "running") */
+       u8 taskfileset;
+       struct list_head                dev_entry;
+};
+
+/*
+ * Per-command-slot state.  Each slot owns a DMA buffer holding the
+ * command table, open address frame, status buffer and PRD table.
+ */
+struct mvs_slot_info {
+       struct list_head entry;
+       union {
+               struct sas_task *task;  /* initiator-mode owner */
+               void *tdata;            /* target-mode owner */
+       };
+       u32 n_elem;             /* number of mapped sg elements */
+       u32 tx;                 /* delivery-queue index used for this slot */
+       u32 slot_tag;
+
+       /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+        * and PRD table
+        */
+       void *buf;
+       dma_addr_t buf_dma;
+#if _MV_DUMP
+       u32 cmd_size;
+#endif
+#ifdef SUPPORT_TARGET
+       u32 target_cmd_tag;
+       /* internal command if NULL */
+       void *slot_scst_cmd;
+       struct mvst_port *slot_tgt_port;
+#endif
+       void *response;
+       struct mvs_port *port;
+       struct mvs_device       *device;
+       void *open_frame;
+};
+
+/*
+ * Main per-controller state.  Holds the delivery (TX) and completion
+ * (RX) rings, command-header slot array, per-PHY/port state and, when
+ * built with SUPPORT_TARGET, the SCST target-mode context.
+ * Variable-length: slot_info[] is sized at allocation time.
+ */
+struct mvs_info {
+       unsigned long flags;
+
+       /* host-wide lock */
+       spinlock_t lock;
+
+       /* our device */
+       struct pci_dev *pdev;
+       struct device *dev;
+
+       /* enhanced mode registers */
+       void __iomem *regs;
+
+       /* peripheral or soc registers */
+       void __iomem *regs_ex;
+       u8 sas_addr[SAS_ADDR_SIZE];
+
+       /* SCSI/SAS glue */
+       struct sas_ha_struct *sas;
+       struct Scsi_Host *shost;
+
+       /* TX (delivery) DMA ring */
+       __le32 *tx;
+       dma_addr_t tx_dma;
+
+       /* cached next-producer idx */
+       u32 tx_prod;
+
+       /* RX (completion) DMA ring */
+       __le32  *rx;
+       dma_addr_t rx_dma;
+
+       /* RX consumer idx */
+       u32 rx_cons;
+
+       /* RX'd FIS area */
+       __le32 *rx_fis;
+       dma_addr_t rx_fis_dma;
+
+       /* DMA command header slots */
+       struct mvs_cmd_hdr *slot;
+       dma_addr_t slot_dma;
+
+       u32 chip_id;
+       const struct mvs_chip_info *chip;
+
+       /* slot tag allocator: bitmap over MVS_SLOTS tags */
+       int tags_num;
+       u8 tags[MVS_SLOTS >> 3];
+
+       /* further per-slot information */
+       struct mvs_phy phy[MVS_MAX_PHYS];
+       struct mvs_port port[MVS_MAX_PHYS];
+       u32 irq;
+       u32 id;
+       u64 sata_reg_set;       /* bitmap of allocated SATA register sets */
+       struct list_head *hba_list;
+       struct list_head soc_entry;
+       struct list_head wq_list;
+       unsigned long instance;
+#ifdef SUPPORT_TARGET
+       unsigned long host_no;
+       struct mvst_tgt *tgt;
+       struct mvst_port tgt_port[MVS_MAX_PHYS];
+       struct list_head data_cmd_list;
+#endif /* SUPPORT_TARGET */
+       /* SPI flash identity, filled in by spi_init()/mvs_spi_init() */
+       u16 flashid;
+       u32 flashsize;
+       u32 flashsectSize;
+
+       void *addon;
+       struct mvs_device       devices[MVS_MAX_DEVICES];
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       /* scratch DMA buffer used as a data sink for the hotplug fix */
+       void *bulk_buffer;
+       dma_addr_t bulk_buffer_dma;
+#define TRASH_BUCKET_SIZE      0x20000
+#endif
+       struct mvs_slot_info slot_info[0];      /* flexible array, last */
+};
+
+/* Private data shared by the (up to two) hosts of a dual-host chip. */
+struct mvs_prv_info{
+       u8 n_host;
+       u8 n_phy;
+       u16 reserve;
+       struct mvs_info *mvi[2];
+};
+
+/* Deferred-work item carrying a handler id and its payload. */
+struct mvs_wq {
+       struct delayed_work work_q;
+       struct mvs_info *mvi;
+       void *data;
+       int handler;
+       struct list_head entry;
+};
+
+/* Transient context passed around while building one task's slot. */
+struct mvs_task_exec_info {
+       struct sas_task *task;
+       struct mvs_cmd_hdr *hdr;
+       struct mvs_port *port;
+       u32 tag;
+       int n_elem;
+};
+
+
+/******************** function prototype *********************/
+void mvs_get_sas_addr(void *buf, u32 buflen);
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
+void mvs_tag_free(struct mvs_info *mvi, u32 tag);
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
+int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
+void mvs_tag_init(struct mvs_info *mvi);
+void mvs_iounmap(void __iomem *regs);
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+                       void *funcdata);
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+                               u32 off_lo, u32 off_hi, u64 sas_addr);
+int mvs_slave_alloc(struct scsi_device *scsi_dev);
+int mvs_slave_configure(struct scsi_device *sdev);
+void mvs_scan_start(struct Scsi_Host *shost);
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int mvs_queue_command(struct sas_task *task, const int num,
+                       gfp_t gfp_flags);
+int mvs_abort_task(struct sas_task *task);
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
+int mvs_clear_aca(struct domain_device *dev, u8 *lun);
+int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
+void mvs_port_formed(struct asd_sas_phy *sas_phy);
+void mvs_port_deformed(struct asd_sas_phy *sas_phy);
+int mvs_dev_found(struct domain_device *dev);
+void mvs_dev_gone(struct domain_device *dev);
+int mvs_lu_reset(struct domain_device *dev, u8 *lun);
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
+int mvs_I_T_nexus_reset(struct domain_device *dev);
+int mvs_query_task(struct sas_task *task);
+void mvs_release_task(struct mvs_info *mvi, int phy_no,
+                       struct domain_device *dev);
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
+#endif
+
diff --git a/mvsas_tgt/mv_spi.c b/mvsas_tgt/mv_spi.c
new file mode 100644 (file)
index 0000000..9c5d831
--- /dev/null
@@ -0,0 +1,511 @@
+#ifdef SUPPORT_TARGET
+#include "mv_sas.h"
+#include "mv_spi.h"
+
+/*
+ * Active SPI instruction set, selected by spi_init().  Indexed by the
+ * SPI_INS_* constants from mv_spi.h (WREN, WRDI, RDSR, WRSR, READ,
+ * PROG, SERASE, CERASE, RDID, ...).
+ */
+static u8    SPICmd[16];
+
+#ifndef IDENTIFY_SPI
+/* Fallback opcode table used when flash identification is disabled. */
+u8   DEFAULT_SPI_CMD[16] =
+{
+    0x06, 0x04, 0x05, 0x01, 0x03, 0x02, 0x52, 0x62, 0x15
+};
+#else
+/* Per-vendor opcode tables; same SPI_INS_* index layout as above. */
+u8   ATMEL_SPI_CMD[16] =
+{
+    0x06, 0x04, 0x05, 0x01, 0x03, 0x02, 0x52, 0x62, 0x15
+};
+u8   MXIC_SPI_CMD[16] =
+{
+    0x06, 0x04, 0x05, 0x01, 0x03, 0x02, 0x20, 0x60, 0x90
+};
+u8   WINBOND_SPI_CMD[16] =
+{
+    0x06, 0x04, 0x05, 0x01, 0x03, 0x02, 0x20, 0xC7, 0xAB
+};
+
+/* Extended table for AT25DF041A/021 (adds sector protect/unprotect). */
+u8   ATMEL_SPI_CMD_41a_021[16] =
+{
+/*  0     1    2     3     4     5     6     7     8     9     10    11*/
+    0x06, 0x04, 0x05, 0x01, 0x03, 0x02, 0xD8, 0x60, 0x9F, 0x36, 0x39, 0x3C
+};
+
+u8     EON_F20_SPI_CMD[16] =
+{
+       0x06, 0x04, 0x05, 0x01, 0x03, 0x02, 0x20, 0x60, 0x90
+};
+#endif
+
+
+
+
+/*
+ * Read the flash STATUS register.  On success stores the status byte
+ * in *sr and returns 0; returns -1 on SPI-engine timeout.
+ * NOTE(review): addr is passed as -1 (wraps in the u32 parameter) —
+ * presumably a "no address phase" sentinel understood by
+ * spi_buildcmd(); confirm against the chip dispatch implementations.
+ */
+int spi_rdsr(struct mvs_info *mvi, u8 *sr)
+{
+       u32  dwTmp;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi, &dwTmp,
+                       (u8)SPICmd[SPI_INS_RDSR],
+                       1,      /* read */
+                       1,      /* one data byte */
+                       -1);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, 10000)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               *sr = (u8)dwTmp;
+               return 0;
+       } else {
+               mv_dprintk("timeout\n");
+       }
+       return -1;
+}
+
+/*
+ * Poll the status register until (status & mask) == bit.
+ * Retries up to 'timeout' times with a 20 ms sleep between reads.
+ * Returns 0 on match, -1 if the condition never became true.
+ */
+int spi_pollisr(struct mvs_info *mvi, u8 mask, u8 bit, u32 timeout)
+{
+       u32  i;
+       u8   sr;
+
+       for (i = 0; i < timeout; i++) {
+               if (0 == spi_rdsr(mvi, &sr)) {
+                       if ((sr & mask) == bit)
+                               return 0;
+               }
+               msleep(20);
+       }
+       return -1;
+}
+
+#ifdef IDENTIFY_SPI
+#define SPI_IDENTIFY_TIMER             10000
+
+/*
+ * Probe for an Atmel AT25F2048 via RDID.  On match fills in
+ * mvi->flashid/flashsize/flashsectSize and returns 0; else -1.
+ */
+int spi_atmelidentify(struct mvs_info *mvi)
+{
+       u32  dwtmp;
+       MVS_CHIP_DISP->spi_buildcmd(mvi, &dwtmp,
+               ATMEL_SPI_CMD[SPI_INS_RDID],
+               1,
+               2,      /* 2-byte ID */
+               0);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwtmp);
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, SPI_IDENTIFY_TIMER)) {
+               dwtmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               switch (dwtmp) {
+               case 0x631f:    /* Atmel AT25F2048 JEDEC id */
+                       mvi->flashid = AT25F2048;
+                       mvi->flashsize = 256L * 1024;
+                       mvi->flashsectSize = 64L * 1024;
+                       return 0;
+               }
+       }
+       mv_dprintk("identify failed\n");
+       return -1;
+}
+
+/*
+ * Probe for Atmel AT25DF041A / AT25DF021 parts (extended opcode
+ * table).  Fills in mvi flash fields and returns 0 on match, -1
+ * otherwise.
+ */
+int spi_atmelidentify_41a_021(struct mvs_info *mvi)
+{
+       u32  dwTmp;
+       MVS_CHIP_DISP->spi_buildcmd(mvi, &dwTmp,
+               (u8)ATMEL_SPI_CMD_41a_021[SPI_INS_RDID],
+               1,
+               2,
+               -1);    /* RDID needs no address phase */
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, SPI_IDENTIFY_TIMER)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               switch (dwTmp) {
+               case 0x441f:    /* AT25DF041A */
+                       mvi->flashid = AT25DF041A;
+                       mvi->flashsize = 256L * 1024;
+                       mvi->flashsectSize = 64L * 1024;
+                       return 0;
+               case 0x431f:    /* AT25DF021 */
+                       mvi->flashid = AT25DF021;
+                       mvi->flashsize = 256L * 1024;
+                       mvi->flashsectSize = 64L * 1024;
+                       return 0;
+               }
+       }
+
+    return -1;
+}
+
+
+/* Probe for a Winbond W25X40; returns 0 and fills mvi on match. */
+int spi_winbondidentify(struct mvs_info *mvi)
+{
+       u32  dwTmp;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               WINBOND_SPI_CMD[SPI_INS_RDID],
+               1,
+               2,
+               0);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, SPI_IDENTIFY_TIMER)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               switch (dwTmp) {
+               case 0x1212:    /* W25X40 */
+                       mvi->flashid = W25X40;
+                       mvi->flashsize = 256L * 1024;
+                       mvi->flashsectSize = 64L * 1024;
+                       return 0;
+           }
+       }
+
+       return -1;
+}
+
+/* Probe for a Macronix MX25L2005; returns 0 and fills mvi on match. */
+int spi_mxicidentify(struct mvs_info *mvi)
+{
+       u32  dwTmp;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi, &dwTmp,
+               MXIC_SPI_CMD[SPI_INS_RDID],
+               1,
+               2,
+               0);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, SPI_IDENTIFY_TIMER)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               switch (dwTmp) {
+               case 0x11C2:    /* MX25L2005 */
+                       mvi->flashid = MX25L2005;
+                       mvi->flashsize = 256L * 1024;
+                       mvi->flashsectSize = 4L * 1024;
+                       return 0;
+               }
+       }
+       return -1;
+}
+
+/* Probe for an EON EN25F20; returns 0 and fills mvi on match. */
+int spi_eonidentify_f20(struct mvs_info *mvi)
+{
+       u32  dwTmp;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               EON_F20_SPI_CMD[SPI_INS_RDID],
+               1,
+               2,
+               0);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, SPI_IDENTIFY_TIMER)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               switch (dwTmp) {
+               case 0x111C:    /* EN25F20 */
+                       mvi->flashid = EN25F20;
+                       mvi->flashsize = 256L * 1024;
+                       mvi->flashsectSize = 4L * 1024;
+                       return 0;
+               }
+       }
+
+       return -1;
+
+}
+#endif
+
+
+/*
+ * Identify the on-board SPI flash and install the matching opcode
+ * table into SPICmd[].  Without IDENTIFY_SPI, assumes a default part.
+ * Returns 0 on success (always, in practice, since Atmel is the
+ * fallback), -1 otherwise.
+ */
+int spi_init(struct mvs_info *mvi)
+{
+       u32  i;
+#ifndef IDENTIFY_SPI
+       for (i = 0; i < sizeof(SPICmd); i++)
+               SPICmd[i] = DEFAULT_SPI_CMD[i];
+
+       mvi->flashid = 0x11ab;  /* Marvell vendor id as placeholder */
+       mvi->flashsize = 256L * 1024;
+       mvi->flashsectSize = 64L * 1024;
+       return 0;
+#else
+       u8   *spivendor;
+
+       spivendor = NULL;
+       /* Identify Atmel first, assuming it is the most common part.
+        * Mxic is not probed separately since it can use the same
+        * instruction set as Atmel.  If nothing matches, fall back to
+        * the Atmel instruction set. */
+       if (0 == spi_atmelidentify(mvi))
+               spivendor = ATMEL_SPI_CMD;
+       else if (0 == spi_atmelidentify_41a_021(mvi))
+               spivendor = ATMEL_SPI_CMD_41a_021;
+       else if (0 == spi_winbondidentify(mvi))
+               spivendor = WINBOND_SPI_CMD;
+       else if (0 == spi_eonidentify_f20(mvi))
+               spivendor = EON_F20_SPI_CMD;
+       else
+               spivendor = ATMEL_SPI_CMD;
+
+       if (spivendor) {
+               for (i = 0; i < sizeof(SPICmd); i++)
+                       SPICmd[i] = spivendor[i];
+               return 0;
+       }
+       return -1;
+#endif
+}
+
+/*
+ * Read up to 4 bytes from flash address 'addr' into data[].
+ * size is silently clamped to 4 (the SPI engine's data width).
+ * Returns 0 on success, -1 on timeout.
+ */
+int spi_read(struct mvs_info *mvi, u32 addr, u8 *data, u8 size)
+{
+       u32  i, dwTmp;
+
+       if (size > 4)
+               size = 4;
+       MVS_CHIP_DISP->spi_buildcmd(mvi, &dwTmp,
+               (u8)SPICmd[SPI_INS_READ],
+               1,
+               size,
+               addr);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, 10000)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               for (i = 0; i < size; i++)
+                       data[i] = ((u8 *)&dwTmp)[i];
+               return 0;
+       } else
+           mv_dprintk("timeout\n");
+
+       return -1;
+}
+
+/*
+ * Read 'count' bytes starting at flash address 'addr' into data[],
+ * in aligned 4-byte chunks with partial head/tail handling.
+ * Return values of spi_read() are ignored; always returns 0.
+ *
+ * NOTE(review): rounding() (mv_spi.h) rounds UP to a multiple of 4,
+ * so for an unaligned addr the head bytes are taken from the dword at
+ * round-up(addr), i.e. from addresses past the requested ones — this
+ * looks like it was meant to read the dword at round-down(addr).
+ * Confirm against callers (mvui_init_param uses an aligned PARA_OFF,
+ * which would mask the issue).
+ */
+int spi_readbuf(struct mvs_info *mvi, u32 addr, u8 *data, u32 count)
+{
+       u32      i, j;
+       u32      tmpAddr, tmpdata, addrend;
+       u8       *val = data;
+
+       addrend = addr + count;
+       tmpAddr = rounding(addr, 4);
+       j = (addr & ((1U<<2) - 1));     /* misalignment of addr within a dword */
+       if (j > 0) {
+               /* partial head: copy the trailing 4-j bytes of a dword */
+               spi_read(mvi, tmpAddr, (u8 *)&tmpdata, 4);
+               for (i = j; i < 4; i++)
+                       *val++ = ((u8 *)&tmpdata)[i];
+               tmpAddr += 4;
+       }
+       j = rounding(addrend, 4);
+       /* aligned middle: whole dwords */
+       for (; tmpAddr < j; tmpAddr += 4) {
+               spi_read(mvi, tmpAddr, (u8 *)&tmpdata, 4);
+               *((u32 *)val) = tmpdata;
+               val += 4;
+       }
+       if (tmpAddr < addrend) {
+               /* partial tail */
+               spi_read(mvi, tmpAddr, (u8 *)&tmpdata, 4);
+               count = addrend - tmpAddr;
+               for (i = 0; i < count; i++)
+                       *val++ = ((u8 *)&tmpdata)[i];
+       }
+
+    return 0;
+}
+
+/*
+ * Sum 'Size' bytes at 'address' (mod 256).  For a buffer whose last
+ * byte is the stored two's-complement checksum, a result of 0 means
+ * the data verifies (see mvui_init_param()).
+ */
+u8     mvverifychecksum(u8 *address, u32 Size)
+{
+       u8      checkSum = 0;
+       u32     temp = 0;
+
+       for (temp = 0; temp < Size ; temp++)
+               checkSum += address[temp];
+
+       return  checkSum;
+}
+
+/*
+ * Compute the two's-complement checksum byte of 'size' bytes at
+ * 'address': the value that makes the total sum (mod 256) zero.
+ */
+u8     mvcalculatechecksum(u8 *address, u32 size)
+{
+       u8 checkSum;
+       u32 temp = 0;
+       checkSum = 0;
+
+       for (temp = 0; temp < size; temp++)
+               checkSum += address[temp];
+
+       checkSum = (~checkSum) + 1;     /* negate mod 256 */
+       return checkSum;
+}
+
+/*
+ * Issue WRITE ENABLE and wait until the status register reports
+ * WEL set and WIP clear ((sr & 0x03) == 0x02).
+ * Returns 0 on success, -1 on timeout.
+ */
+int spi_wren(struct mvs_info *mvi)
+{
+       u32  dwTmp;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               (u8)SPICmd[SPI_INS_WREN],
+               0,      /* no data read */
+               0,
+               -1);    /* no address phase */
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 != MVS_CHIP_DISP->spi_waitdataready(mvi, 10000))
+               return -1;
+       if (0 == spi_pollisr(mvi, 0x03, 0x02, 300000))
+               return 0;
+       return -1;
+}
+
+/*
+ * Read the sector-protection register for the sector containing
+ * 'addr' (Atmel AT25DF-style parts).  Stores the byte in *data.
+ * Returns 0 on success, -1 on timeout.
+ */
+int spi_rdpt(struct mvs_info *mvi, u32 addr, u8 *data)
+{
+       u32   dwTmp;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               (u8)SPICmd[SPI_INS_RDPT],
+               1,
+               1,
+               addr);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 == MVS_CHIP_DISP->spi_waitdataready(mvi, 10000)) {
+               dwTmp = MVS_CHIP_DISP->spi_read_data(mvi);
+               *data = (u8)dwTmp;
+               return 0;
+       } else {
+               mv_dprintk("SPI_RDPT timeout\n");
+       }
+       return -1;
+}
+
+/*
+ * Remove write-protection from the sector containing 'addr'.
+ * No-op (returns 0) if the sector is already unprotected.
+ * Returns 0 on success, -1 on any failure.
+ */
+int spi_sectunprotect(struct mvs_info *mvi, u32 addr)
+{
+       u32 dwTmp;
+       u8 protect_sect = 0xFF;
+       if (-1 == spi_rdpt(mvi, addr, &protect_sect))
+               return -1;
+
+       if (protect_sect == 0)
+               return 0;       /* already unprotected */
+
+       if (-1 == spi_wren(mvi))
+               return -1;
+
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               (u8)SPICmd[SPI_INS_UPTSEC],
+               0,
+               0,
+               addr);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+       if (0 != MVS_CHIP_DISP->spi_waitdataready(mvi, 10000))
+               return -1;
+       /* wait until WIP and WEL both clear */
+       if (0 == spi_pollisr(mvi, 0x03, 0, 300000))
+               return 0;
+       mv_dprintk("error SPI_SectUnprotect \n");
+       return -1;
+}
+
+/*
+ * Erase the flash sector containing 'addr'.  For AT25DF041A/021 the
+ * sector is unprotected first.  Returns 0 on success, -1 on failure.
+ */
+int spi_secterase(struct mvs_info *mvi, u32 addr)
+{
+       u32  dwTmp;
+
+       if (-1 == spi_wren(mvi))
+               return -1;
+
+       if ((mvi->flashid == AT25DF041A) || (mvi->flashid == AT25DF021)) {
+               if (-1 == spi_sectunprotect(mvi, addr)) {
+                       mv_dprintk("Un protect error.\n");
+                       return -1;
+               }
+       }
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               (u8)SPICmd[SPI_INS_SERASE],
+               0,
+               0,
+               addr);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+       if (0 != MVS_CHIP_DISP->spi_waitdataready(mvi, 10000))
+               return -1;
+       /* erase is slow: poll until WIP and WEL clear */
+       if (0 == spi_pollisr(mvi, 0x03, 0, 300000))
+               return 0;
+       mv_dprintk("error SPI_SectErase\n");
+       return -1;
+}
+
+/*
+ * Program one 32-bit word at flash address 'addr'.
+ * Returns 0 on success, -1 on timeout.
+ * NOTE(review): the spi_wren() return value is ignored here, unlike
+ * in the erase paths — a failed write-enable would surface later as
+ * a poll timeout.
+ */
+int spi_write(struct mvs_info *mvi, u32 addr, u32 data)
+{
+       u32 dwTmp;
+
+       spi_wren(mvi);
+       MVS_CHIP_DISP->spi_write_data(mvi, data);
+       MVS_CHIP_DISP->spi_buildcmd(mvi,  &dwTmp,
+               (u8)SPICmd[SPI_INS_RPOG],
+               0,      /* write direction */
+               4,      /* 4 data bytes */
+               addr);
+       MVS_CHIP_DISP->spi_issuecmd(mvi, dwTmp);
+
+       if (0 != MVS_CHIP_DISP->spi_waitdataready(mvi, 10000)) {
+               mv_dprintk("timeout\n");
+               return -1;
+       }
+       /* wait for WIP (write-in-progress) to clear */
+       if (0 == spi_pollisr(mvi, 0x01, 0, 5000))
+               return 0;
+       mv_dprintk("timeout\n");
+       return -1;
+}
+
+/*
+ * Program 'count' bytes (in 4-byte words) starting at flash 'addr'.
+ * Returns 0 on success, -1 on the first failed word.
+ * NOTE(review): 'i' advances in byte units but indexes a u32 array —
+ * data[i] with i = 0,4,8,... skips three of every four words; the
+ * intent was presumably ((u8 *)data) + i or data[i / 4].  Verify
+ * against callers before relying on this routine.
+ */
+int spi_writebuf(struct mvs_info *mvi, u32 addr, u32 *data, u32 count)
+{
+       u32  i;
+
+       for (i = 0; i < count; i += 4) {
+               if (-1 == spi_write(mvi, addr + i, *(u32 *)&data[i])) {
+                       mv_dprintk("Write failed at %5.5x\n", addr+i);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Load the HBA parameter block from flash (offset PARA_OFF) into
+ * *hba_info_para and validate it: the "MRVL" signature must be
+ * present and the whole block's byte sum (including the stored
+ * checksum byte) must be zero.  Returns true iff the block is valid.
+ */
+bool mvui_init_param(struct mvs_info *mvi, struct hba_info_main *hba_info_para)
+{
+       u32     param_flash_addr = PARA_OFF;
+       if (!mvi)
+               return false;
+
+       if (spi_init(mvi)) {
+               mv_dprintk("Init flash rom failed.\n");
+               return false;
+       }
+       mv_dprintk("Init flash rom ok,flash type is 0x%x.\n", mvi->flashid);
+       /* step 1 read param from flash offset = 0x3FFF00 */
+       spi_readbuf(mvi, param_flash_addr, \
+                       (u8 *)hba_info_para, FLASH_PARA_SIZE);
+
+       /* step 2 check the signature first */
+       if (hba_info_para->signature[0] == 'M' && \
+           hba_info_para->signature[1] == 'R' && \
+           hba_info_para->signature[2] == 'V' && \
+           hba_info_para->signature[3] == 'L' && \
+           (!mvverifychecksum((u8 *)hba_info_para, FLASH_PARA_SIZE))) {
+               return true;
+       }
+       return false;
+}
+
+/*
+ * Initialize per-PHY SAS addresses from the flash parameter block,
+ * falling back to a fixed default address (0x5005043011ab0000) for
+ * every PHY when the block is missing or invalid.  The host address
+ * is copied from PHY 0.
+ *
+ * NOTE(review): the return type is u8 but the failure path does
+ * "return -1" (i.e. 255); any caller testing "< 0" will never see
+ * the failure.  Cannot be changed here without auditing callers.
+ */
+u8 mvs_spi_init(struct mvs_info *mvi)
+{
+       u8 i;
+       u64 sas_addr;
+       struct hba_info_main hba_info_para;
+
+       do {
+               if (!mvui_init_param(mvi, &hba_info_para)) {
+                       /* flash invalid: fall back to a default address */
+                       for (i = 0; i < mvi->chip->n_phy; i++) {
+                               sas_addr = 0x5005043011ab0000ULL;
+                               mvi->phy[i].dev_sas_addr =
+                                       cpu_to_be64((u64)(*(u64 *)&sas_addr));
+                       }
+                       return -1;
+               }
+               for (i = 0; i < mvi->chip->n_phy; i++) {
+                       /* vphy: global PHY index across dual-host chips */
+                       int vphy = i+mvi->id*mvi->chip->n_phy;
+                       sas_addr = hba_info_para.sas_address[vphy];
+                       mvi->phy[i].dev_sas_addr = sas_addr;
+                       mv_printk("Phy %d SAS ADDRESS %016llx\n", i,
+                               SAS_ADDR(&mvi->phy[i].dev_sas_addr));
+               }
+       } while (0);
+
+       memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
+
+       return 0;
+}
+#endif   /*SUPPORT_TARGET*/
+
diff --git a/mvsas_tgt/mv_spi.h b/mvsas_tgt/mv_spi.h
new file mode 100644 (file)
index 0000000..a201bfa
--- /dev/null
@@ -0,0 +1,56 @@
+#ifdef SUPPORT_TARGET
+#ifndef _MV_SPI_H_
+#define _MV_SPI_H_
+
+#define IDENTIFY_SPI           1
+#define FLASH_SIZE             0x40000
+#define PARA_OFF               (FLASH_SIZE - 0x100)
+
+#define NVRAM_DATA_MAJOR_VERSION               0
+#define NVRAM_DATA_MINOR_VERSION               1
+
+#define AT25F2048                      0x0101
+#define AT25DF041A                     0x0102
+#define AT25DF021                      0x0103
+
+#define MX25L2005               0x0201
+#define MX25L4005               0x0202
+#define MX25L8005               0x0203
+#define W25X40                         0x0301
+#define EN25F20                                0x0401
+
+
+#define SPI_INS_WREN                   0
+#define SPI_INS_WRDI                   1
+#define SPI_INS_RDSR                           2
+#define SPI_INS_WRSR                   3
+#define SPI_INS_READ                   4
+#define SPI_INS_RPOG                   5
+#define SPI_INS_SERASE                 6
+#define SPI_INS_CERASE                 7
+#define SPI_INS_RDID                           8
+#define SPI_INS_PRSEC                  9
+#define SPI_INS_UPTSEC                 10
+#define SPI_INS_RDPT                           11
+
+
+#define MAX_PD_IN_PD_PAGE_FLASH                128
+#define FLASH_PARA_SIZE        (sizeof(struct hba_info_main))
+#define rounding_mask(x, mask)  (((x)+(mask))&~(mask))
+#define rounding(value, align)  rounding_mask(value,   \
+                                                (typeof(value)) (align-1))
+#define offset_of(type, member) offsetof(type, member)
+
+#define PAGE_INTERVAL_DISTANCE         0x100
+
+/*
+ * On-flash HBA parameter block stored at PARA_OFF.  The layout is
+ * fixed: 4 + 52 + 64 + 135 + 1 = 256 bytes; the final byte is the
+ * two's-complement checksum of the preceding 255 bytes (see
+ * mvverifychecksum()/mvui_init_param() in mv_spi.c).
+ */
+struct hba_info_main{
+       u8      signature[4];   /* "MRVL" when valid */
+       u8      reserve[52];
+       u64     sas_address[8]; /* one SAS address per (virtual) PHY */
+       u8      reserved4[135];
+       u8      checksum;
+};     /* total 256 bytes */
+u8 mvs_spi_init(struct mvs_info *mvi);
+
+#endif
+#endif  /*SUPPORT_TARGET*/
diff --git a/mvsas_tgt/mv_tgt.c b/mvsas_tgt/mv_tgt.c
new file mode 100644 (file)
index 0000000..e66c157
--- /dev/null
@@ -0,0 +1,2817 @@
+#ifdef SUPPORT_TARGET
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <asm/byteorder.h>
+#include <scst.h>
+#include <scst_debug.h>
+#include "mv_sas.h"
+#include "mv_defs.h"
+#include "mv_64xx.h"
+#include "mv_chips.h"
+
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+/* Runtime trace mask consumed by the TRACE_* logging macros. */
+unsigned long mvst_trace_flag = MVST_DEFAULT_LOG_FLAGS;
+#endif
+
+
+#ifndef SUPPORT_TARGET
+#error "SUPPORT_TARGET is NOT DEFINED"
+#endif
+
+/* SCST target-template callbacks (bound in tgt_template below). */
+static int mvst_target_detect(struct scst_tgt_template *templ);
+static int mvst_target_release(struct scst_tgt *scst_tgt);
+static int mvst_xmit_response(struct scst_cmd *scst_cmd);
+static int mvst_rdy_to_xfer(struct scst_cmd *scst_cmd);
+static void mvst_on_free_cmd(struct scst_cmd *scst_cmd);
+static void mvst_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
+static int mvst_report_event(struct scst_aen *aen);
+/* Predefs for callbacks handed to mvst(target) */
+static u8 mvst_response_ssp_command(struct mvs_info *mvi, u32 rx_desc);
+static void mvst_async_event(uint16_t code, struct mvs_info *mvi,
+       uint16_t *mailbox);
+static void mvst_cmd_completion(struct mvs_info *mvi, u32 rx_desc);
+static void mvst_host_action(struct mvs_info *mvi,
+       enum mvst_tgt_host_action_t action, u8 phyid);
+static int mvst_send_cmd(struct mvs_info *mvi);
+static int mvst_start_sas_target(struct mvs_info *mvi, u8 id);
+static int mvst_restart_free_list(struct mvs_info *mvi, u8 slot_id);
+
+
+/* Slab cache for mvst_cmd objects and the driver-global message queue. */
+struct kmem_cache *mvst_cmd_cachep;
+struct mvst_msg_queue tgt_msg_queue;
+
+/*
+ * Template describing this target driver to the SCST core.  The
+ * data-path callbacks are marked atomic-capable unless
+ * DEBUG_WORK_IN_THREAD forces them into thread context.
+ */
+struct scst_tgt_template tgt_template = {
+       .name = MVST_NAME,
+       .sg_tablesize = 0,
+       .use_clustering = 1,
+#ifdef DEBUG_WORK_IN_THREAD
+       /* Debug build: request thread context for xmit/rdy callbacks. */
+       .xmit_response_atomic = 0,
+       .rdy_to_xfer_atomic = 0,
+#else
+       .xmit_response_atomic = 1,
+       .rdy_to_xfer_atomic = 1,
+#endif
+       .detect = mvst_target_detect,
+       .release = mvst_target_release,
+       .xmit_response = mvst_xmit_response,
+       .rdy_to_xfer = mvst_rdy_to_xfer,
+       .on_free_cmd = mvst_on_free_cmd,
+       .task_mgmt_fn_done = mvst_task_mgmt_fn_done,
+       .report_aen = mvst_report_event,
+};
+
+/*
+ * Functions
+ */
+
+/*
+ * Assemble a 64-bit SAS address from an 8-byte buffer, byte-swapping
+ * each 32-bit half with cpu_to_be32(): bytes 4-7 end up in the high
+ * half, bytes 0-3 in the low half.
+ */
+static u64 mvst_get_be_sas_addr(u8 *sas_addr)
+{
+       u32 first_word = *(u32 *)&sas_addr[0];
+       u32 second_word = *(u32 *)&sas_addr[4];
+
+       return ((u64)cpu_to_be32(second_word) << 32) |
+               (u64)cpu_to_be32(first_word);
+}
+
+/*
+ * Assemble a 64-bit SAS address with no byte swapping: the first four
+ * buffer bytes form the high 32 bits, the last four the low 32 bits.
+ */
+static u64 mvst_get_le_sas_addr(u8 *sas_addr)
+{
+       u64 addr = (u32)(*(u32 *)&sas_addr[0]);
+
+       addr <<= 32;
+       addr |= (u32)(*(u32 *)&sas_addr[4]);
+       return addr;
+}
+
+/* FIXME
+ *
+ * use_sg can not bigger than MAX_SG_COUNT
+ *
+ */
+/*
+ * Map the command's scatter/gather list for DMA and build the hardware
+ * PRD table in @buf_prd.
+ *
+ * NOTE(review): the pci_map_sg() return value is stored in seg_cnt but
+ * never checked; a zero result (mapping failure) should be handled by
+ * the caller -- TODO confirm and add error propagation.
+ */
+static inline void
+mvst_prep_prd(struct mvst_prm *prm, struct mvs_prd *buf_prd)
+{
+       struct mvs_info *mvi = prm->tgt->mvi;
+
+       TRACE_ENTRY();
+       TRACE_DBG("bufflen 0x%x, %p", prm->bufflen, prm->sg);
+       sBUG_ON(prm->sg_cnt == 0);
+       /* Use the already-resolved mvi (it was previously unused and the
+        * pointer chain prm->tgt->mvi was walked a second time). */
+       prm->seg_cnt = pci_map_sg(mvi->pdev, prm->sg, prm->sg_cnt,
+                                  scst_to_tgt_dma_dir(prm->data_direction));
+       MVS_CHIP_DISP->make_prd(prm->sg, prm->sg_cnt, buf_prd);
+}
+
+
+/*
+ * Byte-swap every 32-bit word of @payload in place.  Used to convert
+ * frame payload words between wire byte order and host order.
+ */
+void
+mvst_fixup_payload(u32 *payload, u32 nr_pl_dwords)
+{
+       u32 i, word;
+
+       for (i = 0; i < nr_pl_dwords; i++) {
+               word = payload[i];
+               payload[i] = ((word & 0x000000FF) << 24) |
+                            ((word & 0x0000FF00) << 8)  |
+                            ((word & 0x00FF0000) >> 8)  |
+                            ((word & 0xFF000000) >> 24);
+       }
+}
+
+/*
+ * Copy @nr_bytes bytes from @src to @dst one 32-bit word at a time.
+ * @nr_bytes is expected to be a multiple of sizeof(u32); a remainder
+ * smaller than a word is silently dropped.
+ */
+void
+memory_dword_copy(u32 *dst, u32 *src, u32 nr_bytes)
+{
+       u32 words = nr_bytes / sizeof(u32);
+
+       while (words--)
+               *dst++ = *src++;
+}
+
+/*
+ * Return non-zero when @tgt has no remaining sessions.  The check runs
+ * under mvi->lock because session teardown may free @tgt right after
+ * the last session is gone (see mvst_free_session_done()).
+ */
+static inline int test_tgt_sess_count(struct mvst_tgt *tgt,
+                               struct mvs_info *mvi)
+{
+       int no_sessions;
+       unsigned long lock_flags;
+
+       spin_lock_irqsave(&tgt->mvi->lock, lock_flags);
+       TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
+             tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+       no_sessions = (tgt->sess_count == 0);
+       spin_unlock_irqrestore(&tgt->mvi->lock, lock_flags);
+
+       return no_sessions;
+}
+
+/* mvi->lock supposed to be held on entry */
+/*
+ * Write the TX producer index register so the chip picks up the
+ * delivery-queue entries prepared so far.  tx_prod points one past the
+ * last entry, hence the -1, masked to the ring size.
+ */
+static inline void mvst_exec_queue(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       mw32(MVS_TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+}
+
+/* mvi->lock supposed to be held on entry */
+/*
+ * Acknowledge completion of a task-management command to the
+ * initiator.  Currently an empty stub: no acknowledgement is sent.
+ * TODO(review): implement or document why no ack is required.
+ */
+static inline void mvst_send_notify_ack(struct mvs_info *mvi,
+       struct mvst_mgmt_cmd *mcmd, int status)
+{
+
+}
+
+/*
+ * register with initiator driver (but target mode isn't enabled till
+ * it's turned on via sysfs)
+ */
+static int mvst_target_detect(struct scst_tgt_template *templ)
+{
+       int res;
+       struct mvs_tgt_initiator itd = {
+               .magic = MVST_TARGET_MAGIC,
+               .tgt_rsp_ssp_cmd = mvst_response_ssp_command,
+               .tgt_cmd_cmpl = mvst_cmd_completion,
+               .tgt_async_event = mvst_async_event,
+               .tgt_host_action = mvst_host_action,
+               .tgt_send_cmd = mvst_send_cmd
+       };
+
+       TRACE_ENTRY();
+
+       /* Hand our callback table over to the mvsas initiator driver. */
+       res = mvs_tgt_register_driver(&itd);
+       if (res != 0)
+               PRINT_ERROR("Unable to register driver: %d", res);
+
+       TRACE_EXIT();
+       return res;
+}
+
+/* no lock held */
+/*
+ * SCST "session unregistered" completion callback: free the per-session
+ * data and drop the owning target's session count, waking up any waiter
+ * in mvst_target_release() when the count reaches zero.
+ */
+static void mvst_free_session_done(struct scst_session *scst_sess)
+{
+       struct mvst_sess *sess;
+       struct mvst_tgt *tgt;
+       struct mvs_info *mvi;
+       unsigned long flags;
+
+       TRACE_ENTRY();
+
+       sBUG_ON(scst_sess == NULL);
+       sess = (struct mvst_sess *)scst_sess_get_tgt_priv(scst_sess);
+       sBUG_ON(sess == NULL);
+       tgt = sess->tgt;
+
+       /* sess is not referenced below; only the saved tgt pointer is. */
+       kfree(sess);
+
+       if (tgt == NULL)
+               goto out;
+
+       TRACE_MGMT_DBG("tgt %p, empty(sess_list) %d, sess_count %d",
+             tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+
+       mvi = tgt->mvi;
+
+       /*
+        * We need to protect against race, when tgt is freed before or
+        * inside wake_up()
+        */
+       spin_lock_irqsave(&mvi->lock, flags);
+       tgt->sess_count--;
+       if (tgt->sess_count == 0)
+               wake_up_all(&tgt->waitQ);
+       spin_unlock_irqrestore(&mvi->lock, flags);
+
+out:
+       TRACE_EXIT();
+       return;
+}
+
+/* mvi->lock supposed to be held on entry */
+/*
+ * Unlink @sess from its target and hand it to SCST for asynchronous
+ * unregistration; mvst_free_session_done() frees it afterwards.
+ * A NULL @sess is tolerated and ignored.
+ */
+static void mvst_unreg_sess(struct mvst_sess *sess)
+{
+       TRACE_ENTRY();
+
+       if (sess != NULL) {
+               list_del(&sess->sess_entry);
+
+               TRACE_DBG("mvst tgt(%ld): session for initiator %016llx deleted",
+                       sess->tgt->mvi->instance,
+                       mvst_get_le_sas_addr((u8 *)&sess->initiator_sas_addr));
+
+               /*
+                * Any commands for this session will be finished regularly,
+                * because we must not drop SCSI commands on transport level,
+                * at least without any response to the initiator.
+                */
+               scst_unregister_session(sess->scst_sess, 0,
+                       mvst_free_session_done);
+       }
+
+       TRACE_EXIT();
+       return;
+}
+
+/* mvi->lock supposed to be held on entry */
+/*
+ * Unregister every session of @tgt.  Uses the _safe list iterator
+ * because mvst_unreg_sess() removes each entry as it goes.
+ */
+static void mvst_clear_tgt_db(struct mvst_tgt *tgt)
+{
+       struct mvst_sess *sess, *next_sess;
+
+       TRACE_ENTRY();
+
+       TRACE_MGMT_DBG("Clearing targets DB %p", tgt);
+
+       list_for_each_entry_safe(sess, next_sess, &tgt->sess_list, sess_entry)
+               mvst_unreg_sess(sess);
+
+       /* At this point tgt could be already dead */
+
+       TRACE_MGMT_DBG("Finished clearing Target DB %p", tgt);
+
+       TRACE_EXIT();
+       return;
+}
+
+/* should be called w/out lock, but tgt should be
+ * unfindable at this point */
+/*
+ * SCST release() callback: shut the target down, tear down all
+ * sessions, wait for them to drain, then detach and free @tgt.
+ */
+static int mvst_target_release(struct scst_tgt *scst_tgt)
+{
+       int res = 0;
+       struct mvst_tgt *tgt =
+               (struct mvst_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
+       struct mvs_info *mvi = tgt->mvi;
+       unsigned long flags = 0;
+
+       TRACE_ENTRY();
+
+       /* Mark the target as shutting down and kill all sessions. */
+       spin_lock_irqsave(&mvi->lock, flags);
+       tgt->tgt_shutdown = 1;
+       mvst_clear_tgt_db(tgt);
+       spin_unlock_irqrestore(&mvi->lock, flags);
+
+       wait_event(tgt->waitQ, test_tgt_sess_count(tgt, mvi));
+
+       /*
+        * big hammer
+        *
+        * Fix: this was "if (!mvi->flags & MVF_HOST_SHUTTING_DOWN)".
+        * '!' binds tighter than '&', so the condition was always false
+        * and the branch was never taken; parenthesize to test the
+        * intended flag bit.
+        * NOTE(review): setting MVF_TARGET_MODE_ENABLE on release looks
+        * suspicious -- confirm it should not be cleared here instead.
+        */
+       if (!(mvi->flags & MVF_HOST_SHUTTING_DOWN))
+               mvi->flags |= MVF_TARGET_MODE_ENABLE;
+
+       /* wait for sessions to clear out (just in case) */
+       wait_event(tgt->waitQ, test_tgt_sess_count(tgt, mvi));
+
+       TRACE_MGMT_DBG("Finished waiting for tgt %p: empty(sess_list)=%d "
+               "sess_count=%d", tgt, list_empty(&tgt->sess_list),
+               tgt->sess_count);
+
+       /* The lock is needed, because we still can get an incoming packet */
+       spin_lock_irqsave(&mvi->lock, flags);
+       scst_tgt_set_tgt_priv(scst_tgt, NULL);
+       mvi->tgt = NULL;
+       spin_unlock_irqrestore(&mvi->lock, flags);
+
+       kfree(tgt);
+
+       TRACE_EXIT_RES(res);
+       return res;
+}
+
+/* Non-zero when the SCST command carries response data to transmit. */
+static inline int mvst_has_data(struct scst_cmd *scst_cmd)
+{
+       return (scst_cmd_get_resp_data_len(scst_cmd) > 0) ? 1 : 0;
+}
+
+
+/*
+ * Return @slot to the free pool: poison the target-driver fields,
+ * unlink the slot from its port list and release its hardware tag.
+ */
+static void
+mvst_put_slot(struct mvs_info *mvi, struct mvs_slot_info *slot)
+{
+       u32 freed_tag = slot->target_cmd_tag;
+
+       /* reset field used by target driver */
+       slot->target_cmd_tag = 0xdeadbeef;
+       slot->tx = 0xdeadbeef;
+       slot->slot_scst_cmd = NULL;
+       slot->response = NULL;
+       slot->open_frame = NULL;
+       slot->slot_tgt_port = NULL;
+       list_del(&slot->entry);
+       mvs_tag_clear(mvi, freed_tag);
+}
+
+
+/*
+ * Allocate a free hardware slot (and tag) for @tgt_port, or return
+ * NULL when no tag is available.  The slot's DMA buffer is zeroed,
+ * its target-driver fields are initialized and it is linked onto the
+ * port's slot_list.
+ */
+static struct mvs_slot_info*
+mvst_get_slot(struct mvs_info *mvi, struct mvst_port *tgt_port)
+{
+       u32 tag;
+       struct mvs_slot_info *slot;
+
+       if (mvs_tag_alloc(mvi, &tag))
+               return NULL;
+
+       slot = &mvi->slot_info[tag];
+       memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+       /* used by initiator driver, reserved  for target driver */
+       slot->n_elem = 0;
+       slot->task = NULL;
+       slot->port = NULL;
+
+       /* save free tag */
+       slot->target_cmd_tag = tag;
+       slot->slot_tgt_port = tgt_port;
+       slot->slot_scst_cmd = NULL;
+       slot->open_frame = NULL;
+       slot->tx = mvi->tx_prod;
+       list_add_tail(&slot->entry, &slot->slot_tgt_port->slot_list);
+       return slot;
+}
+
+/*
+ * Build an SSP RESPONSE frame for @prm->cmd in the hardware @slot and
+ * fill the matching delivery-queue entry.  @sense_data selects whether
+ * a sense payload (when valid) or a 4-byte response-data payload is
+ * attached.  Always returns 0.
+ *
+ * The slot's MVS_SLOT_BUF_SZ DMA buffer is carved into four regions:
+ * command table (SSP frame), open address frame, PRD table (unused
+ * here) and status buffer; the command header is pointed at each.
+ */
+static int mvst_prep_resp_frame(struct mvst_prm *prm,
+                       struct mvs_slot_info *slot, u8 sense_data)
+{
+       u8 has_data = 0;
+       u16 tag;
+       void *buf_tmp, *buf_cmd;
+       dma_addr_t buf_tmp_dma;
+       u32 resp_len = 0, req_len = 0, prd_len = 0;
+       const u32 max_resp_len = SB_RFB_MAX;
+       struct mvs_info *mvi = prm->tgt->mvi;
+       struct mvst_cmd *cmd = prm->cmd;
+       struct mvs_cmd_header *cmd_hdr;
+       struct mvs_delivery_queue *delivery_q;
+       struct mvs_prd *buf_prd;
+       struct open_address_frame *open_frame;
+       struct mv_ssp_response_iu *response_iu;
+       TRACE_ENTRY();
+       tag = slot->target_cmd_tag;
+       cmd_hdr = (struct mvs_cmd_header *)&mvi->slot[tag];
+       /* get free delivery queue */
+       delivery_q = (struct mvs_delivery_queue *)&mvi->tx[mvi->tx_prod];
+
+       /* SSP protocol, Target mode, Normal priority */
+       delivery_q->cmd = TXQ_CMD_SSP;
+       delivery_q->mode = TXQ_MODE_TARGET;
+       delivery_q->priority = TXQ_PRI_NORMAL;
+       delivery_q->sata_reg_set = 0;
+       delivery_q->phy = cmd->cmd_tgt_port->wide_port_phymap;
+       delivery_q->slot_nm = tag;
+
+       /*
+        * Size of the response IU: 24 bytes fixed part, plus either the
+        * sense buffer or a 4-byte response-data field when a non-zero
+        * status / valid sense must be reported.
+        */
+       if (sense_data) {
+               if ((prm->rq_result) && (SCST_SENSE_VALID(prm->sense_buffer))) {
+                       req_len = 24 + prm->sense_buffer_len;
+                       has_data = 1;
+               } else {
+                       req_len = 24;
+                       has_data = 0;
+               }
+       } else {
+               if (prm->rq_result) {
+                       req_len = 24 + 4;
+                       has_data = 1;
+               } else {
+                       req_len = 24;
+                       has_data = 0;
+               }
+       }
+       req_len += sizeof(struct ssp_frame_header);
+
+       cmd_hdr->ssp_frame_type = MCH_SSP_FR_RESP;
+       cmd_hdr->ssp_passthru = MCH_SSP_MODE_NORMAL;
+
+       /* command header dword 1 */
+       /* configure in below */
+
+       /* command header dword 2 */
+       /* copy the tag from received command frame */
+       cmd_hdr->target_tag = cpu_to_le16(tag);
+       cmd_hdr->tag = be16_to_cpu(prm->cmd->ssp_hdr->tag);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+       /* command header dword 4 -5 */
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+       buf_tmp = buf_cmd = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+       cmd_hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 6 -7 */
+       buf_tmp += req_len;
+       buf_tmp_dma += req_len;
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       slot->open_frame = buf_tmp;
+       cmd_hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 10 -11 */
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+       /* region 3: PRD table ******************************* */
+       /* No data is transferred by a response frame, so no PRD table. */
+       buf_prd = buf_tmp;
+       cmd_hdr->prd_tbl = 0;
+
+       /* command header dword 8 -9 */
+       /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+       slot->response = buf_tmp;
+       cmd_hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 1 */
+       resp_len = MVS_SLOT_BUF_SZ - req_len - MVS_OAF_SZ -
+           sizeof(struct mvs_err_info) - prd_len;
+       resp_len = min(resp_len, max_resp_len);
+
+       /* Frame lengths are expressed in dwords in the command header. */
+       cmd_hdr->max_rsp_frame_len = resp_len / 4;
+       cmd_hdr->frame_len =
+               req_len / 4 < MVS_MAX_SSP_FRAME ? req_len/4 : MVS_MAX_SSP_FRAME;
+
+       /* generate open address frame hdr (first 12 bytes) */
+       open_frame = (struct open_address_frame *)slot->open_frame;
+       open_frame->initiator = 0;      /* target mode */
+       open_frame->protocol = PROTOCOL_SSP;
+       open_frame->frame_type = ADDRESS_OPEN_FRAME;
+       /* Mirror rate/tag/address of the originally received OPEN frame. */
+       open_frame->connect_rate = (prm->cmd->open_frame->received_rate);
+       open_frame->connect_tag =
+               be16_to_cpu(prm->cmd->open_frame->received_tag);
+       open_frame->dest_sas_addr =
+               mvst_get_be_sas_addr((u8 *)&prm->cmd->open_frame->src_sas_addr);
+
+       /*  for passthru mode */
+       /* fill in SSP frame header (Command Table.SSP frame header) */
+       /* Dead branch here: ssp_passthru was set to MCH_SSP_MODE_NORMAL
+        * above, so this is only kept for a future passthru mode. */
+       if (cmd_hdr->ssp_passthru == MCH_SSP_MODE_PASSTHRU) {
+               struct ssp_frame_header *ssp_hdr;
+               /* command table */
+               ssp_hdr = (struct ssp_frame_header *)slot->buf;
+               ssp_hdr->frame_type = SSP_RESPONSE;
+               memcpy(ssp_hdr->hashed_dest_sas_addr,
+                       prm->cmd->ssp_hdr->hashed_src_sas_addr,
+                       HASHED_SAS_ADDR_SIZE);
+               memcpy(ssp_hdr->hashed_src_sas_addr,
+                      prm->cmd->ssp_hdr->hashed_dest_sas_addr,
+                      HASHED_SAS_ADDR_SIZE);
+               /* copy the tag from received command frame */
+               ssp_hdr->tag = be16_to_cpu(prm->cmd->ssp_hdr->tag);
+       }
+
+       /* fill in xfer ready frame IU */
+       buf_cmd += sizeof(struct ssp_frame_header);
+       response_iu = (struct mv_ssp_response_iu *)buf_cmd;
+
+       if (has_data == 0) {
+               response_iu->datapres = NO_DATA;
+       } else if (sense_data) {
+               response_iu->status = prm->rq_result;
+               response_iu->datapres = SENSE_DATA;
+               response_iu->sense_data_len =
+                       cpu_to_be32(prm->sense_buffer_len);
+               memcpy(response_iu->data,
+                       prm->sense_buffer, prm->sense_buffer_len);
+       } else {
+       /* response data */
+               /* NOTE(review): status is not assigned in this branch,
+                * so it stays 0 from the memset in mvst_get_slot() --
+                * confirm that is intended for RESPONSE_DATA frames. */
+               response_iu->datapres = RESPONSE_DATA;
+               response_iu->response_data_len = cpu_to_be32((int)4);
+               response_iu->data[3] = INVALID_FRAME;
+       }
+
+       TRACE_EXIT();
+       return 0;
+}
+
+/*
+ * Queue a response-only SSP frame for @cmd on the delivery ring and
+ * kick the hardware.  Returns SCST_TGT_RES_SUCCESS, or
+ * SCST_TGT_RES_QUEUE_FULL when no free slot/tag is available.
+ */
+static int
+mvst_send_resp_deferred(struct mvs_info *mvi,
+                               struct mvst_cmd *cmd)
+{
+       u32 res = SCST_TGT_RES_SUCCESS;
+       struct mvs_slot_info *slot;
+       struct scst_cmd *scst_cmd = cmd->scst_cmd;
+       struct mvst_prm prm = { 0 };
+
+       TRACE_ENTRY();
+
+       /* Collect everything the frame builder needs from the SCST cmd. */
+       prm.cmd = (struct mvst_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
+       prm.sg = scst_cmd_get_sg(scst_cmd);
+       prm.bufflen = scst_cmd_get_resp_data_len(scst_cmd);
+       prm.sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
+       prm.data_direction = scst_cmd_get_data_direction(scst_cmd);
+       prm.rq_result = scst_cmd_get_status(scst_cmd);
+       prm.sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
+       prm.sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
+       prm.tgt = mvi->tgt;
+       prm.seg_cnt = 0;
+       prm.req_cnt = 1;
+
+       /* prepare response frame */
+       slot = mvst_get_slot(mvi, cmd->cmd_tgt_port);
+       if (slot == NULL) {
+               res = SCST_TGT_RES_QUEUE_FULL;
+               TRACE_DBG("send_resp failed[%d]!\n", res);
+       } else {
+               /* save scst cmd */
+               slot->slot_scst_cmd = scst_cmd;
+               mvst_prep_resp_frame(&prm, slot, 1);
+
+               mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+               /* Mid-level is done processing */
+               cmd->cmd_state = MVST_STATE_PROCESSED;
+               /* Tell the chip about the new delivery-queue entry. */
+               MVS_CHIP_DISP->start_delivery(mvi,
+                       (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+       }
+
+       TRACE_EXIT_RES(res);
+       return res;
+}
+
+
+/*
+ * Build a read-data (DATA + piggybacked RESPONSE) frame for @prm->cmd
+ * in the hardware @slot, including the PRD scatter/gather table for
+ * the data payload.  Callers pass the SCSI status as @no_response, so
+ * a non-zero status suppresses the sense payload here (a separate
+ * response frame is sent later).  Always returns 0.
+ */
+static int  mvst_prep_data_frame_sg(struct mvst_prm *prm,
+                       struct mvs_slot_info *slot, u8 no_response)
+{
+       u16 tag;
+       u8 *buf_tmp, sense_data;
+       dma_addr_t buf_tmp_dma;
+       u32 resp_len, prd_len = 0;
+       const u32 max_resp_len = SB_RFB_MAX;
+       u16 cmd_tbl_len = MVS_MAX_SSP_FRAME * 4;
+       struct mvs_info *mvi = prm->tgt->mvi;
+       struct mvs_cmd_header *cmd_hdr;
+       struct mvs_delivery_queue *delivery_q;
+       struct mvs_prd *buf_prd;
+       struct open_address_frame *open_frame;
+       struct mv_ssp_response_iu *response_iu;
+
+       TRACE_ENTRY();
+       tag = slot->target_cmd_tag;
+       cmd_hdr = (struct mvs_cmd_header *)&mvi->slot[tag];
+       /* get free delivery queue */
+       delivery_q = (struct mvs_delivery_queue *)&mvi->tx[mvi->tx_prod];
+
+       /* SSP protocol, Target mode, Normal priority */
+       delivery_q->cmd = TXQ_CMD_SSP;
+       delivery_q->mode = TXQ_MODE_TARGET;
+       delivery_q->priority = TXQ_PRI_NORMAL;
+       delivery_q->sata_reg_set = 0;
+       delivery_q->phy = prm->cmd->cmd_tgt_port->wide_port_phymap;
+       delivery_q->slot_nm = tag;
+
+       if ((prm->rq_result) && (SCST_SENSE_VALID(prm->sense_buffer)))
+               sense_data = 1;
+       else
+               sense_data = 0;
+
+       /* command header dword 0 */
+       cmd_hdr->prd_entry_count = prm->sg_cnt;
+
+       cmd_hdr->ssp_frame_type = MCH_SSP_FR_READ_RESP;
+
+       cmd_hdr->ssp_passthru = MCH_SSP_MODE_NORMAL;
+
+       /* command header dword 1 */
+       /* configure in below */
+
+       /* command header dword 2 */
+       cmd_hdr->target_tag = cpu_to_le16(tag);
+
+       /* copy the tag from received command frame */
+       cmd_hdr->tag = be16_to_cpu(prm->cmd->ssp_hdr->tag);
+
+       /* command header dword 3 */
+       cmd_hdr->data_len = cpu_to_le32(prm->bufflen);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+
+       /* command header dword 4 -5 */
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+       buf_tmp  = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+       cmd_hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+       /* prepare response frame following data */
+       buf_tmp += sizeof(struct ssp_frame_header);
+       response_iu = (struct mv_ssp_response_iu *)buf_tmp;
+       /* When neither branch below fires the IU stays all-zero from the
+        * memset in mvst_get_slot(), i.e. datapres == NO_DATA. */
+       if (sense_data == 0) {
+               response_iu->datapres = NO_DATA;
+       } else if (!no_response) {
+               response_iu->status = prm->rq_result;
+               response_iu->datapres = SENSE_DATA;
+               /*
+                * NOTE(review): the sibling mvst_prep_resp_frame() uses
+                * cpu_to_be32() for sense_data_len, this one uses
+                * cpu_to_le32() -- one of the two must be wrong; SSP IU
+                * fields are big-endian on the wire.  Confirm and unify.
+                */
+               response_iu->sense_data_len =
+                       cpu_to_le32(prm->sense_buffer_len);
+               memcpy(response_iu->data,
+                       prm->sense_buffer, prm->sense_buffer_len);
+       }
+       buf_tmp -= sizeof(struct ssp_frame_header);
+
+       /* command header dword 6 -7 */
+       buf_tmp += cmd_tbl_len;
+       buf_tmp_dma += cmd_tbl_len;
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       slot->open_frame = buf_tmp;
+       cmd_hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 10 -11 */
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table *********************************** */
+       buf_prd = (struct mvs_prd *)buf_tmp;
+       if (prm->sg_cnt != 0)
+               cmd_hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               cmd_hdr->prd_tbl = 0;
+
+       /* PRD entry size is chip-generation specific. */
+       prd_len = MVS_CHIP_DISP->prd_size() * prm->sg_cnt;
+       buf_tmp += prd_len;
+       buf_tmp_dma += prd_len;
+
+       /* command header dword 8 -9 */
+       /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+       slot->response = buf_tmp;
+       cmd_hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 1 */
+       resp_len = MVS_SLOT_BUF_SZ - cmd_tbl_len - MVS_OAF_SZ -
+           sizeof(struct mvs_err_info) - prd_len;
+       resp_len = min(resp_len, max_resp_len);
+
+       cmd_hdr->max_rsp_frame_len = resp_len / 4;
+       cmd_hdr->frame_len = cmd_tbl_len;
+
+       /* generate open address frame hdr (first 12 bytes) */
+       open_frame = (struct open_address_frame *)slot->open_frame;
+       open_frame->initiator = 0;      /* target mode */
+       open_frame->protocol = PROTOCOL_SSP;
+       open_frame->frame_type = ADDRESS_OPEN_FRAME;
+       /* Mirror rate/tag/address of the originally received OPEN frame. */
+       open_frame->connect_rate = (prm->cmd->open_frame->received_rate);
+       open_frame->connect_tag =
+               be16_to_cpu(prm->cmd->open_frame->received_tag);
+       open_frame->dest_sas_addr =
+               mvst_get_be_sas_addr((u8 *)&prm->cmd->open_frame->src_sas_addr);
+
+       /*  for passthru mode */
+       /* fill in SSP frame header (Command Table.SSP frame header) */
+       /* Dead branch: ssp_passthru is MCH_SSP_MODE_NORMAL above. */
+       if (cmd_hdr->ssp_passthru == MCH_SSP_MODE_PASSTHRU) {
+               struct ssp_frame_header *ssp_hdr;
+               /* command table */
+               ssp_hdr = (struct ssp_frame_header *)slot->buf;
+               ssp_hdr->frame_type = SSP_DATA;
+               memcpy(ssp_hdr->hashed_dest_sas_addr,
+                       prm->cmd->ssp_hdr->hashed_src_sas_addr,
+                       HASHED_SAS_ADDR_SIZE);
+               memcpy(ssp_hdr->hashed_src_sas_addr,
+                      prm->cmd->ssp_hdr->hashed_dest_sas_addr,
+                      HASHED_SAS_ADDR_SIZE);
+               /* copy the tag from received command frame */
+               ssp_hdr->tag = be16_to_cpu(prm->cmd->ssp_hdr->tag);
+       }
+
+       /* fill in PRD (scatter/gather) table, if any */
+       mvst_prep_prd(prm, buf_prd);
+       TRACE_EXIT();
+       return 0;
+}
+
+/*
+ * Queue a read-data frame (with piggybacked response) for @prm->cmd on
+ * the delivery ring and kick the hardware.  Returns
+ * SCST_TGT_RES_SUCCESS, or SCST_TGT_RES_QUEUE_FULL when no free slot
+ * is available.
+ */
+static int
+mvst_send_data_frame_sg(struct mvs_info *mvi,
+                               struct mvst_prm *prm)
+{
+       u32 res = SCST_TGT_RES_SUCCESS;
+       struct mvst_cmd *cmd = prm->cmd;
+       struct mvs_slot_info *slot;
+
+       TRACE_ENTRY();
+
+       slot = mvst_get_slot(mvi, cmd->cmd_tgt_port);
+       if (slot == NULL) {
+               res = SCST_TGT_RES_QUEUE_FULL;
+               TRACE_DBG("send_data_frame failed[%d]!\n", res);
+       } else {
+               /* save scst cmd */
+               slot->slot_scst_cmd = cmd->scst_cmd;
+               mvst_prep_data_frame_sg(prm, slot, prm->rq_result);
+               mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+               /* Mid-level is done processing */
+               cmd->cmd_state = MVST_STATE_PROCESSED;
+               /* Tell the chip about the new delivery-queue entry. */
+               MVS_CHIP_DISP->start_delivery(mvi,
+                       (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+       }
+
+       TRACE_EXIT_RES(res);
+       return res;
+}
+
+
+/* Called from IRQ context */
+/*
+ * Walk mvi->data_cmd_list and (re)send the deferred response frame for
+ * every command not yet processed, stopping early when the delivery
+ * queue fills up.  Returns the result of the last attempted send
+ * (SCST_TGT_RES_SUCCESS or SCST_TGT_RES_QUEUE_FULL).
+ *
+ * Cleanup vs. the original: the unused 'pass' counter and the
+ * goto/label pair were removed; behavior is unchanged.
+ */
+static int
+mvst_send_cmd(struct mvs_info *mvi)
+{
+       u32 res = SCST_TGT_RES_SUCCESS;
+       struct list_head *pos, *n;
+       struct mvst_cmd *cmd;
+
+       BUG_ON(!in_irq());
+       list_for_each_safe(pos, n, &mvi->data_cmd_list) {
+               cmd = container_of(pos, struct mvst_cmd, cmd_entry);
+               if (cmd->cmd_state == MVST_STATE_PROCESSED)
+                       continue;       /* has sent command */
+               res = mvst_send_resp_deferred(mvi, cmd);
+               if (res == SCST_TGT_RES_QUEUE_FULL)
+                       break;  /* no resource, retry on next invocation */
+       }
+       return res;
+}
+static int mvst_xmit_response(struct scst_cmd *scst_cmd)
+{
+       int res = SCST_TGT_RES_SUCCESS;
+       struct mvst_sess *sess;
+       int is_send_status;
+       unsigned long flags = 0;
+       struct mvst_prm prm = { 0 };
+       struct mvs_info *mvi;
+
+       TRACE_ENTRY();
+       TRACE(TRACE_SCSI, "xmit_respons scmd[0x%p] tag=%ld, sg_cnt=%d",
+               scst_cmd, scst_cmd_get_tag(scst_cmd), scst_cmd->sg_cnt);
+
+#ifdef DEBUG_WORK_IN_THREAD
+       if (scst_cmd_atomic(scst_cmd))
+               return SCST_TGT_RES_NEED_THREAD_CTX;
+#endif
+       memset(&prm, 0, sizeof(struct mvst_prm));
+       prm.cmd = (struct mvst_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
+       TRACE_DBG("xmit_response cmd[0x%p]", prm.cmd);
+       sess = (struct mvst_sess *)
+               scst_sess_get_tgt_priv(scst_cmd_get_session(scst_cmd));
+
+       if (unlikely(scst_cmd_aborted(scst_cmd))) {
+               TRACE(TRACE_MGMT_MINOR, "mvst tgt(%ld): terminating exchange "
+                       "for aborted scst_cmd=%p (tag=%ld)",
+                       mvi->instance, scst_cmd, scst_cmd_get_tag(scst_cmd));
+
+               scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
+
+               prm.cmd->cmd_state = MVST_STATE_ABORTED;
+
+               /* !! At this point cmd could be already freed !! */
+               goto out;
+       }
+
+       prm.sg = scst_cmd_get_sg(scst_cmd);
+       prm.bufflen = scst_cmd_get_resp_data_len(scst_cmd);
+       prm.sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
+       prm.data_direction = scst_cmd_get_data_direction(scst_cmd);
+       prm.rq_result = scst_cmd_get_status(scst_cmd);
+       prm.sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
+       prm.sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
+       prm.tgt = sess->tgt;
+       prm.seg_cnt = 0;
+       prm.req_cnt = 1;
+       is_send_status = scst_cmd_get_is_send_status(scst_cmd);
+
+       TRACE_DBG("rq_result=%x, is_send_status=%x,"
+               "bufflen=0x%x, sense_buffer_len=0x%x", prm.rq_result,
+               is_send_status, prm.bufflen, prm.sense_buffer_len);
+
+       mvi = prm.tgt->mvi;
+
+       if (prm.rq_result != 0)
+               TRACE_BUFFER("Sense", prm.sense_buffer, prm.sense_buffer_len);
+
+       if (!is_send_status) {
+               /* ToDo, after it's done in SCST */
+               PRINT_ERROR("mvst tgt(%ld): is_send_status not set: "
+                    "feature not implemented", prm.tgt->mvi->instance);
+               res = SCST_TGT_RES_FATAL_ERROR;
+               goto out_tgt_free;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&prm.tgt->mvi->lock, flags);
+
+       /*
+        * We need send read left data/response frame to HBA in later,
+        * so save the cmd to mvi->data_cmd_list.
+        */
+       list_add_tail(&prm.cmd->cmd_entry, &mvi->data_cmd_list);
+       if (mvst_has_data(scst_cmd)) {
+               /* prepare send data frame */
+               res = mvst_send_data_frame_sg(mvi, &prm);
+               if (res)
+                       TRACE_DBG("xmit_response"
+                       "mvst_send_data_frame failed[%d]!\n",
+                       res);
+               goto out_done;
+       } else {
+               /* prepare response frame */
+               res = mvst_send_resp_deferred(mvi, prm.cmd);
+               if (res)
+                       TRACE_DBG("xmit_response"
+                       "mvst_send_resp_deferred failed[%d]!\n",
+                       res);
+       }
+
+out_done:
+       /* Release ring specific lock */
+       spin_unlock_irqrestore(&prm.tgt->mvi->lock, flags);
+
+out:
+       TRACE_EXIT_RES(res);
+       return res;
+
+out_tgt_free:
+       /*ToDo: check and set scst_set_delivery_status(), if necessary */
+       if (!in_interrupt()) {
+               msleep(250);
+               scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_DIRECT);
+       } else
+               scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_TASKLET);
+       /* !! At this point cmd could be already freed !! */
+       goto out;
+}
+
+
+
+/*
+ * mvst_prep_xfer_frame() - build an SSP XFER_RDY delivery for a target
+ * command that needs write data from the initiator.
+ *
+ * Fills in the delivery queue entry, the slot's command header, the open
+ * address frame, the XFER_RDY information unit and the PRD (scatter/
+ * gather) table, all inside the slot's preallocated DMA buffer.  Layout
+ * of slot->buf (MVS_SLOT_BUF_SZ bytes total):
+ *   region 1: SSP frame header + XFER_RDY IU (req_len bytes)
+ *   region 2: open address frame (MVS_OAF_SZ bytes)
+ *   region 3: PRD table (sg_cnt * sizeof(struct mvs_prd) bytes)
+ *   region 4: status/response buffer (remainder, capped at SB_RFB_MAX)
+ *
+ * @prm:        per-command parameter bundle (cmd, sg list, lengths, tgt)
+ * @slot:       hardware slot that will carry the frame
+ * @first_xfer: nonzero for the command's first transfer; currently not
+ *              referenced in the body - TODO confirm intended use
+ *
+ * Returns 0 (there are no failure paths at present).
+ */
+static int mvst_prep_xfer_frame(struct mvst_prm *prm,
+                       struct mvs_slot_info *slot, u8 first_xfer)
+{
+       u16 tag;
+       void *buf_tmp, *buf_cmd;
+       dma_addr_t buf_tmp_dma;
+       u32 resp_len, req_len, prd_len;
+       const u32 max_resp_len = SB_RFB_MAX;
+       struct mvs_info *mvi = prm->tgt->mvi;
+       struct mvs_cmd_header *cmd_hdr;
+       struct mvs_delivery_queue *delivery_q;
+       struct mvs_prd *buf_prd;
+       struct open_address_frame *open_frame;
+       struct ssp_xfrd_iu *xfrd_iu;
+
+       TRACE_ENTRY();
+       tag = slot->target_cmd_tag;
+
+       cmd_hdr = (struct mvs_cmd_header *)&mvi->slot[tag];
+       /* get free delivery queue */
+       delivery_q = (struct mvs_delivery_queue *)&mvi->tx[mvi->tx_prod];
+       req_len = sizeof(struct ssp_frame_header) + sizeof(struct ssp_xfrd_iu);
+
+       /* SSP protocol, Target mode, Normal priority */
+       delivery_q->cmd = TXQ_CMD_SSP;
+       delivery_q->mode = TXQ_MODE_TARGET;
+       delivery_q->priority = TXQ_PRI_NORMAL;
+       delivery_q->sata_reg_set = 0;
+       delivery_q->phy = prm->cmd->cmd_tgt_port->wide_port_phymap;
+       delivery_q->slot_nm = tag;
+
+       TRACE_DBG("delivery_q=0x%x.\n", mvi->tx[mvi->tx_prod]);
+       /* command header dword 0 */
+       cmd_hdr->prd_entry_count = prm->sg_cnt;
+       cmd_hdr->ssp_frame_type = MCH_SSP_FR_XFER_RDY;
+       cmd_hdr->ssp_passthru = MCH_SSP_MODE_NORMAL;
+
+       /* command header dword 1 */
+       /* configure in below */
+
+       /* command header dword 2 */
+       /* copy the tag from received command frame */
+       cmd_hdr->target_tag = cpu_to_le16(tag);
+       /*
+        * NOTE(review): tag is converted with be16_to_cpu() while
+        * target_tag above uses cpu_to_le16() - verify the expected
+        * endianness of this header field against the chip spec.
+        */
+       cmd_hdr->tag = be16_to_cpu(prm->cmd->ssp_hdr->tag);
+
+
+       /* command header dword 3 */
+       cmd_hdr->data_len = cpu_to_le32(prm->bufflen);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+       /* command header dword 4 -5 */
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
+       buf_tmp = buf_cmd = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+       cmd_hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+
+       /* command header dword 6 -7 */
+       buf_tmp += req_len;
+       buf_tmp_dma += req_len;
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ***** */
+       slot->open_frame = buf_tmp;
+       cmd_hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 10 -11 */
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+       /* region 3: PRD table ************************ */
+       buf_prd = buf_tmp;
+       if (prm->sg_cnt)
+               cmd_hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               cmd_hdr->prd_tbl = 0;
+
+       prd_len = sizeof(struct mvs_prd) * prm->sg_cnt;
+       buf_tmp += prd_len;
+       buf_tmp_dma += prd_len;
+
+       /* command header dword 8 -9 */
+       /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+       slot->response = buf_tmp;
+       cmd_hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+
+       /* command header dword 1 */
+       /* whatever the PRD table consumed, the response area gets the rest */
+       resp_len = MVS_SLOT_BUF_SZ - req_len - MVS_OAF_SZ -
+           sizeof(struct mvs_err_info) - prd_len;
+       resp_len = min(resp_len, max_resp_len);
+
+       /* frame lengths are expressed in dwords, hence the /4 */
+       cmd_hdr->max_rsp_frame_len = resp_len / 4;
+       cmd_hdr->frame_len =
+               req_len / 4 < MVS_MAX_SSP_FRAME ?
+               req_len/4 : MVS_MAX_SSP_FRAME;
+
+       TRACE_BUFFER("command header:", cmd_hdr, sizeof(*cmd_hdr));
+       /* generate open address frame hdr (first 12 bytes) */
+       open_frame = (struct open_address_frame *)slot->open_frame;
+       /* target mode */
+       open_frame->initiator = 0;
+       open_frame->protocol = PROTOCOL_SSP;
+       open_frame->frame_type = ADDRESS_OPEN_FRAME;
+       /* mirror the link parameters negotiated by the incoming frame */
+       open_frame->connect_rate = (prm->cmd->open_frame->received_rate);
+       open_frame->connect_tag =
+               be16_to_cpu(prm->cmd->open_frame->received_tag);
+       /* reply goes back to the source address of the received frame */
+       open_frame->dest_sas_addr =
+               mvst_get_be_sas_addr((u8 *)&prm->cmd->open_frame->src_sas_addr);
+
+       TRACE_BUFFER("open frame:", open_frame, sizeof(*open_frame));
+/*  for passthru mode */
+       /* fill in SSP frame header (Command Table.SSP frame header) */
+       if (cmd_hdr->ssp_passthru == MCH_SSP_MODE_PASSTHRU) {
+               struct ssp_frame_header *ssp_hdr;
+               ssp_hdr = (struct ssp_frame_header *)slot->buf;
+               ssp_hdr->frame_type = SSP_XFER_RDY;
+               /* swap src/dest: we answer back to the initiator */
+               memcpy(ssp_hdr->hashed_dest_sas_addr,
+                       prm->cmd->ssp_hdr->hashed_src_sas_addr,
+                      HASHED_SAS_ADDR_SIZE);
+               memcpy(ssp_hdr->hashed_src_sas_addr,
+                      prm->cmd->ssp_hdr->hashed_dest_sas_addr,
+                      HASHED_SAS_ADDR_SIZE);
+               /* copy the tag from received command frame */
+               ssp_hdr->tag = be16_to_cpu(prm->cmd->ssp_hdr->tag);
+       }
+
+       /* fill in xfer ready frame IU */
+       buf_cmd += sizeof(struct ssp_frame_header);
+       xfrd_iu = (struct ssp_xfrd_iu *)buf_cmd;
+       xfrd_iu->data_len = cpu_to_be32(prm->bufflen);
+       /*
+        * NOTE(review): data_len is byte-swapped with cpu_to_be32() but
+        * requested_offset is stored raw below - confirm whether a swap
+        * is missing for the non-zero first-burst case.
+        */
+       if ((!prm->cmd->command_iu->first_burst) ||
+               (!prm->cmd->open_frame->first_burst_size))
+               xfrd_iu->requested_offset = 0;
+       else
+               xfrd_iu->requested_offset =
+                       prm->cmd->open_frame->first_burst_size;
+       TRACE_BUFFER("xfrd_iu:", xfrd_iu, sizeof(*xfrd_iu));
+
+       /* fill in PRD (scatter/gather) table, if any */
+       mvst_prep_prd(prm, buf_prd);
+
+       TRACE_EXIT();
+       return 0;
+}
+
+/*
+ * Map the command's scatter/gather list for DMA and record the number of
+ * mapped segments in prm->seg_cnt.
+ *
+ * Returns 0 on success, -1 when the PCI mapping produced no segments.
+ */
+static int mvst_pci_map_calc_cnt(struct mvst_prm *prm)
+{
+       int res = 0;
+
+       sBUG_ON(prm->sg_cnt == 0);
+
+       /* 32 bit S/G Data Transfer */
+       prm->seg_cnt = pci_map_sg(prm->tgt->mvi->pdev, prm->sg, prm->sg_cnt,
+                              scst_to_tgt_dma_dir(prm->data_direction));
+       if (unlikely(prm->seg_cnt == 0)) {
+               PRINT_ERROR("mvs_tgt PCI mapping failed: sg_cnt=%d",
+                       prm->sg_cnt);
+               res = -1;
+       } else {
+               /*
+                * More than four S/G entries would require continuation
+                * entries, which are not implemented yet - bug out.
+                */
+               sBUG_ON(prm->seg_cnt > SG_ALL);
+       }
+
+       TRACE_DBG("seg_cnt=%d, req_cnt=%d, res=%d", prm->seg_cnt,
+               prm->req_cnt, res);
+       return res;
+}
+
+static int mvst_rdy_to_xfer(struct scst_cmd *scst_cmd)
+{
+       int res = SCST_TGT_RES_SUCCESS;
+       struct mvst_sess *sess;
+       unsigned long flags = 0;
+       struct mvst_prm prm = { 0 };
+       struct mvs_slot_info *slot;
+       u32 rc = 0, pass = 0;
+       struct mvs_info *mvi;
+       TRACE_ENTRY();
+       TRACE(TRACE_SCSI, "tag=%lld", scst_cmd_get_tag(scst_cmd));
+
+#ifdef DEBUG_WORK_IN_THREAD
+       if (scst_cmd_atomic(scst_cmd))
+               return SCST_TGT_RES_NEED_THREAD_CTX;
+#endif
+       prm.cmd = (struct mvst_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
+       sess = (struct mvst_sess *)
+               scst_sess_get_tgt_priv(scst_cmd_get_session(scst_cmd));
+       prm.sg = scst_cmd_get_sg(scst_cmd);
+       prm.bufflen = scst_cmd_get_bufflen(scst_cmd);
+       prm.sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
+       prm.data_direction = scst_cmd_get_data_direction(scst_cmd);
+       prm.tgt = sess->tgt;
+       prm.req_cnt = 1;
+       prm.cmd->cmd_state = MVST_STATE_NEED_DATA;
+
+       mvi = prm.tgt->mvi;
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&mvi->lock, flags);
+
+       /* Calculate number of entries and segments required */
+       if (mvst_pci_map_calc_cnt(&prm) != 0) {
+               res = SCST_TGT_RES_QUEUE_FULL;
+               goto err_out;
+       }
+
+       slot = mvst_get_slot(mvi, prm.cmd->cmd_tgt_port);
+       if (!slot) {
+               res = SCST_TGT_RES_QUEUE_FULL;
+               goto err_out;
+       }
+       slot->slot_scst_cmd = scst_cmd;
+
+       TRACE_DBG("start rdy_to_xfer: mvi(%d)", (int) prm.tgt->mvi->instance);
+
+       rc = mvst_prep_xfer_frame(&prm, slot, 1);
+       if (rc) {
+               res = SCST_TGT_RES_FATAL_ERROR;
+               goto err_out_tag;
+       }
+       ++pass;
+       mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+
+       goto out_done;
+
+err_out_tag:
+       PRINT_ERROR("%s:prepare xfer frame failed.", __func__);
+       mvst_put_slot(mvi, slot);
+err_out:
+       PRINT_ERROR("%s:No sufficient tag for xfer frame", __func__);
+out_done:
+       if (pass)
+               MVS_CHIP_DISP->start_delivery(mvi,
+                       (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+
+       /* Release ring specific lock */
+       spin_unlock_irqrestore(&mvi->lock, flags);
+
+       TRACE_EXIT_RES(res);
+       return res;
+}
+
+
+
+/* Return a target command to the global mvst command slab cache. */
+static inline void mvst_free_cmd(struct mvst_cmd *cmd)
+{
+       kmem_cache_free(mvst_cmd_cachep, cmd);
+}
+
+/*
+ * SCST "on_free_cmd" callback: tear down the target-private command data
+ * after SCST has finished with the scst_cmd.
+ */
+static void mvst_on_free_cmd(struct scst_cmd *scst_cmd)
+{
+       struct mvst_cmd *cmd =
+               (struct mvst_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
+
+       TRACE_ENTRY();
+       TRACE(TRACE_SCSI, "END Command tag %lld", scst_cmd_get_tag(scst_cmd));
+       /* detach the private data before scrubbing and freeing it */
+       scst_cmd_set_tgt_priv(scst_cmd, NULL);
+       /*
+        * NOTE(review): the next three memsets zero the buffers the saved
+        * pointers refer to; if those point into shared slot memory this
+        * also scrubs the received frame copies - confirm ownership.  The
+        * final memset of *cmd is defensive scrubbing before the slab
+        * free (it also clears the three pointers just used).
+        */
+       memset(cmd->ssp_hdr, 0, sizeof(*cmd->ssp_hdr));
+       memset(cmd->command_iu, 0, sizeof(*cmd->command_iu));
+       memset(cmd->open_frame, 0, sizeof(*cmd->open_frame));
+       memset(cmd, 0, sizeof(*cmd));
+
+       mvst_free_cmd(cmd);
+
+       TRACE_EXIT();
+       return;
+}
+
+
+/*
+ * Decode and log the error-information record of a completed slot.
+ * Always returns 0 at present; the return value is reserved for a
+ * future status code.
+ */
+static int mvst_slot_tgt_err(struct mvs_info *mvi,      u32 slot_idx)
+{
+       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+       u32 dw0, dw1;
+       int stat = 0;
+
+       TRACE_ENTRY();
+
+       /* the first two dwords of the status buffer hold the error record */
+       dw0 = le32_to_cpu(*(u32 *) (slot->response));
+       dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
+
+       if (dw0 & CMD_ISS_STPD)
+               mv_dprintk("slot[%d] command issue stopped.\n", slot_idx);
+       if (dw1 & SLOT_BSY_ERR)
+               mv_dprintk("slot[%d] busy error.\n", slot_idx);
+       mv_dprintk("slot[%d] get error Dw0:0x%x, Dw1:0x%x\n",
+               slot_idx, dw0, dw1);
+       TRACE_BUFFER("status buffer:", (u8 *) slot->response, 16);
+
+       TRACE_EXIT_HRES(stat);
+       return stat;
+}
+
+
+/* mvi->lock supposed to be held on entry */
+static void mvst_do_cmd_completion(struct mvs_info *mvi,
+                                 uint32_t rx_desc)
+{
+       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+       struct mvs_slot_info *slot =
+               (struct mvs_slot_info *)&mvi->slot_info[slot_idx];
+       struct mvs_cmd_header  *cmd_hdr =
+               (struct mvs_cmd_header  *)&mvi->slot[slot_idx];
+       struct scst_cmd *scst_cmd;
+       struct mvst_cmd *cmd;
+       int err = 0;
+       u8 frame_type;
+       u64 dest_sas_addr;
+       TRACE_ENTRY();
+
+       frame_type = cmd_hdr->ssp_frame_type;
+       TRACE(TRACE_DEBUG|TRACE_SCSI, "frame[0x%x] complete, rx_desc=0x%x",
+             frame_type, rx_desc);
+
+       /* error info record present */
+       if (unlikely((rx_desc & RXQ_ERR) && (slot->response))) {
+               mvst_slot_tgt_err(mvi, slot_idx);
+               TRACE_DBG("Found by failed  frame_type[0x%x]", frame_type);
+               err = 1;
+       }
+
+        if (slot->slot_scst_cmd) {
+               if (!slot->open_frame) {
+                       TRACE_DBG("Found recevied command[%p]"
+                               "but no open frame.", slot->slot_scst_cmd);
+                       sBUG_ON(!slot->open_frame);
+                       goto out;
+               }
+        }
+
+        if (slot->slot_scst_cmd) {
+               struct open_address_frame *open_frame =
+                       (struct open_address_frame *)slot->open_frame;
+               struct ssp_frame_header *ssp_hdr;
+               struct mvst_sess *sess;
+               TRACE_BUFFER("SSP Header", ssp_hdr, sizeof(*ssp_hdr));
+               TRACE_BUFFER("SSP open_frame", open_frame, sizeof(*open_frame));
+               dest_sas_addr = (open_frame->dest_sas_addr);
+               sess = mvst_find_sess_by_lid(mvi->tgt,
+                       mvst_get_le_sas_addr((u8 *)&dest_sas_addr));
+               TRACE(TRACE_DEBUG, "dest_sas_addr=%016llx", dest_sas_addr);
+               if (sess == NULL) {
+                       ssp_hdr = (struct ssp_frame_header *)slot->buf;
+                       TRACE_DBG("mvst tgt(%ld): Suspicious: "
+                                  "command completion for non-existing"
+                                  "session " "(sas addr[%016llx], tag %d)",
+                                  mvi->instance,
+                                  mvst_get_le_sas_addr((u8 *)&dest_sas_addr),
+                                  be16_to_cpu(ssp_hdr->tag));
+                       goto out;
+               }
+               scst_cmd = slot->slot_scst_cmd;
+               TRACE_DBG("Found scst_cmd %p", scst_cmd);
+
+               cmd = (struct mvst_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
+
+               if (cmd->cmd_state == MVST_STATE_PROCESSED) {
+                       TRACE_DBG("Command %p finished", cmd);
+                       if (err)
+                               goto out;
+                       list_del(&cmd->cmd_entry);
+                       goto out_free;
+               } else if (cmd->cmd_state == MVST_STATE_NEED_DATA) {
+                       int context = SCST_CONTEXT_TASKLET;
+                       int rx_status = SCST_RX_STATUS_SUCCESS;
+
+                       cmd->cmd_state = MVST_STATE_DATA_IN;
+
+                       if (err)
+                               rx_status = SCST_RX_STATUS_ERROR;
+
+#ifdef DEBUG_WORK_IN_THREAD
+                       context = SCST_CONTEXT_THREAD;
+#endif
+
+                       TRACE_DBG("Data received, context %x, rx_status %d",
+                             context, rx_status);
+
+                       pci_unmap_sg(mvi->pdev, scst_cmd_get_sg(scst_cmd),
+                               scst_cmd_get_sg_cnt(scst_cmd),
+                               scst_to_tgt_dma_dir(
+                               scst_cmd_get_data_direction(scst_cmd)));
+
+                       scst_rx_data(scst_cmd, rx_status, context);
+               } else if (cmd->cmd_state == MVST_STATE_SEND_DATA) {
+                       TRACE_DBG("Read data command %p finished", cmd);
+                       if (err) {
+                               cmd->cmd_state = MVST_STATE_SEND_DATA_RETRY;
+                               sBUG_ON(1);
+                       }
+                       goto out;
+               } else if (cmd->cmd_state == MVST_STATE_ABORTED) {
+                       TRACE_DBG("Aborted command %p finished", cmd);
+                       goto out_free;
+               } else {
+                       PRINT_ERROR("mvst tgt(%ld): A command in state"
+                               "(%d) should " "not return a complete",
+                               mvi->instance, cmd->cmd_state);
+                       goto out_free;
+               }
+       } else {
+               TRACE_DBG("Found internal target frame[0x%x] complete",
+                       cmd_hdr->ssp_frame_type);
+               goto out;
+       }
+out:
+       mvst_put_slot(mvi, slot);
+       TRACE_EXIT();
+       return;
+
+out_free:
+       if (unlikely(err)) {
+               TRACE_MGMT_DBG("%s", "Finishing failed CTIO");
+               scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
+       }
+       if (!in_interrupt()) {
+               msleep(250);
+               scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_DIRECT);
+       } else
+               scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_TASKLET);
+       goto out;
+}
+
+/* mvi->lock supposed to be held on entry */
+/* called via callback from mvst */
+static void mvst_cmd_completion(struct mvs_info *mvi, uint32_t rx_desc)
+{
+       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+       struct mvs_cmd_header  *cmd_hdr = NULL;
+
+       TRACE_ENTRY();
+       sBUG_ON(mvi == NULL);
+
+       /* dispatch only when the controller is running in target mode */
+       if (mvi->tgt == NULL || !MVST_IN_TARGET_MODE(mvi)) {
+               cmd_hdr = (struct mvs_cmd_header  *)&mvi->slot[slot_idx];
+               TRACE_DBG("command complete, but target mode not enabled."
+                       "mvi %p complete frame 0x%x",
+                       mvi, cmd_hdr->ssp_frame_type);
+       } else
+               mvst_do_cmd_completion(mvi, rx_desc);
+
+       TRACE_EXIT();
+       return;
+}
+
+
+
+/* mvi->lock is supposed to be held on entry */
+static int mvst_do_send_cmd_to_scst(struct mvs_info *mvi, struct mvst_cmd *cmd)
+{
+       int res = 0;
+       struct mvst_sess *sess = cmd->sess;
+       u8 lun[8];
+       scst_data_direction dir = SCST_DATA_NONE;
+       int context;
+
+       TRACE_ENTRY();
+
+       memcpy(lun, cmd->command_iu->lun, 8);
+       cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, (uint8_t *)&lun,
+                                   sizeof(lun), cmd->command_iu->cdb,
+                                   MVST_MAX_CDB_LEN,
+                                   SCST_ATOMIC);
+
+       if (cmd->scst_cmd == NULL) {
+               PRINT_ERROR("mvst tgt(%ld): scst_rx_cmd() failed for "
+                    "host %ld(%p)", mvi->instance, mvi->host_no, mvi);
+               res = -EFAULT;
+               goto out;
+       }
+
+       TRACE_DBG("Get new scst_cmd %p", cmd->scst_cmd);
+       TRACE_BUFFER("Get command header:",
+               cmd->ssp_hdr, sizeof(struct ssp_frame_header));
+       TRACE_BUFFER("Get command open frame:",
+               cmd->open_frame, sizeof(struct open_address_frame));
+       scst_cmd_set_tag(cmd->scst_cmd, be16_to_cpu(cmd->ssp_hdr->tag));
+       scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
+       if (cmd->command_iu->cdb[0] & MVST_EXEC_READ)
+               dir = SCST_DATA_READ;
+       else if (cmd->command_iu->cdb[0] & MVST_EXEC_WRITE)
+               dir = SCST_DATA_WRITE;
+
+       switch (cmd->command_iu->task_attr) {
+       case TASK_ATTR_SIMPLE:
+               cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
+               break;
+       case TASK_ATTR_HOQ:
+               cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
+               break;
+       case TASK_ATTR_ORDERED:
+               cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
+               break;
+       case TASK_ATTR_ACA:
+               cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
+               break;
+       default:
+               PRINT_ERROR("mvst tgt(%ld): Unknown task code %x, use "
+                       "ORDERED instead", mvi->instance,
+                       cmd->command_iu->task_attr);
+               cmd->scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
+               break;
+       }
+
+#ifdef DEBUG_WORK_IN_THREAD
+       context = SCST_CONTEXT_THREAD;
+#else
+       context = SCST_CONTEXT_TASKLET;
+#endif
+
+       TRACE_DBG("Context %x", context);
+       TRACE(TRACE_SCSI, "START Command (tag %ld)",
+               scst_cmd_get_tag(cmd->scst_cmd));
+       scst_cmd_init_done(cmd->scst_cmd, context);
+out:
+       TRACE_EXIT_RES(res);
+       return res;
+}
+
+/* Called in SCST's thread context */
+static void mvst_alloc_session_done(struct scst_session *scst_sess,
+                                  void *data, int result)
+{
+       TRACE_ENTRY();
+
+       if (result != 0) {
+               struct mvst_sess *sess = (struct mvst_sess *)data;
+               struct mvst_tgt *tgt = sess->tgt;
+               struct mvs_info *mvi = tgt->mvi;
+               unsigned long flags;
+
+               TRACE_DBG("mvst tgt(%ld): Session initialization