author	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-02 13:52:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-02 13:52:35 -0700
commit	d626e3bf728c47746f2129aa00c775d4e8c2a73b (patch)
tree	551a8c362c7b9833e7848bc6167cf322f75563a1
parent	b66e1f11ebc429569a3784aaf64123633d9e3ed1 (diff)
parent	7ad4a485002c141f156a014e89542e01e7f8e36a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6:
  [SCSI] aic94xx: fix section mismatch
  [SCSI] u14-34f: Fix 32bit only problem
  [SCSI] dpt_i2o: sysfs code
  [SCSI] dpt_i2o: 64 bit support
  [SCSI] dpt_i2o: move from virt_to_bus/bus_to_virt to dma_alloc_coherent
  [SCSI] dpt_i2o: use standard __init / __exit code
  [SCSI] megaraid_sas: fix suspend/resume sections
  [SCSI] aacraid: Add Power Management support
  [SCSI] aacraid: Fix jbod operations scan issues
  [SCSI] aacraid: Fix warning about macro side-effects
  [SCSI] add support for variable length extended commands
  [SCSI] Let scsi_cmnd->cmnd use request->cmd buffer
  [SCSI] bsg: add large command support
  [SCSI] aacraid: Fix down_interruptible() to check the return value correctly
  [SCSI] megaraid_sas: Update the Version and Changelog
  [SCSI] ibmvscsi: Handle non SCSI error status
  [SCSI] bug fix for free list handling
  [SCSI] ipr: Rename ipr's state scsi host attribute to prevent collisions
  [SCSI] megaraid_mbox: fix Dell CERC firmware problem
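The largest piece of this merge is the variable-length CDB work ("[SCSI] add support for variable length extended commands"). The call sites changed in drivers/scsi/constants.c below show the intent: the hand-rolled "cdbp[7] + 8" computation is replaced by scsi_varlen_cdb_length(), and __scsi_print_command() now asks scsi_command_size() for the CDB length. A minimal sketch of how those helpers fit together, inferred from those call sites and the scsi_command_size_tbl[] contents in block/scsi_ioctl.c (helper bodies here are illustrative; the exact definitions added to include/scsi/scsi.h may differ):

#define VARIABLE_LENGTH_CMD 0x7f

/* Classic fixed-size command groups, indexed by the top 3 bits of the opcode. */
static const unsigned char scsi_command_size_tbl[8] = {
	6, 10, 10, 12, 16, 12, 10, 10
};
#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]

/* SPC variable-length CDBs (opcode 0x7f): byte 7 holds the additional length. */
static inline unsigned scsi_varlen_cdb_length(const unsigned char *cdb)
{
	return cdb[7] + 8;
}

static inline unsigned scsi_command_size(const unsigned char *cdb)
{
	return cdb[0] == VARIABLE_LENGTH_CMD ?
		scsi_varlen_cdb_length(cdb) : COMMAND_SIZE(cdb[0]);
}

With this split, drivers that copy CDBs (fw-sbp2, a100u2w, zfcp below) can use cmd->cmd_len directly instead of re-deriving the size from the opcode.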
-rw-r--r-- Documentation/scsi/ChangeLog.megaraid_sas | 22
-rw-r--r-- block/bsg.c | 12
-rw-r--r-- block/scsi_ioctl.c | 5
-rw-r--r-- drivers/firewire/fw-sbp2.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r-- drivers/scsi/53c700.c | 6
-rw-r--r-- drivers/scsi/Kconfig | 3
-rw-r--r-- drivers/scsi/a100u2w.c | 2
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 133
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 28
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 2
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 34
-rw-r--r-- drivers/scsi/aacraid/linit.c | 22
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 6
-rw-r--r-- drivers/scsi/constants.c | 10
-rw-r--r-- drivers/scsi/dpt/dpti_ioctl.h | 16
-rw-r--r-- drivers/scsi/dpt/dptsig.h | 8
-rw-r--r-- drivers/scsi/dpt/sys_info.h | 4
-rw-r--r-- drivers/scsi/dpt_i2o.c | 640
-rw-r--r-- drivers/scsi/dpti.h | 15
-rw-r--r-- drivers/scsi/gdth.c | 2
-rw-r--r-- drivers/scsi/hptiop.c | 6
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 7
-rw-r--r-- drivers/scsi/ibmvscsi/viosrp.h | 9
-rw-r--r-- drivers/scsi/initio.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 17
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.h | 1
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.c | 13
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 6
-rw-r--r-- drivers/scsi/qla1280.c | 4
-rw-r--r-- drivers/scsi/scsi.c | 23
-rw-r--r-- drivers/scsi/scsi_error.c | 15
-rw-r--r-- drivers/scsi/scsi_lib.c | 7
-rw-r--r-- drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r-- drivers/scsi/u14-34f.c | 6
-rw-r--r-- drivers/usb/storage/cypress_atacb.c | 2
-rw-r--r-- drivers/usb/storage/isd200.c | 2
-rw-r--r-- include/scsi/scsi.h | 40
-rw-r--r-- include/scsi/scsi_cmnd.h | 23
-rw-r--r-- include/scsi/scsi_eh.h | 4
-rw-r--r-- include/scsi/scsi_host.h | 8
43 files changed, 884 insertions(+), 291 deletions(-)
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 91c81db0ba7..716fcc1cafb 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,25 @@
+1 Release Date : Mon. March 10 11:02:31 PDT 2008 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.20-RC1
+3 Older Version : 00.00.03.16
+
+1. Rollback the sense info implementation
+ Sense buffer ptr data type in the ioctl path is reverted back
+ to u32 * as in previous versions of driver.
+
+2. Fixed the driver frame count.
+ When Driver sent wrong frame count to firmware. As this
+ particular command is sent to drive, FW is seeing continuous
+ chip resets and so the command will timeout.
+
+3. Add the new controller(1078DE) support to the driver
+ and Increase the max_wait to 60 from 10 in the controller
+ operational status. With this max_wait increase, driver will
+ make sure the FW will finish the pending cmd for KDUMP case.
+
1 Release Date : Thur. Nov. 07 16:30:43 PST 2007 -
(emaild-id:megaraidlinux@lsi.com)
Sumant Patro
diff --git a/block/bsg.c b/block/bsg.c
index fa796b605f5..f0b7cd34321 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -174,7 +174,11 @@ unlock:
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
- memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ if (hdr->request_len > BLK_MAX_CDB) {
+ rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+ if (!rq->cmd)
+ return -ENOMEM;
+ }
if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
hdr->request_len))
@@ -211,8 +215,6 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
if (hdr->guard != 'Q')
return -EINVAL;
- if (hdr->request_len > BLK_MAX_CDB)
- return -EINVAL;
if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
hdr->din_xfer_len > (q->max_sectors << 9))
return -EIO;
@@ -302,6 +304,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
}
return rq;
out:
+ if (rq->cmd != rq->__cmd)
+ kfree(rq->cmd);
blk_put_request(rq);
if (next_rq) {
blk_rq_unmap_user(next_rq->bio);
@@ -455,6 +459,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
ret = rq->errors;
blk_rq_unmap_user(bio);
+ if (rq->cmd != rq->__cmd)
+ kfree(rq->cmd);
blk_put_request(rq);
return ret;
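The bsg.c hunks above encode an ownership rule for the CDB buffer: rq->cmd normally points at the request's inline __cmd[BLK_MAX_CDB] array, and only an SG_IO v4 command longer than BLK_MAX_CDB gets a separately allocated buffer, which must be freed by comparing the pointer against __cmd. A condensed sketch of that pattern, with the surrounding request plumbing elided (request_lite, set_cdb and put_cdb are illustrative names, not block-layer API):

#include <linux/slab.h>      /* kzalloc, kfree */
#include <linux/string.h>    /* memcpy */
#include <linux/errno.h>     /* ENOMEM */

struct request_lite {
	unsigned char __cmd[16];   /* stand-in for the BLK_MAX_CDB inline storage */
	unsigned char *cmd;        /* points at __cmd unless grown for a long CDB */
};

static int set_cdb(struct request_lite *rq, const unsigned char *cdb, size_t len)
{
	rq->cmd = rq->__cmd;
	if (len > sizeof(rq->__cmd)) {
		/* extended CDB: allocate a dedicated, zeroed buffer */
		rq->cmd = kzalloc(len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}
	memcpy(rq->cmd, cdb, len);
	return 0;
}

static void put_cdb(struct request_lite *rq)
{
	if (rq->cmd != rq->__cmd)   /* free only if it was grown */
		kfree(rq->cmd);
}

Both completion and the error path in bsg_map_hdr() perform the same "cmd != __cmd" check before kfree(), which is why the old request_len > BLK_MAX_CDB rejection in bsg_validate_sgv4_hdr() can be dropped.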
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ffa3720e6ca..78199c08ec9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -33,13 +33,12 @@
#include <scsi/scsi_cmnd.h>
/* Command group 3 is reserved and should never be used. */
-const unsigned char scsi_command_size[8] =
+const unsigned char scsi_command_size_tbl[8] =
{
6, 10, 10, 12,
16, 12, 10, 10
};
-
-EXPORT_SYMBOL(scsi_command_size);
+EXPORT_SYMBOL(scsi_command_size_tbl);
#include <scsi/sg.h>
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 62e3c919098..b2458bb8e9c 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1487,7 +1487,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
goto out;
- memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+ memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
orb->base.callback = complete_command_orb;
orb->base.request_bus =
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 37b85c67b11..c8bad675dbd 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -1055,7 +1055,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
rec->scsi_result = scsi_cmnd->result;
rec->scsi_cmnd = (unsigned long)scsi_cmnd;
rec->scsi_serial = scsi_cmnd->serial_number;
- memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd,
+ memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
min((int)scsi_cmnd->cmd_len,
ZFCP_DBF_SCSI_OPCODE));
rec->scsi_retries = scsi_cmnd->retries;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9af2330f07a..b2ea4ea051f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4014,7 +4014,7 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
scpnt->result);
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
- (void *) &scpnt->cmnd, scpnt->cmd_len);
+ scpnt->cmnd, scpnt->cmd_len);
ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
fcp_rsp_iu->fcp_sns_len);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index f4c4fe90240..f5a9addb705 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -599,7 +599,7 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
(struct NCR_700_command_slot *)SCp->host_scribble;
dma_unmap_single(hostdata->dev, slot->pCmd,
- sizeof(SCp->cmnd), DMA_TO_DEVICE);
+ MAX_COMMAND_SIZE, DMA_TO_DEVICE);
if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
#ifdef NCR_700_DEBUG
@@ -1004,7 +1004,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
* here */
NCR_700_unmap(hostdata, SCp, slot);
dma_unmap_single(hostdata->dev, slot->pCmd,
- sizeof(SCp->cmnd),
+ MAX_COMMAND_SIZE,
DMA_TO_DEVICE);
cmnd[0] = REQUEST_SENSE;
@@ -1901,7 +1901,7 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
}
slot->resume_offset = 0;
slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
- sizeof(SCp->cmnd), DMA_TO_DEVICE);
+ MAX_COMMAND_SIZE, DMA_TO_DEVICE);
NCR_700_start_command(SCp);
return 0;
}
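The 53c700 change above is a consequence of "[SCSI] Let scsi_cmnd->cmnd use request->cmd buffer" elsewhere in this merge: once cmnd becomes a pointer into the request's CDB buffer rather than an embedded array, sizeof(SCp->cmnd) silently becomes the pointer size, so the DMA mapping length must be spelled out as MAX_COMMAND_SIZE. A tiny standalone illustration of the pitfall (cmd_old and cmd_new are hypothetical structs for demonstration only):

#include <stdio.h>

#define MAX_COMMAND_SIZE 16

struct cmd_old { unsigned char cmnd[MAX_COMMAND_SIZE]; }; /* embedded array */
struct cmd_new { unsigned char *cmnd; };                   /* after the conversion */

int main(void)
{
	struct cmd_old o;
	struct cmd_new n;

	/* Array member: sizeof yields the CDB buffer size (16). */
	printf("old sizeof(cmnd) = %zu\n", sizeof(o.cmnd));
	/* Pointer member: sizeof yields the pointer size (4 or 8). */
	printf("new sizeof(cmnd) = %zu\n", sizeof(n.cmnd));
	return 0;
}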
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 99c57b0c1d5..46d7e400c8b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -504,10 +504,9 @@ config SCSI_AIC7XXX_OLD
source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
-# All the I2O code and drivers do not seem to be 64bit safe.
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
- depends on !64BIT && SCSI && PCI && VIRT_TO_BUS
+ depends on SCSI && PCI && VIRT_TO_BUS
help
This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 792b2e807bf..ced3eebe252 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -895,7 +895,7 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
} else {
scb->tag_msg = 0; /* No tag support */
}
- memcpy(&scb->cdb[0], &cmd->cmnd, scb->cdb_len);
+ memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
}
/**
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 460d4024c46..aa4e77c2527 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -498,6 +498,11 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr->valid = 1;
+ /* sense_key holds the current state of the spin-up */
+ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
+ fsa_dev_ptr->sense_data.sense_key = NOT_READY;
+ else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
+ fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr->size
= ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
@@ -1509,20 +1514,35 @@ static void io_callback(void *context, struct fib * fibptr)
scsi_dma_unmap(scsicmd);
readreply = (struct aac_read_reply *)fib_data(fibptr);
- if (le32_to_cpu(readreply->status) == ST_OK)
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- else {
+ switch (le32_to_cpu(readreply->status)) {
+ case ST_OK:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
+ break;
+ case ST_NOT_READY:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
+ SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ break;
+ default:
#ifdef AAC_DETAILED_STATUS_INFO
printk(KERN_WARNING "io_callback: io failed, status = %d\n",
le32_to_cpu(readreply->status));
#endif
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
+ break;
}
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
@@ -1863,6 +1883,84 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
return SCSI_MLQUEUE_HOST_BUSY;
}
+static void aac_start_stop_callback(void *context, struct fib *fibptr)
+{
+ struct scsi_cmnd *scsicmd = context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ BUG_ON(fibptr == NULL);
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_start_stop(struct scsi_cmnd *scsicmd)
+{
+ int status;
+ struct fib *cmd_fibcontext;
+ struct aac_power_management *pmcmd;
+ struct scsi_device *sdev = scsicmd->device;
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+
+ if (!(aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_POWER_MANAGEMENT)) {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+ if (aac->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Allocate and initialize a Fib
+ */
+ cmd_fibcontext = aac_fib_alloc(aac);
+ if (!cmd_fibcontext)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ aac_fib_init(cmd_fibcontext);
+
+ pmcmd = fib_data(cmd_fibcontext);
+ pmcmd->command = cpu_to_le32(VM_ContainerConfig);
+ pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
+ /* Eject bit ignored, not relevant */
+ pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
+ cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
+ pmcmd->cid = cpu_to_le32(sdev_id(sdev));
+ pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
+ cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof(struct aac_power_management),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_start_stop_callback,
+ (void *)scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
/**
* aac_scsi_cmd() - Process SCSI command
* @scsicmd: SCSI command block
@@ -1899,7 +1997,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* If the target container doesn't exist, it may have
* been newly created
*/
- if ((fsa_dev_ptr[cid].valid & 1) == 0) {
+ if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
+ (fsa_dev_ptr[cid].sense_data.sense_key ==
+ NOT_READY)) {
switch (scsicmd->cmnd[0]) {
case SERVICE_ACTION_IN:
if (!(dev->raw_io_interface) ||
@@ -2091,8 +2191,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
-
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
@@ -2187,15 +2287,32 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* These commands are all No-Ops
*/
case TEST_UNIT_READY:
+ if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ NOT_READY, SENCODE_BECOMING_READY,
+ ASENCODE_BECOMING_READY, 0, 0);
+ memcpy(scsicmd->sense_buffer,
+ &dev->fsa_dev[cid].sense_data,
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ /* FALLTHRU */
case RESERVE:
case RELEASE:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- case START_STOP:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
+
+ case START_STOP:
+ return aac_start_stop(scsicmd);
}
switch (scsicmd->cmnd[0])
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 113ca9c8934..73916adb8f8 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2455
+# define AAC_DRIVER_BUILD 2456
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -34,8 +34,8 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
-#define aac_phys_to_logical(x) (x+1)
-#define aac_logical_to_phys(x) (x?x-1:0)
+#define aac_phys_to_logical(x) ((x)+1)
+#define aac_logical_to_phys(x) ((x)?(x)-1:0)
/* #define AAC_DETAILED_STATUS_INFO */
@@ -424,6 +424,8 @@ struct aac_init
*/
__le32 InitFlags; /* flags for supported features */
#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
+#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
+#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
__le32 MaxIoCommands; /* max outstanding commands */
__le32 MaxIoSize; /* largest I/O command */
__le32 MaxFibSize; /* largest FIB to adapter */
@@ -867,8 +869,10 @@ struct aac_supplement_adapter_info
};
#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
-#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
-#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
+/* SupportedOptions2 */
+#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
+#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -1148,6 +1152,7 @@ struct aac_dev
#define ST_DQUOT 69
#define ST_STALE 70
#define ST_REMOTE 71
+#define ST_NOT_READY 72
#define ST_BADHANDLE 10001
#define ST_NOT_SYNC 10002
#define ST_BAD_COOKIE 10003
@@ -1269,6 +1274,18 @@ struct aac_synchronize_reply {
u8 data[16];
};
+#define CT_POWER_MANAGEMENT 245
+#define CT_PM_START_UNIT 2
+#define CT_PM_STOP_UNIT 3
+#define CT_PM_UNIT_IMMEDIATE 1
+struct aac_power_management {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_POWER_MANAGEMENT */
+ __le32 sub; /* CT_PM_* */
+ __le32 cid;
+ __le32 parm; /* CT_PM_sub_* */
+};
+
#define CT_PAUSE_IO 65
#define CT_RELEASE_IO 66
struct aac_pause {
@@ -1536,6 +1553,7 @@ struct aac_mntent {
#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */
#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */
+#define FSCS_NOT_READY 0x0008 /* Array spinning up to fulfil request */
struct aac_query_mount {
__le32 command;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 294a802450b..cbac0635510 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -97,6 +97,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
}
+ init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+ INITFLAGS_DRIVER_SUPPORTS_PM);
init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ef67816a6fe..289304aab69 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -515,7 +515,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
udelay(5);
}
- } else if (down_interruptible(&fibptr->event_wait) == 0) {
+ } else if (down_interruptible(&fibptr->event_wait)) {
fibptr->done = 2;
up(&fibptr->event_wait);
}
@@ -906,15 +906,22 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
case AifEnAddJBOD:
case AifEnDeleteJBOD:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
- if ((container >> 28))
+ if ((container >> 28)) {
+ container = (u32)-1;
break;
+ }
channel = (container >> 24) & 0xF;
- if (channel >= dev->maximum_num_channels)
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
break;
+ }
id = container & 0xFFFF;
- if (id >= dev->maximum_num_physicals)
+ if (id >= dev->maximum_num_physicals) {
+ container = (u32)-1;
break;
+ }
lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
channel = aac_phys_to_logical(channel);
device_config_needed =
(((__le32 *)aifcmd->data)[0] ==
@@ -933,13 +940,18 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
case EM_DRIVE_REMOVAL:
container = le32_to_cpu(
((__le32 *)aifcmd->data)[2]);
- if ((container >> 28))
+ if ((container >> 28)) {
+ container = (u32)-1;
break;
+ }
channel = (container >> 24) & 0xF;
- if (channel >= dev->maximum_num_channels)
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
break;
+ }
id = container & 0xFFFF;
lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
if (id >= dev->maximum_num_physicals) {
/* legacy dev_t ? */
if ((0x2000 <= id) || lun || channel ||
@@ -1025,9 +1037,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
break;
}
+ container = 0;
+retry_next:
if (device_config_needed == NOTHING)
- for (container = 0; container < dev->maximum_num_containers;
- ++container) {
+ for (; container < dev->maximum_num_containers; ++container) {
if ((dev->fsa_dev[container].config_waiting_on == 0) &&
(dev->fsa_dev[container].config_needed != NOTHING) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
@@ -1110,6 +1123,11 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
}
if (device_config_needed == ADD)
scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
+ if (channel == CONTAINER_CHANNEL) {
+ container++;
+ device_config_needed = NOTHING;
+ goto retry_next;
+ }
}
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c109f63f827..1f7c83607f8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -401,6 +401,8 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
static int aac_slave_configure(struct scsi_device *sdev)
{
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+ if (aac->jbod && (sdev->type == TYPE_DISK))
+ sdev->removable = 1;
if ((sdev->type == TYPE_DISK) &&
(sdev_channel(sdev) != CONTAINER_CHANNEL) &&
(!aac->jbod || sdev->inq_periph_qual) &&
@@ -809,6 +811,12 @@ static ssize_t aac_show_flags(struct device *cdev,
"SAI_READ_CAPACITY_16\n");
if (dev->jbod)
len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ if (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_POWER_MANAGEMENT)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_POWER_MANAGEMENT\n");
+ if (dev->msi)
+ len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
return len;
}
@@ -1106,7 +1114,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
aac->pdev = pdev;
aac->name = aac_driver_template.name;
aac->id = shost->unique_id;
- aac->cardtype = index;
+ aac->cardtype = index;
INIT_LIST_HEAD(&aac->entry);
aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
@@ -1146,19 +1154,19 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out_deinit;
/*
- * Lets override negotiations and drop the maximum SG limit to 34
- */
+ * Lets override negotiations and drop the maximum SG limit to 34
+ */
if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
(shost->sg_tablesize > 34)) {
shost->sg_tablesize = 34;
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
- }
+ }
- if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
+ if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
(shost->sg_tablesize > 17)) {
shost->sg_tablesize = 17;
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
- }
+ }
error = pci_set_dma_max_seg_size(pdev,
(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
@@ -1174,7 +1182,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
else
aac->printf_enabled = 0;
- /*
+ /*
* max channel will be the physical channels plus 1 virtual channel
* all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
* physical channels are address by their actual physical number+1
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 90f5e0a6f2e..2a730c470f6 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -529,10 +529,10 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
/* The first entry, 0, is used for dynamic ids, the rest for devices
* we know about.
*/
-static struct asd_pcidev_struct {
+static const struct asd_pcidev_struct {
const char * name;
int (*setup)(struct asd_ha_struct *asd_ha);
-} asd_pcidev_data[] = {
+} asd_pcidev_data[] __devinitconst = {
/* Id 0 is used for dynamic ids. */
{ .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
.setup = asd_aic9410_setup
@@ -735,7 +735,7 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
static int __devinit asd_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
- struct asd_pcidev_struct *asd_dev;
+ const struct asd_pcidev_struct *asd_dev;
unsigned asd_id = (unsigned) id->driver_data;
struct asd_ha_struct *asd_ha;
struct Scsi_Host *shost;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 403a7f2d8f9..9785d738419 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -28,7 +28,6 @@
#define SERVICE_ACTION_OUT_12 0xa9
#define SERVICE_ACTION_IN_16 0x9e
#define SERVICE_ACTION_OUT_16 0x9f
-#define VARIABLE_LENGTH_CMD 0x7f
@@ -210,7 +209,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
cdb0 = cdbp[0];
switch(cdb0) {
case VARIABLE_LENGTH_CMD:
- len = cdbp[7] + 8;
+ len = scsi_varlen_cdb_length(cdbp);
if (len < 10) {
printk("short variable length command, "
"len=%d ext_len=%d", len, cdb_len);
@@ -300,7 +299,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
cdb0 = cdbp[0];
switch(cdb0) {
case VARIABLE_LENGTH_CMD:
- len = cdbp[7] + 8;
+ len = scsi_varlen_cdb_length(cdbp);
if (len < 10) {
printk("short opcode=0x%x command, len=%d "
"ext_len=%d", cdb0, len, cdb_len);
@@ -335,10 +334,7 @@ void __scsi_print_command(unsigned char *cdb)
int k, len;
print_opcode_name(cdb, 0);
- if (VARIABLE_LENGTH_CMD == cdb[0])
- len = cdb[7] + 8;
- else
- len = COMMAND_SIZE(cdb[0]);
+ len = scsi_command_size(cdb);
/* print out all bytes in cdb */
for (k = 0; k < len; ++k)
printk(" %02x", cdb[k]);
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index cc784e8f6e9..f60236721e0 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -89,7 +89,7 @@ typedef struct {
int njobs; /* # of jobs sent to HA */
int qdepth; /* Controller queue depth. */
int wakebase; /* mpx wakeup base index. */
- uLONG SGsize; /* Scatter/Gather list size. */
+ uINT SGsize; /* Scatter/Gather list size. */
unsigned heads; /* heads for drives on cntlr. */
unsigned sectors; /* sectors for drives on cntlr. */
uCHAR do_drive32; /* Flag for Above 16 MB Ability */
@@ -97,8 +97,8 @@ typedef struct {
char idPAL[4]; /* 4 Bytes Of The ID Pal */
uCHAR primary; /* 1 For Primary, 0 For Secondary */
uCHAR eataVersion; /* EATA Version */
- uLONG cpLength; /* EATA Command Packet Length */
- uLONG spLength; /* EATA Status Packet Length */
+ uINT cpLength; /* EATA Command Packet Length */
+ uINT spLength; /* EATA Status Packet Length */
uCHAR drqNum; /* DRQ Index (0,5,6,7) */
uCHAR flag1; /* EATA Flags 1 (Byte 9) */
uCHAR flag2; /* EATA Flags 2 (Byte 30) */
@@ -107,23 +107,23 @@ typedef struct {
typedef struct {
uSHORT length; // Remaining length of this
uSHORT drvrHBAnum; // Relative HBA # used by the driver
- uLONG baseAddr; // Base I/O address
+ uINT baseAddr; // Base I/O address
uSHORT blinkState; // Blink LED state (0=Not in blink LED)
uCHAR pciBusNum; // PCI Bus # (Optional)
uCHAR pciDeviceNum; // PCI Device # (Optional)
uSHORT hbaFlags; // Miscellaneous HBA flags
uSHORT Interrupt; // Interrupt set for this device.
# if (defined(_DPT_ARC))
- uLONG baseLength;
+ uINT baseLength;
ADAPTER_OBJECT *AdapterObject;
LARGE_INTEGER DmaLogicalAddress;
PVOID DmaVirtualAddress;
LARGE_INTEGER ReplyLogicalAddress;
PVOID ReplyVirtualAddress;
# else
- uLONG reserved1; // Reserved for future expansion
- uLONG reserved2; // Reserved for future expansion
- uLONG reserved3; // Reserved for future expansion
+ uINT reserved1; // Reserved for future expansion
+ uINT reserved2; // Reserved for future expansion
+ uINT reserved3; // Reserved for future expansion
# endif
} drvrHBAinfo_S;
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 94bc894d120..72c8992fdf2 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -33,11 +33,7 @@
/* to make sure we are talking the same size under all OS's */
typedef unsigned char sigBYTE;
typedef unsigned short sigWORD;
-#if (defined(_MULTI_DATAMODEL) && defined(sun) && !defined(_ILP32))
-typedef uint32_t sigLONG;
-#else
-typedef unsigned long sigLONG;
-#endif
+typedef unsigned int sigINT;
/*
* use sigWORDLittleEndian for:
@@ -300,7 +296,7 @@ typedef struct dpt_sig {
sigBYTE dsFiletype; /* type of file */
sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
sigBYTE dsOEM; /* OEM file was created for */
- sigLONG dsOS; /* which Operating systems */
+ sigINT dsOS; /* which Operating systems */
sigWORD dsCapabilities; /* RAID levels, etc. */
sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
sigWORD dsAdapterSupp; /* DPT adapter families supported */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
index d23b70c8c76..a90c4cb8ea8 100644
--- a/drivers/scsi/dpt/sys_info.h
+++ b/drivers/scsi/dpt/sys_info.h
@@ -145,8 +145,8 @@
uCHAR smartROMRevision;
uSHORT flags; /* See bit definitions above */
uSHORT conventionalMemSize; /* in KB */
- uLONG extendedMemSize; /* in KB */
- uLONG osType; /* Same as DPTSIG's definition */
+ uINT extendedMemSize; /* in KB */
+ uINT osType; /* Same as DPTSIG's definition */
uCHAR osMajorVersion;
uCHAR osMinorVersion; /* The OS version */
uCHAR osRevision;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index ac92ac143b4..0fb5bf4c43a 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -29,11 +29,6 @@
/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
-/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
- high pages. Keep the macro around because of the broken unmerged ia64 tree */
-
-#define ADDR32 (0)
-
#include <linux/module.h>
MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
@@ -108,27 +103,28 @@ static dpt_sig_S DPTI_sig = {
static DEFINE_MUTEX(adpt_configuration_lock);
-static struct i2o_sys_tbl *sys_tbl = NULL;
-static int sys_tbl_ind = 0;
-static int sys_tbl_len = 0;
+static struct i2o_sys_tbl *sys_tbl;
+static dma_addr_t sys_tbl_pa;
+static int sys_tbl_ind;
+static int sys_tbl_len;
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;
+static struct class *adpt_sysfs_class;
+
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
static const struct file_operations adpt_fops = {
.ioctl = adpt_ioctl,
.open = adpt_open,
- .release = adpt_close
-};
-
-#ifdef REBOOT_NOTIFIER
-static struct notifier_block adpt_reboot_notifier =
-{
- adpt_reboot_event,
- NULL,
- 0
-};
+ .release = adpt_close,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_adpt_ioctl,
#endif
+};
/* Structures and definitions for synchronous message posting.
* See adpt_i2o_post_wait() for description
@@ -151,6 +147,21 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
*============================================================================
*/
+static inline int dpt_dma64(adpt_hba *pHba)
+{
+ return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
+}
+
+static inline u32 dma_high(dma_addr_t addr)
+{
+ return upper_32_bits(addr);
+}
+
+static inline u32 dma_low(dma_addr_t addr)
+{
+ return (u32)addr;
+}
+
static u8 adpt_read_blink_led(adpt_hba* host)
{
if (host->FwDebugBLEDflag_P) {
@@ -178,8 +189,6 @@ static int adpt_detect(struct scsi_host_template* sht)
struct pci_dev *pDev = NULL;
adpt_hba* pHba;
- adpt_init();
-
PINFO("Detecting Adaptec I2O RAID controllers...\n");
/* search for all Adatpec I2O RAID cards */
@@ -247,13 +256,29 @@ rebuild_sys_tab:
adpt_inquiry(pHba);
}
+ adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
+ if (IS_ERR(adpt_sysfs_class)) {
+ printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
+ adpt_sysfs_class = NULL;
+ }
+
for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if( adpt_scsi_register(pHba,sht) < 0){
+ if (adpt_scsi_host_alloc(pHba, sht) < 0){
adpt_i2o_delete_hba(pHba);
continue;
}
pHba->initialized = TRUE;
pHba->state &= ~DPTI_STATE_RESET;
+ if (adpt_sysfs_class) {
+ struct device *dev = device_create(adpt_sysfs_class,
+ NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
+ "dpti%d", pHba->unit);
+ if (IS_ERR(dev)) {
+ printk(KERN_WARNING"dpti%d: unable to "
+ "create device in dpt_i2o class\n",
+ pHba->unit);
+ }
+ }
}
// Register our control device node
@@ -282,7 +307,7 @@ static int adpt_release(struct Scsi_Host *host)
static void adpt_inquiry(adpt_hba* pHba)
{
- u32 msg[14];
+ u32 msg[17];
u32 *mptr;
u32 *lenptr;
int direction;
@@ -290,11 +315,12 @@ static void adpt_inquiry(adpt_hba* pHba)
u32 len;
u32 reqlen;
u8* buf;
+ dma_addr_t addr;
u8 scb[16];
s32 rcode;
memset(msg, 0, sizeof(msg));
- buf = kmalloc(80,GFP_KERNEL|ADDR32);
+ buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
if(!buf){
printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
return;
@@ -305,7 +331,10 @@ static void adpt_inquiry(adpt_hba* pHba)
direction = 0x00000000;
scsidir =0x40000000; // DATA IN (iop<--dev)
- reqlen = 14; // SINGLE SGE
+ if (dpt_dma64(pHba))
+ reqlen = 17; // SINGLE SGE, 64 bit
+ else
+ reqlen = 14; // SINGLE SGE, 32 bit
/* Stick the headers on */
msg[0] = reqlen<<16 | SGL_OFFSET_12;
msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
@@ -338,8 +367,16 @@ static void adpt_inquiry(adpt_hba* pHba)
/* Now fill in the SGList and command */
*lenptr = len;
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = virt_to_bus(buf);
+ if (dpt_dma64(pHba)) {
+ *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+ *mptr++ = 1 << PAGE_SHIFT;
+ *mptr++ = 0xD0000000|direction|len;
+ *mptr++ = dma_low(addr);
+ *mptr++ = dma_high(addr);
+ } else {
+ *mptr++ = 0xD0000000|direction|len;
+ *mptr++ = addr;
+ }
// Send it on it's way
rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
@@ -347,7 +384,7 @@ static void adpt_inquiry(adpt_hba* pHba)
sprintf(pHba->detail, "Adaptec I2O RAID");
printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
if (rcode != -ETIME && rcode != -EINTR)
- kfree(buf);
+ dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
} else {
memset(pHba->detail, 0, sizeof(pHba->detail));
memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
@@ -356,7 +393,7 @@ static void adpt_inquiry(adpt_hba* pHba)
memcpy(&(pHba->detail[40]), " FW: ", 4);
memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
pHba->detail[48] = '\0'; /* precautionary */
- kfree(buf);
+ dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
}
adpt_i2o_status_get(pHba);
return ;
@@ -632,6 +669,91 @@ stop_output:
return len;
}
+/*
+ * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
+ */
+static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
+{
+ return (u32)cmd->serial_number;
+}
+
+/*
+ * Go from a u32 'context' to a struct scsi_cmnd * .
+ * This could probably be made more efficient.
+ */
+static struct scsi_cmnd *
+ adpt_cmd_from_context(adpt_hba * pHba, u32 context)
+{
+ struct scsi_cmnd * cmd;
+ struct scsi_device * d;
+
+ if (context == 0)
+ return NULL;
+
+ spin_unlock(pHba->host->host_lock);
+ shost_for_each_device(d, pHba->host) {
+ unsigned long flags;
+ spin_lock_irqsave(&d->list_lock, flags);
+ list_for_each_entry(cmd, &d->cmd_list, list) {
+ if (((u32)cmd->serial_number == context)) {
+ spin_unlock_irqrestore(&d->list_lock, flags);
+ scsi_device_put(d);
+ spin_lock(pHba->host->host_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_irqrestore(&d->list_lock, flags);
+ }
+ spin_lock(pHba->host->host_lock);
+
+ return NULL;
+}
+
+/*
+ * Turn a pointer to ioctl reply data into an u32 'context'
+ */
+static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
+{
+#if BITS_PER_LONG == 32
+ return (u32)(unsigned long)reply;
+#else
+ ulong flags = 0;
+ u32 nr, i;
+
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+ nr = ARRAY_SIZE(pHba->ioctl_reply_context);
+ for (i = 0; i < nr; i++) {
+ if (pHba->ioctl_reply_context[i] == NULL) {
+ pHba->ioctl_reply_context[i] = reply;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ if (i >= nr) {
+ kfree (reply);
+ printk(KERN_WARNING"%s: Too many outstanding "
+ "ioctl commands\n", pHba->name);
+ return (u32)-1;
+ }
+
+ return i;
+#endif
+}
+
+/*
+ * Go from an u32 'context' to a pointer to ioctl reply data.
+ */
+static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
+{
+#if BITS_PER_LONG == 32
+ return (void *)(unsigned long)context;
+#else
+ void *p = pHba->ioctl_reply_context[context];
+ pHba->ioctl_reply_context[context] = NULL;
+
+ return p;
+#endif
+}
/*===========================================================================
* Error Handling routines
@@ -660,7 +782,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
msg[2] = 0;
msg[3]= 0;
- msg[4] = (u32)cmd;
+ msg[4] = adpt_cmd_to_context(cmd);
if (pHba->host)
spin_lock_irq(pHba->host->host_lock);
rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -861,27 +983,6 @@ static void adpt_i2o_sys_shutdown(void)
printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
-/*
- * reboot/shutdown notification.
- *
- * - Quiesce each IOP in the system
- *
- */
-
-#ifdef REBOOT_NOTIFIER
-static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
-{
-
- if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
- return NOTIFY_DONE;
-
- adpt_i2o_sys_shutdown();
-
- return NOTIFY_DONE;
-}
-#endif
-
-
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
@@ -893,6 +994,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
u32 hba_map1_area_size = 0;
void __iomem *base_addr_virt = NULL;
void __iomem *msg_addr_virt = NULL;
+ int dma64 = 0;
int raptorFlag = FALSE;
@@ -906,9 +1008,21 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
}
pci_set_master(pDev);
- if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
+
+ /*
+ * See if we should enable dma64 mode.
+ */
+ if (sizeof(dma_addr_t) > 4 &&
+ pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
+ if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
+ dma64 = 1;
+ }
+ if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
return -EINVAL;
+ /* adapter only supports message blocks below 4GB */
+ pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
+
base_addr0_phys = pci_resource_start(pDev,0);
hba_map0_area_size = pci_resource_len(pDev,0);
@@ -929,6 +1043,25 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
raptorFlag = TRUE;
}
+#if BITS_PER_LONG == 64
+ /*
+ * The original Adaptec 64 bit driver has this comment here:
+ * "x86_64 machines need more optimal mappings"
+ *
+ * I assume some HBAs report ridiculously large mappings
+ * and we need to limit them on platforms with IOMMUs.
+ */
+ if (raptorFlag == TRUE) {
+ if (hba_map0_area_size > 128)
+ hba_map0_area_size = 128;
+ if (hba_map1_area_size > 524288)
+ hba_map1_area_size = 524288;
+ } else {
+ if (hba_map0_area_size > 524288)
+ hba_map0_area_size = 524288;
+ }
+#endif
+
base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
if (!base_addr_virt) {
pci_release_regions(pDev);
@@ -991,16 +1124,22 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
pHba->state = DPTI_STATE_RESET;
pHba->pDev = pDev;
pHba->devices = NULL;
+ pHba->dma64 = dma64;
// Initializing the spinlocks
spin_lock_init(&pHba->state_lock);
spin_lock_init(&adpt_post_wait_lock);
if(raptorFlag == 0){
- printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
- hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
+ printk(KERN_INFO "Adaptec I2O RAID controller"
+ " %d at %p size=%x irq=%d%s\n",
+ hba_count-1, base_addr_virt,
+ hba_map0_area_size, pDev->irq,
+ dma64 ? " (64-bit DMA)" : "");
} else {
- printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
+ printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
+ hba_count-1, pDev->irq,
+ dma64 ? " (64-bit DMA)" : "");
printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
}
@@ -1053,10 +1192,26 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
if(pHba->msg_addr_virt != pHba->base_addr_virt){
iounmap(pHba->msg_addr_virt);
}
- kfree(pHba->hrt);
- kfree(pHba->lct);
- kfree(pHba->status_block);
- kfree(pHba->reply_pool);
+ if(pHba->FwDebugBuffer_P)
+ iounmap(pHba->FwDebugBuffer_P);
+ if(pHba->hrt) {
+ dma_free_coherent(&pHba->pDev->dev,
+ pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
+ pHba->hrt, pHba->hrt_pa);
+ }
+ if(pHba->lct) {
+ dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+ pHba->lct, pHba->lct_pa);
+ }
+ if(pHba->status_block) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
+ pHba->status_block, pHba->status_block_pa);
+ }
+ if(pHba->reply_pool) {
+ dma_free_coherent(&pHba->pDev->dev,
+ pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+ pHba->reply_pool, pHba->reply_pool_pa);
+ }
for(d = pHba->devices; d ; d = next){
next = d->next;
@@ -1075,23 +1230,19 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
pci_dev_put(pHba->pDev);
kfree(pHba);
+ if (adpt_sysfs_class)
+ device_destroy(adpt_sysfs_class,
+ MKDEV(DPTI_I2O_MAJOR, pHba->unit));
+
if(hba_count <= 0){
unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
+ if (adpt_sysfs_class) {
+ class_destroy(adpt_sysfs_class);
+ adpt_sysfs_class = NULL;
+ }
}
}
-
-static int adpt_init(void)
-{
- printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-#ifdef REBOOT_NOTIFIER
- register_reboot_notifier(&adpt_reboot_notifier);
-#endif
-
- return 0;
-}
-
-
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
struct adpt_device* d;
@@ -1283,6 +1434,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
u32 msg[8];
u8* status;
+ dma_addr_t addr;
u32 m = EMPTY_QUEUE ;
ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
@@ -1305,12 +1457,13 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
schedule_timeout_uninterruptible(1);
} while (m == EMPTY_QUEUE);
- status = kzalloc(4, GFP_KERNEL|ADDR32);
+ status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
if(status == NULL) {
adpt_send_nop(pHba, m);
printk(KERN_ERR"IOP reset failed - no free memory.\n");
return -ENOMEM;
}
+ memset(status,0,4);
msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@@ -1318,8 +1471,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
msg[3]=0;
msg[4]=0;
msg[5]=0;
- msg[6]=virt_to_bus(status);
- msg[7]=0;
+ msg[6]=dma_low(addr);
+ msg[7]=dma_high(addr);
memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
wmb();
@@ -1329,7 +1482,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
while(*status == 0){
if(time_after(jiffies,timeout)){
printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
- kfree(status);
+ /* We lose 4 bytes of "status" here, but we cannot
+ free these because controller may awake and corrupt
+ those bytes at any time */
+ /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
return -ETIMEDOUT;
}
rmb();
@@ -1348,6 +1504,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
}
if(time_after(jiffies,timeout)){
printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
+ /* We lose 4 bytes of "status" here, but we
+ cannot free these because controller may
+ awake and corrupt those bytes at any time */
+ /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
@@ -1364,7 +1524,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
PDEBUG("%s: Reset completed.\n", pHba->name);
}
- kfree(status);
+ dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
// This delay is to allow someone attached to the card through the debug UART to
// set up the dump levels that they want before the rest of the initialization sequence
@@ -1636,6 +1796,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
u32 i = 0;
u32 rcode = 0;
void *p = NULL;
+ dma_addr_t addr;
ulong flags = 0;
memset(&msg, 0, MAX_MESSAGE_SIZE*4);
@@ -1668,10 +1829,13 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
}
sg_offset = (msg[0]>>4)&0xf;
msg[2] = 0x40000000; // IOCTL context
- msg[3] = (u32)reply;
+ msg[3] = adpt_ioctl_to_context(pHba, reply);
+ if (msg[3] == (u32)-1)
+ return -EBUSY;
+
memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
if(sg_offset) {
- // TODO 64bit fix
+ // TODO add 64 bit API
struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
if (sg_count > pHba->sg_tablesize){
@@ -1690,7 +1854,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
}
sg_size = sg[i].flag_count & 0xffffff;
/* Allocate memory for the transfer */
- p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
+ p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
if(!p) {
printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
pHba->name,sg_size,i,sg_count);
@@ -1700,15 +1864,15 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
/* Copy in the user's SG buffer if necessary */
if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // TODO 64bit fix
- if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
+ // sg_simple_element API is 32 bit
+ if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
rcode = -EFAULT;
goto cleanup;
}
}
- //TODO 64bit fix
- sg[i].addr_bus = (u32)virt_to_bus(p);
+ /* sg_simple_element API is 32 bit, but addr < 4GB */
+ sg[i].addr_bus = addr;
}
}
@@ -1736,7 +1900,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
if(sg_offset) {
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
- // TODO 64bit fix
+ // TODO add 64 bit API
struct sg_simple_element* sg;
int sg_size;
@@ -1756,14 +1920,14 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
}
sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- // TODO 64bit fix
+ // TODO add 64 bit API
sg = (struct sg_simple_element*)(msg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
sg_size = sg[j].flag_count & 0xffffff;
- // TODO 64bit fix
- if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
+ // sg_simple_element API is 32 bit
+ if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
rcode = -EFAULT;
goto cleanup;
@@ -1787,12 +1951,17 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
cleanup:
- if (rcode != -ETIME && rcode != -EINTR)
+ if (rcode != -ETIME && rcode != -EINTR) {
+ struct sg_simple_element *sg =
+ (struct sg_simple_element*) (msg +sg_offset);
kfree (reply);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- if (rcode != -ETIME && rcode != -EINTR)
- kfree(sg_list[sg_index]);
+ while(sg_index) {
+ if(sg_list[--sg_index]) {
+ dma_free_coherent(&pHba->pDev->dev,
+ sg[sg_index].flag_count & 0xffffff,
+ sg_list[sg_index],
+ sg[sg_index].addr_bus);
+ }
}
}
return rcode;
@@ -1978,6 +2147,38 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
return error;
}
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode;
+ long ret;
+
+ inode = file->f_dentry->d_inode;
+
+ lock_kernel();
+
+ switch(cmd) {
+ case DPT_SIGNATURE:
+ case I2OUSRCMD:
+ case DPT_CTRLINFO:
+ case DPT_SYSINFO:
+ case DPT_BLINKLED:
+ case I2ORESETCMD:
+ case I2ORESCANCMD:
+ case (DPT_TARGET_BUSY & 0xFFFF):
+ case DPT_TARGET_BUSY:
+ ret = adpt_ioctl(inode, file, cmd, arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ unlock_kernel();
+
+ return ret;
+}
+#endif
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
@@ -2009,7 +2210,16 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
goto out;
}
}
- reply = bus_to_virt(m);
+ if (pHba->reply_pool_pa <= m &&
+ m < pHba->reply_pool_pa +
+ (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
+ reply = (u8 *)pHba->reply_pool +
+ (m - pHba->reply_pool_pa);
+ } else {
+ /* Ick, we should *never* be here */
+ printk(KERN_ERR "dpti: reply frame not from pool\n");
+ reply = (u8 *)bus_to_virt(m);
+ }
if (readl(reply) & MSG_FAIL) {
u32 old_m = readl(reply+28);
@@ -2029,7 +2239,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
}
context = readl(reply+8);
if(context & 0x40000000){ // IOCTL
- void *p = (void *)readl(reply+12);
+ void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
if( p != NULL) {
memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
}
@@ -2043,15 +2253,17 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
status = I2O_POST_WAIT_OK;
}
if(!(context & 0x40000000)) {
- cmd = (struct scsi_cmnd*) readl(reply+12);
+ cmd = adpt_cmd_from_context(pHba,
+ readl(reply+12));
if(cmd != NULL) {
printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
}
}
adpt_i2o_post_wait_complete(context, status);
} else { // SCSI message
- cmd = (struct scsi_cmnd*) readl(reply+12);
+ cmd = adpt_cmd_from_context (pHba, readl(reply+12));
if(cmd != NULL){
+ scsi_dma_unmap(cmd);
if(cmd->serial_number != 0) { // If not timedout
adpt_i2o_to_scsi(reply, cmd);
}
@@ -2072,6 +2284,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
int i;
u32 msg[MAX_MESSAGE_SIZE];
u32* mptr;
+ u32* lptr;
u32 *lenptr;
int direction;
int scsidir;
@@ -2079,6 +2292,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
u32 len;
u32 reqlen;
s32 rcode;
+ dma_addr_t addr;
memset(msg, 0 , sizeof(msg));
len = scsi_bufflen(cmd);
@@ -2118,7 +2332,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
// I2O_CMD_SCSI_EXEC
msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
msg[2] = 0;
- msg[3] = (u32)cmd; /* We want the SCSI control block back */
+ msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
// Our cards use the transaction context as the tag for queueing
// Adaptec/DPT Private stuff
msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2136,7 +2350,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
memcpy(mptr, cmd->cmnd, cmd->cmd_len);
mptr+=4;
lenptr=mptr++; /* Remember me - fill in when we know */
- reqlen = 14; // SINGLE SGE
+ if (dpt_dma64(pHba)) {
+ reqlen = 16; // SINGLE SGE
+ *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+ *mptr++ = 1 << PAGE_SHIFT;
+ } else {
+ reqlen = 14; // SINGLE SGE
+ }
/* Now fill in the SGList and command */
nseg = scsi_dma_map(cmd);
@@ -2146,12 +2366,16 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
len = 0;
scsi_for_each_sg(cmd, sg, nseg, i) {
+ lptr = mptr;
*mptr++ = direction|0x10000000|sg_dma_len(sg);
len+=sg_dma_len(sg);
- *mptr++ = sg_dma_address(sg);
+ addr = sg_dma_address(sg);
+ *mptr++ = dma_low(addr);
+ if (dpt_dma64(pHba))
+ *mptr++ = dma_high(addr);
/* Make this an end of list */
if (i == nseg - 1)
- mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
+ *lptr = direction|0xD0000000|sg_dma_len(sg);
}
reqlen = mptr - msg;
*lenptr = len;
@@ -2177,13 +2401,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
}
-static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
+static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
- struct Scsi_Host *host = NULL;
+ struct Scsi_Host *host;
- host = scsi_register(sht, sizeof(adpt_hba*));
+ host = scsi_host_alloc(sht, sizeof(adpt_hba*));
if (host == NULL) {
- printk ("%s: scsi_register returned NULL\n",pHba->name);
+ printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
return -1;
}
host->hostdata[0] = (unsigned long)pHba;
@@ -2200,7 +2424,7 @@ static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
host->max_lun = 256;
host->max_channel = pHba->top_scsi_channel + 1;
host->cmd_per_lun = 1;
- host->unique_id = (uint) pHba;
+ host->unique_id = (u32)sys_tbl_pa + pHba->unit;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
@@ -2640,11 +2864,10 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
u8 *status;
+ dma_addr_t addr;
u32 __iomem *msg = NULL;
int i;
ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
- u32* ptr;
- u32 outbound_frame; // This had to be a 32 bit address
u32 m;
do {
@@ -2663,13 +2886,14 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
- status = kzalloc(4, GFP_KERNEL|ADDR32);
+ status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
if (!status) {
adpt_send_nop(pHba, m);
printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
pHba->name);
return -ENOMEM;
}
+ memset(status, 0, 4);
writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
@@ -2678,7 +2902,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
writel(4096, &msg[4]); /* Host page frame size */
writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
- writel(virt_to_bus(status), &msg[7]);
+ writel((u32)addr, &msg[7]);
writel(m, pHba->post_port);
wmb();
@@ -2693,6 +2917,10 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
rmb();
if(time_after(jiffies,timeout)){
printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
+ /* We lose 4 bytes of "status" here, but we
+ cannot free these because controller may
+ awake and corrupt those bytes at any time */
+ /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
@@ -2701,25 +2929,30 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
// If the command was successful, fill the fifo with our reply
// message packets
if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
- kfree(status);
+ dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
return -2;
}
- kfree(status);
+ dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
- kfree(pHba->reply_pool);
+ if(pHba->reply_pool != NULL) {
+ dma_free_coherent(&pHba->pDev->dev,
+ pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+ pHba->reply_pool, pHba->reply_pool_pa);
+ }
- pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
+ pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
+ pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+ &pHba->reply_pool_pa, GFP_KERNEL);
if (!pHba->reply_pool) {
printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
return -ENOMEM;
}
+ memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
- ptr = pHba->reply_pool;
for(i = 0; i < pHba->reply_fifo_size; i++) {
- outbound_frame = (u32)virt_to_bus(ptr);
- writel(outbound_frame, pHba->reply_port);
+ writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
+ pHba->reply_port);
wmb();
- ptr += REPLY_FRAME_SIZE;
}
adpt_i2o_status_get(pHba);
return 0;
@@ -2743,11 +2976,11 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
u32 m;
u32 __iomem *msg;
u8 *status_block=NULL;
- ulong status_block_bus;
if(pHba->status_block == NULL) {
- pHba->status_block = (i2o_status_block*)
- kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
+ pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
+ sizeof(i2o_status_block),
+ &pHba->status_block_pa, GFP_KERNEL);
if(pHba->status_block == NULL) {
printk(KERN_ERR
"dpti%d: Get Status Block failed; Out of memory. \n",
@@ -2757,7 +2990,6 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
}
memset(pHba->status_block, 0, sizeof(i2o_status_block));
status_block = (u8*)(pHba->status_block);
- status_block_bus = virt_to_bus(pHba->status_block);
timeout = jiffies+TMOUT_GETSTATUS*HZ;
do {
rmb();
@@ -2782,8 +3014,8 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
writel(0, &msg[3]);
writel(0, &msg[4]);
writel(0, &msg[5]);
- writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
- writel(0, &msg[7]);
+ writel( dma_low(pHba->status_block_pa), &msg[6]);
+ writel( dma_high(pHba->status_block_pa), &msg[7]);
writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
//post message
@@ -2812,7 +3044,17 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
}
// Calculate the Scatter Gather list size
- pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
+ if (dpt_dma64(pHba)) {
+ pHba->sg_tablesize
+ = ((pHba->status_block->inbound_frame_size * 4
+ - 14 * sizeof(u32))
+ / (sizeof(struct sg_simple_element) + sizeof(u32)));
+ } else {
+ pHba->sg_tablesize
+ = ((pHba->status_block->inbound_frame_size * 4
+ - 12 * sizeof(u32))
+ / sizeof(struct sg_simple_element));
+ }
if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
pHba->sg_tablesize = SG_LIST_ELEMENTS;
}
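As a worked example of the new scatter/gather sizing (the 128-word frame size is an illustrative assumption, and sizeof(struct sg_simple_element) is taken as 8 bytes, i.e. two u32 fields):

/* Assume inbound_frame_size == 128 (32-bit words), i.e. 512 bytes per frame. */
/* 64-bit SG:  (512 - 14 * 4) / (8 + 4) = 456 / 12 = 38 entries               */
/* 32-bit SG:  (512 - 12 * 4) /  8      = 464 /  8 = 58 entries               */
/* Either result is then clamped to SG_LIST_ELEMENTS below.                   */

The 64-bit path reserves two extra header words and pairs each simple element with an additional u32, presumably for the upper 32 bits of each address, which is why it yields fewer entries per frame.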
@@ -2863,7 +3105,9 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
}
do {
if (pHba->lct == NULL) {
- pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
+ pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
+ pHba->lct_size, &pHba->lct_pa,
+ GFP_KERNEL);
if(pHba->lct == NULL) {
printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
pHba->name);
@@ -2879,7 +3123,7 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
msg[4] = 0xFFFFFFFF; /* All devices */
msg[5] = 0x00000000; /* Report now */
msg[6] = 0xD0000000|pHba->lct_size;
- msg[7] = virt_to_bus(pHba->lct);
+ msg[7] = (u32)pHba->lct_pa;
if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
@@ -2890,7 +3134,8 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
if ((pHba->lct->table_size << 2) > pHba->lct_size) {
pHba->lct_size = pHba->lct->table_size << 2;
- kfree(pHba->lct);
+ dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+ pHba->lct, pHba->lct_pa);
pHba->lct = NULL;
}
} while (pHba->lct == NULL);
@@ -2901,13 +3146,19 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
pHba->FwDebugBufferSize = buf[1];
- pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
- pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
- pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
- pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
- pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
- pHba->FwDebugBuffer_P += buf[2];
- pHba->FwDebugFlags = 0;
+ pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
+ pHba->FwDebugBufferSize);
+ if (pHba->FwDebugBuffer_P) {
+ pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
+ FW_DEBUG_FLAGS_OFFSET;
+ pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
+ FW_DEBUG_BLED_OFFSET;
+ pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
+ pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
+ FW_DEBUG_STR_LENGTH_OFFSET;
+ pHba->FwDebugBuffer_P += buf[2];
+ pHba->FwDebugFlags = 0;
+ }
}
return 0;
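The firmware debug window is now mapped with its own ioremap() from base_addr_phys rather than by offsetting into base_addr_virt, presumably because the offset the firmware reports can fall outside the region already mapped for the message unit; the added NULL check also lets the driver carry on if the mapping fails. A minimal sketch of the map/use/unmap lifecycle (dbg_phys and dbg_len are illustrative names):

void __iomem *dbg = ioremap(dbg_phys, dbg_len);

if (dbg) {
        u32 flags = readl(dbg + FW_DEBUG_FLAGS_OFFSET);  /* MMIO accessors, not plain loads */
        /* use the window */
        iounmap(dbg);                                    /* on teardown */
}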
@@ -2915,25 +3166,30 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
static int adpt_i2o_build_sys_table(void)
{
- adpt_hba* pHba = NULL;
+ adpt_hba* pHba = hba_chain;
int count = 0;
+ if (sys_tbl)
+ dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
+ sys_tbl, sys_tbl_pa);
+
sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
(hba_count) * sizeof(struct i2o_sys_tbl_entry);
- kfree(sys_tbl);
-
- sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
+ sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
+ sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
if (!sys_tbl) {
printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
return -ENOMEM;
}
+ memset(sys_tbl, 0, sys_tbl_len);
sys_tbl->num_entries = hba_count;
sys_tbl->version = I2OVERSION;
sys_tbl->change_ind = sys_tbl_ind++;
for(pHba = hba_chain; pHba; pHba = pHba->next) {
+ u64 addr;
// Get updated Status Block so we have the latest information
if (adpt_i2o_status_get(pHba)) {
sys_tbl->num_entries--;
@@ -2949,8 +3205,9 @@ static int adpt_i2o_build_sys_table(void)
sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
- sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
- sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
+ addr = pHba->base_addr_phys + 0x40;
+ sys_tbl->iops[count].inbound_low = dma_low(addr);
+ sys_tbl->iops[count].inbound_high = dma_high(addr);
count++;
}
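dma_low() and dma_high() are not visible in this excerpt; presumably they simply split a dma_addr_t into its 32-bit halves for the little-endian message frame. A plausible sketch:

/* Hypothetical definitions, matching how the helpers are used above. */
#define dma_low(addr)   ((u32)(addr))
#define dma_high(addr)  ((u32)((u64)(addr) >> 32))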
@@ -3086,7 +3343,8 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
do {
if (pHba->hrt == NULL) {
- pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
+ pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
+ size, &pHba->hrt_pa, GFP_KERNEL);
if (pHba->hrt == NULL) {
printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
return -ENOMEM;
@@ -3098,7 +3356,7 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
msg[2]= 0;
msg[3]= 0;
msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
+ msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
@@ -3106,8 +3364,10 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
}
if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
- size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
- kfree(pHba->hrt);
+ int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
+ dma_free_coherent(&pHba->pDev->dev, size,
+ pHba->hrt, pHba->hrt_pa);
+ size = newsize;
pHba->hrt = NULL;
}
} while(pHba->hrt == NULL);
@@ -3121,33 +3381,54 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
int group, int field, void *buf, int buflen)
{
u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 *resblk;
+ u8 *opblk_va;
+ dma_addr_t opblk_pa;
+ u8 *resblk_va;
+ dma_addr_t resblk_pa;
int size;
/* 8 bytes for header */
- resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
- if (resblk == NULL) {
+ resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+ sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
+ if (resblk_va == NULL) {
printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
return -ENOMEM;
}
+ opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+ sizeof(opblk), &opblk_pa, GFP_KERNEL);
+ if (opblk_va == NULL) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
+ printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
+ pHba->name);
+ return -ENOMEM;
+ }
if (field == -1) /* whole group */
opblk[4] = -1;
+ memcpy(opblk_va, opblk, sizeof(opblk));
size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
+ opblk_va, opblk_pa, sizeof(opblk),
+ resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
+ dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
if (size == -ETIME) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
return -ETIME;
} else if (size == -EINTR) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
return -EINTR;
}
- memcpy(buf, resblk+8, buflen); /* cut off header */
+ memcpy(buf, resblk_va+8, buflen); /* cut off header */
- kfree(resblk);
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
if (size < 0)
return size;
@@ -3164,10 +3445,11 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
* ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
*/
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, int oplen, void *resblk, int reslen)
+ void *opblk_va, dma_addr_t opblk_pa, int oplen,
+ void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
u32 msg[9];
- u32 *res = (u32 *)resblk;
+ u32 *res = (u32 *)resblk_va;
int wait_status;
msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
@@ -3176,12 +3458,12 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
msg[3] = 0;
msg[4] = 0;
msg[5] = 0x54000000 | oplen; /* OperationBlock */
- msg[6] = virt_to_bus(opblk);
+ msg[6] = (u32)opblk_pa;
msg[7] = 0xD0000000 | reslen; /* ResultBlock */
- msg[8] = virt_to_bus(resblk);
+ msg[8] = (u32)resblk_pa;
if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
- printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
+ printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
return wait_status; /* -DetailedStatus */
}
@@ -3284,7 +3566,7 @@ static int adpt_i2o_systab_send(adpt_hba* pHba)
* Private i/o space declaration
*/
msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = virt_to_phys(sys_tbl);
+ msg[7] = (u32)sys_tbl_pa;
msg[8] = 0x54000000 | 0;
msg[9] = 0;
msg[10] = 0xD4000000 | 0;
@@ -3323,11 +3605,10 @@ static void adpt_delay(int millisec)
#endif
static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
.name = "dpt_i2o",
.proc_name = "dpt_i2o",
.proc_info = adpt_proc_info,
- .detect = adpt_detect,
- .release = adpt_release,
.info = adpt_info,
.queuecommand = adpt_queue,
.eh_abort_handler = adpt_abort,
@@ -3341,5 +3622,48 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
-#include "scsi_module.c"
+
+static int __init adpt_init(void)
+{
+ int error;
+ adpt_hba *pHba, *next;
+
+ printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
+
+ error = adpt_detect(&driver_template);
+ if (error < 0)
+ return error;
+ if (hba_chain == NULL)
+ return -ENODEV;
+
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ error = scsi_add_host(pHba->host, &pHba->pDev->dev);
+ if (error)
+ goto fail;
+ scsi_scan_host(pHba->host);
+ }
+ return 0;
+fail:
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ scsi_remove_host(pHba->host);
+ }
+ return error;
+}
+
+static void __exit adpt_exit(void)
+{
+ adpt_hba *pHba, *next;
+
+ for (pHba = hba_chain; pHba; pHba = pHba->next)
+ scsi_remove_host(pHba->host);
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ adpt_release(pHba->host);
+ }
+}
+
+module_init(adpt_init);
+module_exit(adpt_exit);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index fd79068c586..924cd5a5167 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -84,7 +84,6 @@ static int adpt_device_reset(struct scsi_cmnd* cmd);
#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
-//#define REBOOT_NOTIFIER 1
/* Debugging macro from Linux Device Drivers - Rubini */
#undef PDEBUG
#ifdef DEBUG
@@ -229,14 +228,19 @@ typedef struct _adpt_hba {
u32 post_fifo_size;
u32 reply_fifo_size;
u32* reply_pool;
+ dma_addr_t reply_pool_pa;
u32 sg_tablesize; // Scatter/Gather List Size.
u8 top_scsi_channel;
u8 top_scsi_id;
u8 top_scsi_lun;
+ u8 dma64;
i2o_status_block* status_block;
+ dma_addr_t status_block_pa;
i2o_hrt* hrt;
+ dma_addr_t hrt_pa;
i2o_lct* lct;
+ dma_addr_t lct_pa;
uint lct_size;
struct i2o_device* devices;
struct adpt_channel channel[MAX_CHANNEL];
@@ -249,6 +253,7 @@ typedef struct _adpt_hba {
void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
u32 FwDebugFlags;
+ u32 *ioctl_reply_context[4];
} adpt_hba;
struct sg_simple_element {
@@ -264,9 +269,6 @@ static void adpt_i2o_sys_shutdown(void);
static int adpt_init(void);
static int adpt_i2o_build_sys_table(void);
static irqreturn_t adpt_isr(int irq, void *dev_id);
-#ifdef REBOOT_NOTIFIER
-static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p);
-#endif
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
@@ -275,7 +277,8 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
static const char *adpt_i2o_get_class_name(int class);
#endif
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, int oplen, void *resblk, int reslen);
+ void *opblk, dma_addr_t opblk_pa, int oplen,
+ void *resblk, dma_addr_t resblk_pa, int reslen);
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
static int adpt_i2o_lct_get(adpt_hba* pHba);
static int adpt_i2o_parse_lct(adpt_hba* pHba);
@@ -289,7 +292,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
-static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht);
+static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
static s32 adpt_hba_reset(adpt_hba* pHba);
static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
static s32 adpt_rescan(adpt_hba* pHba);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c6d6e7c6559..8e2e964af66 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -465,7 +465,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
scp->request = (struct request *)&wait;
scp->timeout_per_command = timeout*HZ;
scp->cmd_len = 12;
- memcpy(scp->cmnd, cmnd, 12);
+ scp->cmnd = cmnd;
cmndinfo.priority = IOCTL_PRI;
cmndinfo.internal_cmd_str = gdtcmd;
cmndinfo.internal_command = 1;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 5b7be1e9841..aaa48e0c8ed 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -763,9 +763,9 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
scp,
host->host_no, scp->device->channel,
scp->device->id, scp->device->lun,
- *((u32 *)&scp->cmnd),
- *((u32 *)&scp->cmnd + 1),
- *((u32 *)&scp->cmnd + 2),
+ ((u32 *)scp->cmnd)[0],
+ ((u32 *)scp->cmnd)[1],
+ ((u32 *)scp->cmnd)[2],
_req->index, _req->req_virt);
scp->result = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 4a922c57125..ccfd8aca376 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -686,7 +686,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
}
if (cmnd) {
- cmnd->result = rsp->status;
+ cmnd->result |= rsp->status;
if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
memcpy(cmnd->sense_buffer,
rsp->data,
@@ -730,6 +730,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
u16 lun = lun_from_dev(cmnd->device);
u8 out_fmt, in_fmt;
+ cmnd->result = (DID_OK << 16);
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -738,7 +739,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
srp_cmd = &evt_struct->iu.srp.cmd;
memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
- memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
+ memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
@@ -1347,6 +1348,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
del_timer(&evt_struct->timer);
+ if (crq->status != VIOSRP_OK && evt_struct->cmnd)
+ evt_struct->cmnd->result = DID_ERROR << 16;
if (evt_struct->done)
evt_struct->done(evt_struct);
else
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 90f1a61283a..4c4aadb3e40 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -59,6 +59,15 @@ enum viosrp_crq_formats {
VIOSRP_INLINE_FORMAT = 0x07
};
+enum viosrp_crq_status {
+ VIOSRP_OK = 0x0,
+ VIOSRP_NONRECOVERABLE_ERR = 0x1,
+ VIOSRP_VIOLATES_MAX_XFER = 0x2,
+ VIOSRP_PARTNER_PANIC = 0x3,
+ VIOSRP_DEVICE_BUSY = 0x8,
+ VIOSRP_ADAPTER_FAIL = 0x10
+};
+
struct viosrp_crq {
u8 valid; /* used by RPA */
u8 format; /* SCSI vs out-of-band */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index dbae3fdb850..e3f739776ba 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2590,7 +2590,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
cblk->hastat = 0;
cblk->tastat = 0;
/* Command the command */
- memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len);
+ memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);
/* Set up tags */
if (cmnd->device->tagged_supported) { /* Tag Support */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index de5ae6a6502..999e91ea745 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2791,7 +2791,7 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
static struct device_attribute ipr_ioa_state_attr = {
.attr = {
- .name = "state",
+ .name = "online_state",
.mode = S_IRUGO | S_IWUSR,
},
.show = ipr_show_adapter_state,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 820f91fb63b..70a0f11f48b 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3168,6 +3168,23 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
+ /*
+ * Newer firmware on the Dell CERC expects different
+ * random deletion handling, so disable it.
+ */
+ if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
+ adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
+ adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
+ (adapter->fw_version[0] > '6' ||
+ (adapter->fw_version[0] == '6' &&
+ adapter->fw_version[2] > '6') ||
+ (adapter->fw_version[0] == '6'
+ && adapter->fw_version[2] == '6'
+ && adapter->fw_version[3] > '1'))) {
+ con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
+ return 0;
+ }
mbox = (mbox_t *)raw_mbox;
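The version test above reads adapter->fw_version as an ASCII string of the form "X.YZ" (for example "6.62"): byte 0 is the major digit and bytes 2 and 3 the minor digits, so the branch fires for any CERC ATA100/4ch firmware newer than 6.61. Restated as a hedged helper (firmware_newer_than_661 is a hypothetical name, not part of the patch):

static int firmware_newer_than_661(const char *v)
{
        /* v is assumed to look like "6.62": major digit, '.', two minor digits */
        return v[0] > '6' ||
               (v[0] == '6' && v[2] > '6') ||
               (v[0] == '6' && v[2] == '6' && v[3] > '1');
}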
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 626459d1e90..c1d86d961a9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -88,6 +88,7 @@
#define PCI_SUBSYS_ID_PERC3_QC 0x0471
#define PCI_SUBSYS_ID_PERC3_DC 0x0493
#define PCI_SUBSYS_ID_PERC3_SC 0x0475
+#define PCI_SUBSYS_ID_CERC_ATA100_4CH 0x0511
#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index b937e9cddb2..7d84c8bbcf3 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
- * Version : v00.00.03.16-rc1
+ * Version : v00.00.03.20-rc1
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@@ -2650,12 +2650,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
return;
}
+#ifdef CONFIG_PM
/**
* megasas_suspend - driver suspend entry point
* @pdev: PCI device structure
* @state: PCI power state to suspend routine
*/
-static int __devinit
+static int
megasas_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host;
@@ -2687,7 +2688,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
* megasas_resume- driver resume entry point
* @pdev: PCI device structure
*/
-static int __devinit
+static int
megasas_resume(struct pci_dev *pdev)
{
int rval;
@@ -2782,12 +2783,16 @@ fail_ready_state:
return -ENODEV;
}
+#else
+#define megasas_suspend NULL
+#define megasas_resume NULL
+#endif
/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
*/
-static void megasas_detach_one(struct pci_dev *pdev)
+static void __devexit megasas_detach_one(struct pci_dev *pdev)
{
int i;
struct Scsi_Host *host;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3a997eb457b..b0c41e67170 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.03.16-rc1"
-#define MEGASAS_RELDATE "Nov. 07, 2007"
-#define MEGASAS_EXT_VERSION "Thu. Nov. 07 10:09:32 PDT 2007"
+#define MEGASAS_VERSION "00.00.03.20-rc1"
+#define MEGASAS_RELDATE "March 10, 2008"
+#define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008"
/*
* Device IDs
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 09ab3eac1c1..fa060932d2b 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2858,7 +2858,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
/* Load SCSI command packet. */
pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
- memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd));
+ memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
/* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
/* Set transfer direction. */
@@ -3127,7 +3127,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
/* Load SCSI command packet. */
pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
- memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd));
+ memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
/*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
/* Set transfer direction. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 12d69d7c857..110e776d1a0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -79,15 +79,6 @@ static void scsi_done(struct scsi_cmnd *cmd);
#define MIN_RESET_PERIOD (15*HZ)
/*
- * Macro to determine the size of SCSI command. This macro takes vendor
- * unique commands into account. SCSI commands in groups 6 and 7 are
- * vendor unique and we will depend upon the command length being
- * supplied correctly in cmd_len.
- */
-#define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
- COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
-
-/*
* Note - the initial logging level can be set here to log events at boot time.
* After the system is up, you may enable logging via the /proc interface.
*/
@@ -469,6 +460,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
if (!cmd) {
scsi_put_host_cmd_pool(gfp_mask);
+ shost->cmd_pool = NULL;
return -ENOMEM;
}
list_add(&cmd->list, &shost->free_list);
@@ -481,6 +473,13 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
*/
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
+ /*
+ * If cmd_pool is NULL the free list was not initialized, so
+ * do not attempt to release resources.
+ */
+ if (!shost->cmd_pool)
+ return;
+
while (!list_empty(&shost->free_list)) {
struct scsi_cmnd *cmd;
@@ -701,9 +700,11 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
* Before we queue this command, check if the command
* length exceeds what the host adapter can handle.
*/
- if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
+ if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
SCSI_LOG_MLQUEUE(3,
- printk("queuecommand : command too long.\n"));
+ printk("queuecommand : command too long. "
+ "cdb_size=%d host->max_cmd_len=%d\n",
+ cmd->cmd_len, cmd->device->host->max_cmd_len));
cmd->result = (DID_ABORT << 16);
scsi_done(cmd);
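Dropping CDB_SIZE() matters for the new variable-length CDBs: VARIABLE_LENGTH_CMD is 0x7f, so (0x7f >> 5) & 7 == 3 and the old macro would have taken the fixed-size COMMAND_SIZE() branch (12 bytes for group 3) instead of honouring the real length. Since cmd_len is now always filled in from the request (falling back to scsi_command_size() when a driver leaves it at zero), the dispatch check can compare cmd_len directly against the host's max_cmd_len:

/* Old macro, removed above:
 *   #define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
 *                          COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
 * For cmnd[0] == 0x7f the group is 3 (< 6), so the fixed lookup table would
 * have been used rather than the length carried in the CDB itself. */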
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1eaba6cd80f..eaf5a8add1b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -626,7 +626,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
* @scmd: SCSI command structure to hijack
* @ses: structure to save restore information
* @cmnd: CDB to send. Can be NULL if no new cmnd is needed
- * @cmnd_size: size in bytes of @cmnd
+ * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB)
* @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
*
* This function is used to save a scsi command information before re-execution
@@ -648,12 +648,14 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
* command.
*/
ses->cmd_len = scmd->cmd_len;
- memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd));
+ ses->cmnd = scmd->cmnd;
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->next_rq = scmd->request->next_rq;
ses->result = scmd->result;
+ scmd->cmnd = ses->eh_cmnd;
+ memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->request->next_rq = NULL;
@@ -665,14 +667,13 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
scmd->sdb.table.sgl = &ses->sense_sgl;
scmd->sc_data_direction = DMA_FROM_DEVICE;
scmd->sdb.table.nents = 1;
- memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
scmd->cmnd[0] = REQUEST_SENSE;
scmd->cmnd[4] = scmd->sdb.length;
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
} else {
scmd->sc_data_direction = DMA_NONE;
if (cmnd) {
- memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+ BUG_ON(cmnd_size > BLK_MAX_CDB);
memcpy(scmd->cmnd, cmnd, cmnd_size);
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
}
@@ -705,7 +706,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
* Restore original data
*/
scmd->cmd_len = ses->cmd_len;
- memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd));
+ scmd->cmnd = ses->cmnd;
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->request->next_rq = ses->next_rq;
@@ -1775,8 +1776,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scmd->request = &req;
memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
- memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
-
+ scmd->cmnd = req.cmd;
+
scmd->scsi_done = scsi_reset_provider_done_command;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d545ad1cf47..a82d2fe80fb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -445,7 +445,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
scsi_set_resid(cmd, 0);
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (cmd->cmd_len == 0)
- cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
+ cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
void scsi_device_unbusy(struct scsi_device *sdev)
@@ -1094,6 +1094,8 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
cmd->tag = req->tag;
cmd->request = req;
+ cmd->cmnd = req->cmd;
+
return cmd;
}
@@ -1131,8 +1133,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
req->buffer = NULL;
}
- BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
- memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
cmd->cmd_len = req->cmd_len;
if (!req->data_len)
cmd->sc_data_direction = DMA_NONE;
@@ -1169,6 +1169,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
if (unlikely(!cmd))
return BLKPREP_DEFER;
+ memset(cmd->cmnd, 0, BLK_MAX_CDB);
return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index ee8496aa033..257e097c39a 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -107,6 +107,8 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
cmd->jiffies_at_alloc = jiffies;
cmd->request = rq;
+ cmd->cmnd = rq->cmd;
+
rq->special = cmd;
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 640333b1e75..329eb8780e7 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -744,7 +744,8 @@ static int wait_on_busy(unsigned long iobase, unsigned int loop) {
static int board_inquiry(unsigned int j) {
struct mscp *cpp;
dma_addr_t id_dma_addr;
- unsigned int time, limit = 0;
+ unsigned int limit = 0;
+ unsigned long time;
id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
@@ -1392,7 +1393,8 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
}
static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
- unsigned int i, j, time, k, c, limit = 0;
+ unsigned int i, j, k, c, limit = 0;
+ unsigned long time;
int arg_done = FALSE;
struct scsi_cmnd *SCpnt;
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index d88824b3511..898e67d30e5 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -46,7 +46,7 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
}
memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd));
- memset(srb->cmnd, 0, sizeof(srb->cmnd));
+ memset(srb->cmnd, 0, MAX_COMMAND_SIZE);
/* check if we support the command */
if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 971d13dd5e6..3addcd8f827 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -292,6 +292,7 @@ struct isd200_info {
/* maximum number of LUNs supported */
unsigned char MaxLUNs;
+ unsigned char cmnd[BLK_MAX_CDB];
struct scsi_cmnd srb;
struct scatterlist sg;
};
@@ -450,6 +451,7 @@ static int isd200_action( struct us_data *us, int action,
memset(&ata, 0, sizeof(ata));
memset(&srb_dev, 0, sizeof(srb_dev));
+ srb->cmnd = info->cmnd;
srb->device = &srb_dev;
++srb->serial_number;
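Because scsi_cmnd::cmnd is now a bare pointer (normally aliasing request->cmd), a driver that fabricates its own struct scsi_cmnd must supply a CDB buffer before touching it, which is exactly what the isd200 hunk above does. A minimal sketch of that pattern (fake_srb and fake_cdb are illustrative names):

struct scsi_cmnd fake_srb;
unsigned char fake_cdb[BLK_MAX_CDB];

memset(&fake_srb, 0, sizeof(fake_srb));
memset(fake_cdb, 0, sizeof(fake_cdb));
fake_srb.cmnd = fake_cdb;                          /* otherwise ->cmnd is a NULL pointer */
fake_srb.cmnd[0] = TEST_UNIT_READY;
fake_srb.cmd_len = COMMAND_SIZE(fake_srb.cmnd[0]);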
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 1f74bcd603f..32742c4563d 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -30,13 +30,6 @@
#endif
/*
- * SCSI command lengths
- */
-
-extern const unsigned char scsi_command_size[8];
-#define COMMAND_SIZE(opcode) scsi_command_size[((opcode) >> 5) & 7]
-
-/*
* Special value for scanning to specify scanning or rescanning of all
* possible channels, (target) ids, or luns on a given shost.
*/
@@ -109,6 +102,7 @@ extern const unsigned char scsi_command_size[8];
#define MODE_SENSE_10 0x5a
#define PERSISTENT_RESERVE_IN 0x5e
#define PERSISTENT_RESERVE_OUT 0x5f
+#define VARIABLE_LENGTH_CMD 0x7f
#define REPORT_LUNS 0xa0
#define MAINTENANCE_IN 0xa3
#define MOVE_MEDIUM 0xa5
@@ -136,6 +130,38 @@ extern const unsigned char scsi_command_size[8];
#define ATA_12 0xa1 /* 12-byte pass-thru */
/*
+ * SCSI command lengths
+ */
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+
+/* defined in T10 SCSI Primary Commands-2 (SPC2) */
+struct scsi_varlen_cdb_hdr {
+ u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
+ u8 control;
+ u8 misc[5];
+ u8 additional_cdb_length; /* total cdb length - 8 */
+ __be16 service_action;
+ /* service specific data follows */
+};
+
+static inline unsigned
+scsi_varlen_cdb_length(const void *hdr)
+{
+ return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
+}
+
+extern const unsigned char scsi_command_size_tbl[8];
+#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
+
+static inline unsigned
+scsi_command_size(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+}
+
+/*
* SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
* T10/1561-D Revision 4 Draft dated 7th November 2002.
*/
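With these helpers, the length of a variable-length command is read from the CDB itself rather than looked up by opcode group. A small usage sketch (the 24-byte additional length is an arbitrary illustration):

unsigned char cdb[32] = { 0 };

cdb[0] = VARIABLE_LENGTH_CMD;   /* 0x7f                              */
cdb[7] = 24;                    /* additional_cdb_length = total - 8 */
/* scsi_command_size(cdb) == 32 here; for a fixed-length opcode such as
 * READ_10 (0x28) it falls back to COMMAND_SIZE() and returns 10.     */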
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 8d20e60a94b..3e46dfae819 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -7,10 +7,28 @@
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
+#include <linux/blkdev.h>
struct Scsi_Host;
struct scsi_device;
+/*
+ * MAX_COMMAND_SIZE is:
+ * the longest fixed-length SCSI CDB as per the SCSI standard.
+ * Fixed-length means commands whose size can be determined from their
+ * opcode alone, because the CDB carries no length specifier (unlike the
+ * VARIABLE_LENGTH_CMD (0x7f) command). This is not exactly true: the SCSI
+ * standard also defines extended and vendor-specific commands that can be
+ * bigger than 16 bytes. The kernel will support these using the same
+ * infrastructure used for VARLEN CDBs. So in effect MAX_COMMAND_SIZE is
+ * the largest command scsi-ml supports without a ULD specifying cmd_len.
+ */
+#define MAX_COMMAND_SIZE 16
+#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
+# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
+#endif
+
struct scsi_data_buffer {
struct sg_table table;
unsigned length;
@@ -60,12 +78,11 @@ struct scsi_cmnd {
int allowed;
int timeout_per_command;
- unsigned char cmd_len;
+ unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
/* These elements define the operation we are about to perform */
-#define MAX_COMMAND_SIZE 16
- unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned char *cmnd;
struct timer_list eh_timeout; /* Used to time out the command. */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d3a133b4a07..2a9add21267 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -75,11 +75,11 @@ struct scsi_eh_save {
int result;
enum dma_data_direction data_direction;
unsigned char cmd_len;
- unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned char *cmnd;
struct scsi_data_buffer sdb;
struct request *next_rq;
-
/* new command support */
+ unsigned char eh_cmnd[BLK_MAX_CDB];
struct scatterlist sense_sgl;
};
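struct scsi_eh_save no longer embeds a CDB copy; it saves the old cmnd pointer and carries a private eh_cmnd[] buffer that scsi_eh_prep_cmnd() points scmd->cmnd at for the duration of the recovery command. A hedged usage sketch of the hijack/restore pair (a 6-byte TEST UNIT READY, names illustrative):

struct scsi_eh_save ses;
unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

scsi_eh_prep_cmnd(scmd, &ses, tur, sizeof(tur), 0); /* scmd->cmnd now points at ses.eh_cmnd */
/* issue scmd and wait for it to complete */
scsi_eh_restore_cmnd(scmd, &ses);                   /* original cmnd pointer and data restored */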
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index d967d6dc7a2..1834fdfe82a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -573,13 +573,11 @@ struct Scsi_Host {
/*
* The maximum length of SCSI commands that this host can accept.
* Probably 12 for most host adapters, but could be 16 for others.
+ * It can even be 260 if the driver supports variable length CDBs.
* For drivers that don't set this field, a value of 12 is
- * assumed. I am leaving this as a number rather than a bit
- * because you never know what subsequent SCSI standards might do
- * (i.e. could there be a 20 byte or a 24-byte command a few years
- * down the road?).
+ * assumed.
*/
- unsigned char max_cmd_len;
+ unsigned short max_cmd_len;
int this_id;
int can_queue;