author      Stephen M. Cameron <scameron@beardog.cce.hp.com>    2014-02-18 13:56:29 -0600
committer   James Bottomley <JBottomley@Parallels.com>          2014-03-15 10:19:05 -0700
commit      b9af4937e6f5b55b6ffb2a92ec580e79e1401825 (patch)
tree        66b5ac5850788e3486bfc744ad732420660a3141 /drivers
parent      317d4adfd37729452c840821c71d1162d5d63f39 (diff)
[SCSI] hpsa: initialize controller to perform io accelerator mode 2
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: Scott Teel <scott.teel@hp.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/scsi/hpsa.c        111
-rw-r--r--    drivers/scsi/hpsa.h          1
-rw-r--r--    drivers/scsi/hpsa_cmd.h      2
3 files changed, 78 insertions, 36 deletions
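For context before reading the diff: the core of this patch is teaching hpsa_enter_performant_mode() to program a second block fetch table (bft2) when the controller advertises io accelerator mode 2. Each bft/bft2 entry describes how much of a command frame the controller should fetch for commands up to a given scatter-gather count, and the driver builds a lookup table mapping an SG count to the smallest bucket that fits, so short commands are not fetched at full size. The sketch below shows that bucket-map idea in isolation; it mirrors what the driver's calc_bucket_map() helper does, but the prototype, variable names, and comments are illustrative rather than copied from hpsa.c.

	#include <linux/types.h>

	/*
	 * Illustrative sketch of the bucket-map computation (not the verbatim
	 * hpsa.c helper): for every possible SG entry count 0..nsgs, record
	 * the index of the smallest bucket whose size covers a command with
	 * that many SG entries plus 'min_blocks' of fixed header.
	 */
	static void sketch_calc_bucket_map(const int bucket[], int num_buckets,
					   int nsgs, int min_blocks, u32 *bucket_map)
	{
		int i, j;

		for (i = 0; i <= nsgs; i++) {
			int size = i + min_blocks;	/* blocks needed for i SG entries */
			int b = num_buckets - 1;	/* fall back to the biggest bucket */

			for (j = 0; j < num_buckets; j++) {
				if (bucket[j] >= size) {
					b = j;		/* smallest bucket that fits */
					break;
				}
			}
			bucket_map[i] = b;
		}
	}

Under the sketch's assumptions, with the bft2[] added by this patch and min_blocks = 4, a command with 3 SG entries needs 7 blocks and maps to the bucket holding 7; anything larger than h->ioaccel_maxsg entries falls into the final, largest bucket.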
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 42bb7d7fc8de..05c4852567ea 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5137,6 +5137,7 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
/* Update the field, and then ring the doorbell */
writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
print_cfg_table(&h->pdev->dev, h->cfgtable);
@@ -5902,9 +5903,9 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
unsigned long register_value;
unsigned long transMethod = CFGTBL_Trans_Performant |
(trans_support & CFGTBL_Trans_use_short_tags) |
- CFGTBL_Trans_enable_directed_msix |
- (trans_support & CFGTBL_Trans_io_accel1);
-
+ CFGTBL_Trans_enable_directed_msix |
+ (trans_support & (CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_io_accel2));
struct access_method access = SA5_performant_access;
/* This is a bit complicated. There are 8 registers on
@@ -5925,6 +5926,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
* sizes for small commands, and fewer sizes for larger commands.
*/
int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+ int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19,
+ HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
+ BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+ BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+ 16 * MIN_IOACCEL2_BFT_ENTRY);
+ BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
@@ -5954,6 +5965,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
&h->transtable->RepQAddr[i].lower);
}
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
/*
* enable outbound interrupt coalescing in accelerator mode;
@@ -5975,43 +5987,72 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
h->access = access;
h->transMethod = transMethod;
- if (!(trans_support & CFGTBL_Trans_io_accel1))
+ if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+ (trans_support & CFGTBL_Trans_io_accel2)))
return;
- /* Set up I/O accelerator mode */
- for (i = 0; i < h->nreply_queues; i++) {
- writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
- h->reply_queue[i].current_entry =
- readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
- }
- bft[7] = h->ioaccel_maxsg + 8;
- calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
- h->ioaccel1_blockFetchTable);
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ /* Set up I/O accelerator mode */
+ for (i = 0; i < h->nreply_queues; i++) {
+ writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+ h->reply_queue[i].current_entry =
+ readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+ }
+ bft[7] = h->ioaccel_maxsg + 8;
+ calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+ h->ioaccel1_blockFetchTable);
- /* initialize all reply queue entries to unused */
- memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
- h->reply_pool_size);
+ /* initialize all reply queue entries to unused */
+ memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
+ h->reply_pool_size);
- /* set all the constant fields in the accelerator command
- * frames once at init time to save CPU cycles later.
- */
- for (i = 0; i < h->nr_cmds; i++) {
- struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
-
- cp->function = IOACCEL1_FUNCTION_SCSIIO;
- cp->err_info = (u32) (h->errinfo_pool_dhandle +
- (i * sizeof(struct ErrorInfo)));
- cp->err_info_len = sizeof(struct ErrorInfo);
- cp->sgl_offset = IOACCEL1_SGLOFFSET;
- cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
- cp->timeout_sec = 0;
- cp->ReplyQueue = 0;
- cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
- cp->Tag.upper = 0;
- cp->host_addr.lower = (u32) (h->ioaccel_cmd_pool_dhandle +
- (i * sizeof(struct io_accel1_cmd)));
- cp->host_addr.upper = 0;
+ /* set all the constant fields in the accelerator command
+ * frames once at init time to save CPU cycles later.
+ */
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+ cp->function = IOACCEL1_FUNCTION_SCSIIO;
+ cp->err_info = (u32) (h->errinfo_pool_dhandle +
+ (i * sizeof(struct ErrorInfo)));
+ cp->err_info_len = sizeof(struct ErrorInfo);
+ cp->sgl_offset = IOACCEL1_SGLOFFSET;
+ cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+ cp->timeout_sec = 0;
+ cp->ReplyQueue = 0;
+ cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT;
+ cp->Tag.upper = 0;
+ cp->host_addr.lower =
+ (u32) (h->ioaccel_cmd_pool_dhandle +
+ (i * sizeof(struct io_accel1_cmd)));
+ cp->host_addr.upper = 0;
+ }
+ } else if (trans_support & CFGTBL_Trans_io_accel2) {
+ u64 cfg_offset, cfg_base_addr_index;
+ u32 bft2_offset, cfg_base_addr;
+ int rc;
+
+ rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+ bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+ calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+ 4, h->ioaccel2_blockFetchTable);
+ bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+ BUILD_BUG_ON(offsetof(struct CfgTable,
+ io_accel_request_size_offset) != 0xb8);
+ h->ioaccel2_bft2_regs =
+ remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) +
+ cfg_offset + bft2_offset,
+ ARRAY_SIZE(bft2) *
+ sizeof(*h->ioaccel2_bft2_regs));
+ for (i = 0; i < ARRAY_SIZE(bft2); i++)
+ writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
}
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_mode_change_ack(h);
}
static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
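A behavioral note on the tail of the hunk above: the patch adds a CFGTBL_ChangeReq doorbell ring plus a mode-change wait after the accelerator setup, so the transport request and the newly programmed bft2 registers take effect before hpsa_enter_performant_mode() returns. The usual shape of such a wait is a bounded poll of the doorbell register until the controller clears the change-request bit; the sketch below is illustrative only (loop bound, delay, and return convention are not taken from this patch), and it assumes the hpsa.h/hpsa_cmd.h definitions shown elsewhere in this commit are in scope.

	/*
	 * Illustrative sketch of a mode-change wait, not the hpsa helper
	 * itself: poll the doorbell until the controller clears
	 * CFGTBL_ChangeReq, giving up after a bounded number of tries.
	 */
	static int sketch_wait_for_mode_change_ack(struct ctlr_info *h)
	{
		int i;

		for (i = 0; i < 1000; i++) {		/* bound is illustrative */
			if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
				return 0;		/* controller acked the change */
			msleep(10);			/* delay value is illustrative */
		}
		return -ENODEV;				/* controller never acked */
	}
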
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 2a4a0cb4f034..0e14e9bf735a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -142,6 +142,7 @@ struct ctlr_info {
u32 *blockFetchTable;
u32 *ioaccel1_blockFetchTable;
u32 *ioaccel2_blockFetchTable;
+ u32 *ioaccel2_bft2_regs;
unsigned char *hba_inquiry_data;
u32 driver_support;
u32 fw_support;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 6b336e833fa3..dfdb4a5c3646 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -547,7 +547,7 @@ struct hpsa_tmf_struct {
/* Configuration Table Structure */
struct HostWrite {
u32 TransportRequest;
- u32 Reserved;
+ u32 command_pool_addr_hi;
u32 CoalIntDelay;
u32 CoalIntCount;
};
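
Taken together with the hpsa.c hunks, this hpsa_cmd.h change gives the previously reserved word after TransportRequest a name, command_pool_addr_hi, and the driver now zeroes it explicitly before ringing the doorbell in both hpsa_enter_simple_mode() and hpsa_enter_performant_mode(), presumably telling the firmware there are no upper command-pool address bits to apply. Reassembled from the first hunk for readability, the simple-mode sequence after this patch looks like this:

	/* request the new transport mode ... */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	/* ... with the upper command-pool address bits explicitly cleared ... */
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	/* ... then ring the doorbell and wait for the controller to ack */
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);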