author     Philip J Kelleher <pjk1939@linux.vnet.ibm.com>  2013-09-04 13:59:35 -0500
committer  Jens Axboe <axboe@kernel.dk>                    2013-11-08 09:10:28 -0700
commit     1b21f5b2ad6047995b19b15024353a9fa64810f1 (patch)
tree       e0ecbc01dfe685322485b801540bd7a289283378 /drivers/block
parent     e5feab229f199dadee91073fbef5b507046086fd (diff)
rsxx: Moving pci_map_page to prevent overflow.
The pci_map_page function has been moved into our issue workqueue to prevent us from running out of mappable addresses on non-HWWD PCIe x8 slots. The maximum amount that can possibly be mapped at one time is now: 255 DMAs x 4 DMA channels x 4096 bytes.

Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
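As a quick sanity check of the bound quoted above (an illustrative userspace snippet, not part of the patch; the macro names are invented for the example, only the 255/4/4096 figures come from the commit):

    #include <stdio.h>

    /* Figures from the commit message: 255 tags per channel, 4 DMA
     * channels, 4096-byte pages. Macro names are hypothetical. */
    #define MAX_DMAS_PER_CHANNEL 255ULL
    #define DMA_CHANNELS           4ULL
    #define PAGE_BYTES          4096ULL

    int main(void)
    {
            unsigned long long worst =
                    MAX_DMAS_PER_CHANNEL * DMA_CHANNELS * PAGE_BYTES;

            /* 255 * 4 * 4096 = 4,177,920 bytes (~4 MiB), comfortably
             * below the ~2 GB mappable window of a non-HWWD PCIe x8
             * slot. */
            printf("worst-case mapped bytes: %llu\n", worst);
            return 0;
    }

So even with every tag on every channel in flight, the card maps only a few megabytes at once, which is why the in-line comment added to dma.c argues that pci_map_page should never fail for lack of mappable memory.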
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/rsxx/core.c   5
-rw-r--r--  drivers/block/rsxx/dma.c   70
2 files changed, 28 insertions(+), 47 deletions(-)
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index e740a650d546..a8de2eec6ff3 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -749,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
card->eeh_state = 0;
- st = rsxx_eeh_remap_dmas(card);
- if (st)
- goto failed_remap_dmas;
-
spin_lock_irqsave(&card->irq_lock, flags);
if (card->n_targets & RSXX_MAX_TARGETS)
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -779,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
failed_hw_buffers_init:
-failed_remap_dmas:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev,
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 71d1ca2a1444..34fd1018c8e5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -397,6 +397,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
int tag;
int cmds_pending = 0;
struct hw_cmd *hw_cmd_buf;
+ int dir;
hw_cmd_buf = ctrl->cmd.buf;
@@ -433,6 +434,28 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
continue;
}
+ if (dma->cmd == HW_CMD_BLK_WRITE)
+ dir = PCI_DMA_TODEVICE;
+ else
+ dir = PCI_DMA_FROMDEVICE;
+
+ /*
+ * The function pci_map_page is placed here because we can
+ * only, by design, issue up to 255 commands to the hardware
+ * at one time per DMA channel. So the maximum amount of mapped
+ * memory would be 255 * 4 channels * 4096 Bytes which is less
+ * than 2GB, the limit of a x8 Non-HWWD PCIe slot. This way the
+ * pci_map_page function should never fail because of a
+ * lack of mappable memory.
+ */
+ dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+ dma->pg_off, dma->sub_page.cnt << 9, dir);
+ if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ push_tracker(ctrl->trackers, tag);
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ continue;
+ }
+
set_tracker_dma(ctrl->trackers, tag, dma);
hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
hw_cmd_buf[ctrl->cmd.idx].tag = tag;
@@ -629,14 +652,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
if (!dma)
return -ENOMEM;
- dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
- dir ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
-
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
dma->laddr = laddr;
dma->sub_page.off = (dma_off >> 9);
@@ -1039,6 +1054,11 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
else
card->ctrl[i].stats.reads_issued--;
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
list_add_tail(&dma->list, &issued_dmas[i]);
push_tracker(card->ctrl[i].trackers, j);
cnt++;
@@ -1050,15 +1070,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
card->ctrl[i].stats.sw_q_depth += cnt;
card->ctrl[i].e_cnt = 0;
-
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- if (!pci_dma_mapping_error(card->dev, dma->dma_addr))
- pci_unmap_page(card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- }
spin_unlock_bh(&card->ctrl[i].queue_lock);
}
@@ -1067,31 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
return 0;
}
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma *dma;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- dma->dma_addr = pci_map_page(card->dev, dma->page,
- dma->pg_off, get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
- }
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
-
- return 0;
-}
-
int rsxx_dma_init(void)
{
rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
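
For readers who want the new issue-time mapping logic in one place: the hunk in rsxx_issue_dmas() above reduces to the pattern below. This is a condensed sketch, not the literal driver code; the rsxx_* helpers, struct fields, and constants are as they appear in the diff, while the function name issue_one_dma() is invented for the example.

    /* Map each DMA at issue time; on failure, recycle the tag and
     * cancel just this request instead of failing the whole queue. */
    static int issue_one_dma(struct rsxx_dma_ctrl *ctrl,
                             struct rsxx_dma *dma, int tag)
    {
            int dir = (dma->cmd == HW_CMD_BLK_WRITE) ? PCI_DMA_TODEVICE
                                                     : PCI_DMA_FROMDEVICE;

            /* At most 255 tags are in flight per channel, so this
             * mapping stays within the slot's mappable window (see the
             * comment in the diff above). sub_page.cnt is in 512-byte
             * sectors, hence the << 9. */
            dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
                                         dma->pg_off,
                                         dma->sub_page.cnt << 9, dir);
            if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
                    push_tracker(ctrl->trackers, tag);
                    rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                    return -ENOMEM;
            }

            set_tracker_dma(ctrl->trackers, tag, dma);
            return 0;
    }

The companion change in rsxx_eeh_save_issued_dmas() unmaps each issued DMA as it is pulled back onto the software queue, so a later re-issue simply maps it again; that is what makes the old rsxx_eeh_remap_dmas() pass unnecessary.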