author     Ben Hutchings <bhutchings@solarflare.com>         2012-12-20 18:48:20 +0000
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-03-28 12:11:56 -0700
commit     56bed2f8e6061b65ce3f9f27df92ac2e1cc2e7e0 (patch)
tree       26d2908359f31e8d1422e49a44702e9838d913d0 /drivers/net
parent     41ade8ee0d52f9e02d05cb1b1c376445d5ec3689 (diff)
sfc: Properly sync RX DMA buffer when it is not the last in the page
[ Upstream commit 3a68f19d7afb80f548d016effbc6ed52643a8085 ]
We may currently allocate two RX DMA buffers to a page, and only unmap
the page when the second is completed. We do not sync the first RX
buffer to be completed; this can result in packet loss or corruption
if the last RX buffer completed in a NAPI poll is the first in a page
and is not DMA-coherent. (In the middle of a NAPI poll, we will
handle the following RX completion and unmap the page *before* looking
at the content of the first buffer.)
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
[bwh: Backported to 3.4: adjust context]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
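The ordering constraint described above follows from the kernel's streaming DMA API: once a page is mapped with DMA_FROM_DEVICE, the CPU may not read what the device wrote until the mapping is unmapped or explicitly synced for the CPU. Below is a minimal, self-contained sketch of that rule, not the driver's actual code (rx_one() and its surroundings are invented for illustration):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Illustrative only: receive one buffer into a page and read it safely. */
static int rx_one(struct device *dev, struct page *page, unsigned int len)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -EIO;

	/* ... the device DMAs len bytes of packet data into the page ... */

	/*
	 * The CPU does not yet own the buffer.  Either unmap the page or
	 * sync the used bytes before reading them; skipping this step on a
	 * non-DMA-coherent system is the bug fixed here, because sfc defers
	 * the unmap until the second buffer in the page completes.
	 */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* Safe to read page_address(page)[0 .. len) here. */

	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
	return 0;
}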
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/sfc/rx.c  15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 763fa2fe1a3..c9766cad98f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -245,7 +245,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -256,6 +257,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -278,7 +283,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -544,10 +549,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.
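For context, the change can be summarized with a simplified, hypothetical helper (the structures and names below are invented for illustration; the real logic is efx_unmap_rx_buffer() above): when two RX buffers share one page mapping, only the buffer that drops the page's refcount to zero unmaps the page, and any other completed buffer is synced for the CPU over just the bytes that were actually received.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct rx_page_state {		/* hypothetical per-page bookkeeping */
	unsigned int refcnt;	/* RX buffers still outstanding in the page */
	dma_addr_t dma_addr;	/* DMA address of the whole page mapping */
};

struct rx_buf_desc {		/* hypothetical per-buffer descriptor */
	struct rx_page_state *state;
	dma_addr_t dma_addr;	/* this buffer's address within the page */
	unsigned int buf_size;	/* size covered by the page mapping */
};

/* Called when used_len bytes have landed in buf; used_len == 0 means the
 * buffer is being torn down rather than read. */
static void rx_buf_complete(struct pci_dev *pci_dev,
			    struct rx_buf_desc *buf,
			    unsigned int used_len)
{
	struct rx_page_state *state = buf->state;

	if (--state->refcnt == 0) {
		/* Last buffer in the page: release the whole mapping. */
		dma_unmap_page(&pci_dev->dev, state->dma_addr,
			       buf->buf_size, DMA_FROM_DEVICE);
	} else if (used_len) {
		/* The mapping stays alive for the other buffer, so hand
		 * ownership of the bytes we are about to read back to the
		 * CPU before looking at them. */
		dma_sync_single_for_cpu(&pci_dev->dev, buf->dma_addr,
					used_len, DMA_FROM_DEVICE);
	}
}

This also mirrors why the patch passes 0 from efx_fini_rx_buffer(), where the buffer is freed without being read, and the received length from efx_rx_packet(), so that only the bytes that will actually be inspected are synced.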