author		Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>	2024-06-03 22:13:54 +0530
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-10-17 15:24:10 +0200
commit		43987659fa9c1a183af71730304f041de3fd264c (patch)
tree		d43e6f69cc755793c87f1046f7d66e0446332c36 /drivers/bus
parent		39601f49c9e0fc4c9ecea0fe583127f1616204a5 (diff)
bus: mhi: ep: Do not allocate memory for MHI objects from DMA zone
[ Upstream commit c7d0b2db5bc5e8c0fdc67b3c8f463c3dfec92f77 ]

The MHI endpoint stack accidentally started allocating memory for its
objects from the DMA zone with commit 62210a26cd4f ("bus: mhi: ep: Use
slab allocator where applicable"). There is no real need to allocate
from this naturally limited DMA zone, and doing so also causes the MHI
endpoint stack to run out of memory while doing high-bandwidth
transfers. So let's switch over to normal memory.

Cc: <stable@vger.kernel.org> # 6.8
Fixes: 62210a26cd4f ("bus: mhi: ep: Use slab allocator where applicable")
Reviewed-by: Mayank Rana <quic_mrana@quicinc.com>
Link: https://lore.kernel.org/r/20240603164354.79035-1-manivannan.sadhasivam@linaro.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
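For context, a minimal sketch of the slab-cache pattern this patch fixes. The cache name and element layout mirror the driver, but the init/alloc helpers below are illustrative, not the driver's actual functions:

/*
 * Illustrative sketch, not the driver's code: the cache name and the
 * mhi_ring_element layout mirror drivers/bus/mhi/ep/main.c, but the
 * helper functions here are hypothetical.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct mhi_ring_element {
	__le64 ptr;
	__le32 dword[2];
};

static struct kmem_cache *ev_ring_el_cache;

static int ev_ring_el_cache_init(void)
{
	/*
	 * The buggy version passed SLAB_CACHE_DMA here, pinning every
	 * object to the small ZONE_DMA region; passing 0 lets the slab
	 * allocator back the cache with normal memory.
	 */
	ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
					     sizeof(struct mhi_ring_element),
					     0, 0, NULL);
	if (!ev_ring_el_cache)
		return -ENOMEM;

	return 0;
}

static struct mhi_ring_element *ev_ring_el_alloc(void)
{
	/* The buggy version passed GFP_KERNEL | GFP_DMA. */
	return kmem_cache_zalloc(ev_ring_el_cache, GFP_KERNEL);
}

GFP_DMA allocations must be satisfied from ZONE_DMA (the low 16 MB of physical memory on x86), so dropping the flag removes an artificial ceiling that high-bandwidth transfers were hitting.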
Diffstat (limited to 'drivers/bus')
-rw-r--r--	drivers/bus/mhi/ep/main.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 2cb7e21ad3b7..c48f4d9f2c69 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -74,7 +74,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -93,7 +93,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -111,7 +111,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -130,7 +130,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -422,7 +422,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
 		write_offset = len - buf_left;
 
-		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
 		if (!buf_addr)
 			return -ENOMEM;
 
@@ -1460,14 +1460,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 
 	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
 							sizeof(struct mhi_ring_element), 0,
-							SLAB_CACHE_DMA, NULL);
+							0, NULL);
 	if (!mhi_cntrl->ev_ring_el_cache) {
 		ret = -ENOMEM;
 		goto err_free_cmd;
 	}
 
 	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
-						     SLAB_CACHE_DMA, NULL);
+						     0, NULL);
 	if (!mhi_cntrl->tre_buf_cache) {
 		ret = -ENOMEM;
 		goto err_destroy_ev_ring_el_cache;