Skip to content

Commit f2ae927

Browse files
committed
firewire: ohci: split page allocation from dma mapping
The 1394 OHCI PCI driver had long been the only user of dma_alloc_pages(). Although the TEE subsystem recently started using it, there are still only a few users of this infrequently-used function. In a discussion about the dma-mapping API, Jason Gunthorpe gave his opinion on the design of a public API for this purpose: users should provide a physical address to the function and receive a DMA mapping address in return, regardless of the location of the mapping target. With the above aspects in mind, this commit eliminates the use of dma_alloc_pages() from this subsystem. Link: https://lore.kernel.org/lkml/20250905174324.GI616306@nvidia.com/ Link: https://lore.kernel.org/r/20260110013911.19160-8-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
1 parent e62b46c commit f2ae927

File tree

1 file changed

+51
-26
lines changed

1 file changed

+51
-26
lines changed

drivers/firewire/ohci.c

Lines changed: 51 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -539,18 +539,22 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
539539
static void ar_context_release(struct ar_context *ctx)
540540
{
541541
struct device *dev = ctx->ohci->card.device;
542-
unsigned int i;
543542

544543
if (!ctx->buffer)
545544
return;
546545

547-
vunmap(ctx->buffer);
546+
for (int i = 0; i < AR_BUFFERS; ++i) {
547+
dma_addr_t dma_addr = page_private(ctx->pages[i]);
548548

549-
for (i = 0; i < AR_BUFFERS; i++) {
550-
if (ctx->pages[i])
551-
dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
552-
ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
549+
dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
550+
set_page_private(ctx->pages[i], 0);
553551
}
552+
553+
vunmap(ctx->buffer);
554+
ctx->buffer = NULL;
555+
556+
release_pages(ctx->pages, AR_BUFFERS);
557+
memset(ctx->pages, 0, sizeof(ctx->pages));
554558
}
555559

556560
static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
@@ -845,31 +849,57 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
845849
{
846850
struct device *dev = ohci->card.device;
847851
unsigned int i;
848-
dma_addr_t dma_addr;
849852
struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
853+
void *vaddr;
850854
struct descriptor *d;
851855

852856
ctx->regs = regs;
853857
ctx->ohci = ohci;
854858
INIT_WORK(&ctx->work, ohci_ar_context_work);
855859

856-
for (i = 0; i < AR_BUFFERS; i++) {
857-
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
858-
DMA_FROM_DEVICE, GFP_KERNEL);
859-
if (!ctx->pages[i])
860-
goto out_of_memory;
861-
set_page_private(ctx->pages[i], dma_addr);
862-
dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
863-
DMA_FROM_DEVICE);
860+
// Retrieve noncontiguous pages. The descriptors for 1394 OHCI AR DMA contexts have a set
861+
// of address and length per each. The reason to use pages is to construct contiguous
862+
// address range in kernel virtual address space.
863+
unsigned long nr_populated = alloc_pages_bulk(GFP_KERNEL | GFP_DMA32, AR_BUFFERS, pages);
864+
865+
if (nr_populated != AR_BUFFERS) {
866+
release_pages(pages, nr_populated);
867+
return -ENOMEM;
864868
}
865869

866-
for (i = 0; i < AR_BUFFERS; i++)
867-
pages[i] = ctx->pages[i];
870+
// Map the pages into contiguous kernel virtual addresses so that the packet data
871+
// across the pages can be referred as being contiguous, especially across the last
872+
// and first pages.
868873
for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
869-
pages[AR_BUFFERS + i] = ctx->pages[i];
870-
ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
871-
if (!ctx->buffer)
872-
goto out_of_memory;
874+
pages[AR_BUFFERS + i] = pages[i];
875+
vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
876+
if (!vaddr) {
877+
release_pages(pages, nr_populated);
878+
return -ENOMEM;
879+
}
880+
881+
// Retrieve DMA mapping addresses for the pages. They are not contiguous. Maintain the cache
882+
// coherency for the pages by hand.
883+
for (i = 0; i < AR_BUFFERS; i++) {
884+
// The dma_map_phys() with a physical address per page is available here, instead.
885+
dma_addr_t dma_addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE, DMA_FROM_DEVICE);
886+
if (dma_mapping_error(dev, dma_addr))
887+
break;
888+
set_page_private(pages[i], dma_addr);
889+
dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
890+
}
891+
if (i < AR_BUFFERS) {
892+
while (i-- > 0) {
893+
dma_addr_t dma_addr = page_private(pages[i]);
894+
dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
895+
}
896+
vunmap(vaddr);
897+
release_pages(pages, nr_populated);
898+
return -ENOMEM;
899+
}
900+
901+
ctx->buffer = vaddr;
902+
memcpy(ctx->pages, pages, sizeof(ctx->pages));
873903

874904
ctx->descriptors = ohci->misc_buffer + descriptors_offset;
875905
ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
@@ -886,11 +916,6 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
886916
}
887917

888918
return 0;
889-
890-
out_of_memory:
891-
ar_context_release(ctx);
892-
893-
return -ENOMEM;
894919
}
895920

896921
static void ar_context_run(struct ar_context *ctx)

0 commit comments

Comments
 (0)