dma-mapping: use trace_dma_alloc for dma_alloc* instead of using trace_dma_map

In some cases, we use trace_dma_map to trace dma_alloc* functions. This
generally follows dma_debug. However, this does not record all of the
relevant information for allocations, such as GFP flags. Create new
dma_alloc tracepoints for these functions. Note that while
dma_alloc_noncontiguous may allocate discontiguous pages (from the CPU's
point of view), the device will only see one contiguous mapping.
Therefore, we just need to trace dma_addr and size.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
Sean Anderson 2024-10-18 11:00:36 -04:00 committed by Christoph Hellwig
parent 3afff779a7
commit c4484ab86e
2 changed files with 102 additions and 7 deletions

View File

@@ -112,7 +112,7 @@ DEFINE_EVENT(dma_unmap, name, \
 DEFINE_UNMAP_EVENT(dma_unmap_page);
 DEFINE_UNMAP_EVENT(dma_unmap_resource);
-TRACE_EVENT(dma_alloc,
+DECLARE_EVENT_CLASS(dma_alloc_class,
 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
 		 size_t size, enum dma_data_direction dir, gfp_t flags,
 		 unsigned long attrs),
@@ -147,7 +147,58 @@ TRACE_EVENT(dma_alloc,
 		decode_dma_attrs(__entry->attrs))
 );
-TRACE_EVENT(dma_free,
+#define DEFINE_ALLOC_EVENT(name) \
DEFINE_EVENT(dma_alloc_class, name, \
TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
size_t size, enum dma_data_direction dir, gfp_t flags, \
unsigned long attrs), \
TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs))
DEFINE_ALLOC_EVENT(dma_alloc);
DEFINE_ALLOC_EVENT(dma_alloc_pages);
TRACE_EVENT(dma_alloc_sgt,
TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
enum dma_data_direction dir, gfp_t flags, unsigned long attrs),
TP_ARGS(dev, sgt, size, dir, flags, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__dynamic_array(u64, phys_addrs, sgt->orig_nents)
__field(u64, dma_addr)
__field(size_t, size)
__field(enum dma_data_direction, dir)
__field(gfp_t, flags)
__field(unsigned long, attrs)
),
TP_fast_assign(
struct scatterlist *sg;
int i;
__assign_str(device);
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
__entry->dma_addr = sg_dma_address(sgt->sgl);
__entry->size = size;
__entry->dir = dir;
__entry->flags = flags;
__entry->attrs = attrs;
),
TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__entry->dma_addr,
__entry->size,
__print_array(__get_dynamic_array(phys_addrs),
__get_dynamic_array_len(phys_addrs) /
sizeof(u64), sizeof(u64)),
show_gfp_flags(__entry->flags),
decode_dma_attrs(__entry->attrs))
);
DECLARE_EVENT_CLASS(dma_free_class,
 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
 		 size_t size, enum dma_data_direction dir, unsigned long attrs),
 	TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
@@ -179,6 +230,50 @@ TRACE_EVENT(dma_free,
 		decode_dma_attrs(__entry->attrs))
 );
#define DEFINE_FREE_EVENT(name) \
DEFINE_EVENT(dma_free_class, name, \
TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
size_t size, enum dma_data_direction dir, unsigned long attrs), \
TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs))
DEFINE_FREE_EVENT(dma_free);
DEFINE_FREE_EVENT(dma_free_pages);
TRACE_EVENT(dma_free_sgt,
TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
enum dma_data_direction dir),
TP_ARGS(dev, sgt, size, dir),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__dynamic_array(u64, phys_addrs, sgt->orig_nents)
__field(u64, dma_addr)
__field(size_t, size)
__field(enum dma_data_direction, dir)
),
TP_fast_assign(
struct scatterlist *sg;
int i;
__assign_str(device);
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
__entry->dma_addr = sg_dma_address(sgt->sgl);
__entry->size = size;
__entry->dir = dir;
),
TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__entry->dma_addr,
__entry->size,
__print_array(__get_dynamic_array(phys_addrs),
__get_dynamic_array_len(phys_addrs) /
sizeof(u64), sizeof(u64)))
);
 TRACE_EVENT(dma_map_sg,
 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
 		 int ents, enum dma_data_direction dir, unsigned long attrs),

View File

@@ -685,8 +685,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

 	if (page) {
-		trace_dma_map_page(dev, page_to_phys(page), *dma_handle, size,
-				   dir, 0);
+		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
+				      size, dir, gfp, 0);
 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
 	}
 	return page;
@@ -710,7 +710,7 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
 		dma_addr_t dma_handle, enum dma_data_direction dir)
 {
-	trace_dma_unmap_page(dev, dma_handle, size, dir, 0);
+	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
 	debug_dma_unmap_page(dev, dma_handle, size, dir);
 	__dma_free_pages(dev, size, page, dma_handle, dir);
 }
@@ -770,7 +770,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 	if (sgt) {
 		sgt->nents = 1;
-		trace_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
+		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
 	}
 	return sgt;
@@ -789,7 +789,7 @@ static void free_single_sgt(struct device *dev, size_t size,
 void dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
-	trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
+	trace_dma_free_sgt(dev, sgt, size, dir);
 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
 	if (use_dma_iommu(dev))