dma-mapping fixes for Linux 6.10

- dma-mapping benchmark error handling fixes (Fedor Pchelkin)
- correct a config symbol reference in the DMA API documentation
  (Lukas Bulwahn)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmZZwtkLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYMJ/RAAvjmGkeGWNFEYB7Vl8uBII6bLJoWWWkyqjkkVOKzQ
 uWnkrAlsMGOG1NV5nkE9rE6ceS3yyjSjsj+2mF+lsSlfsUpcOgIBDmWsiSCuLE/Q
 8LqcF0UUjZTB8O6rU+Gzfix/qd4xUOsnqXLwVi6Mm1pEEqIhRbTi86MHRJM6aMmu
 RypA+RwXopUiWJHLOEPi4czTrbUNPJy2hdmUlB7Bv4ZHnYnGaZRbFgqwOfm8VeUn
 Uw673fnSbVMSgKIk2SvIG0RMPJyFalnHTL+cD+BgjZNHJLxvuCF/0T+jBJnaY+UR
 +jZfAWeZcjflqWrNb22+kLB7AHeccWvz1YDAOFeDU2ayRqmIDgzfx1fGI35s/NIr
 O+sloP1FZXryWc8eIfwfwm15hQCvU5k2tHraZKGf2eNnG44GLAA/kuh/AjW3vKSc
 Jcu5Jps6aJdiOBVd8QE8MJyI+wsQeZMiyffQ2p0DpF/bUxnpRV0uONn8ZTlY/tFM
 L32H6xlmi1K2oUBsqEGZr0+glZn7MIJFvQ++ipDXuL/0SEWu+u8KdP9pk+w6yVSt
 3X40uD4KQofys2EWPVRtJmnLWOXPuGLLC1NjPCrg5KkwZBIQ3BIwaoLg6M3bc4jV
 in3seFove+13gHFq7G2Q/XxM8ODohrKduVTZwOQF5RnN7ro7sS5dOR44cCsQ5Yz7
 eDU=
 =T5jp
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-6.10-2024-05-31' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - dma-mapping benchmark error handling fixes (Fedor Pchelkin)

 - correct a config symbol reference in the DMA API documentation (Lukas
   Bulwahn)

* tag 'dma-mapping-6.10-2024-05-31' of git://git.infradead.org/users/hch/dma-mapping:
  Documentation/core-api: correct reference to SWIOTLB_DYNAMIC
  dma-mapping: benchmark: handle NUMA_NO_NODE correctly
  dma-mapping: benchmark: fix node id validation
  dma-mapping: benchmark: avoid needless copy_to_user if benchmark fails
  dma-mapping: benchmark: fix up kthread-related error handling
Linus Torvalds 2024-05-31 12:14:55 -07:00
commit b050496579
2 changed files with 17 additions and 10 deletions

Documentation/core-api/swiotlb.rst

@@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
 
 Dynamic swiotlb
 ---------------
-When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
+When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
 the amount of memory available for allocation as bounce buffers. If a bounce
 buffer request fails due to lack of available space, an asynchronous background
 task is kicked off to allocate memory from general system memory and turn it
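
The corrected symbol matters beyond the prose: kernel code matches Kconfig options by exact name. A minimal sketch (not from this commit; the helper name is made up) of how the option would be queried from C:

    #include <linux/kconfig.h>

    /* Sketch only: IS_ENABLED() evaluates to 1 when the option is
     * built in or modular. Testing the misspelled
     * CONFIG_DYNAMIC_SWIOTLB would silently always be 0. */
    static inline bool swiotlb_dynamic_enabled(void)  /* hypothetical */
    {
            return IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC);
    }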

kernel/dma/map_benchmark.c

@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 	struct task_struct **tsk;
 	int threads = map->bparam.threads;
 	int node = map->bparam.node;
-	const cpumask_t *cpu_mask = cpumask_of_node(node);
 	u64 loops;
 	int ret = 0;
 	int i;
@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 		if (IS_ERR(tsk[i])) {
 			pr_err("create dma_map thread failed\n");
 			ret = PTR_ERR(tsk[i]);
+			while (--i >= 0)
+				kthread_stop(tsk[i]);
 			goto out;
 		}
 
 		if (node != NUMA_NO_NODE)
-			kthread_bind_mask(tsk[i], cpu_mask);
+			kthread_bind_mask(tsk[i], cpumask_of_node(node));
 	}
 
 	/* clear the old value in the previous benchmark */
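
This hunk carries two fixes at once. First, if creating thread i fails, the i threads already created must be stopped before bailing out, or they leak. Second, cpumask_of_node() is only defined for real node ids, so the mask lookup moves inside the NUMA_NO_NODE guard instead of running unconditionally at function entry. A self-contained sketch of the same pattern, with hypothetical names (start_workers, worker_fn):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/numa.h>
    #include <linux/topology.h>

    /* Sketch: create nr kthreads, optionally bound to a NUMA node,
     * unwinding the already-created ones on failure. */
    static int start_workers(struct task_struct **tsk, int nr, int node,
                             int (*worker_fn)(void *))
    {
            int i;

            for (i = 0; i < nr; i++) {
                    tsk[i] = kthread_create_on_node(worker_fn, NULL, node,
                                                    "worker/%d", i);
                    if (IS_ERR(tsk[i])) {
                            int ret = PTR_ERR(tsk[i]);

                            while (--i >= 0)  /* stop threads 0..i-1 */
                                    kthread_stop(tsk[i]);
                            return ret;
                    }
                    /* guard: cpumask_of_node(NUMA_NO_NODE) is invalid */
                    if (node != NUMA_NO_NODE)
                            kthread_bind_mask(tsk[i], cpumask_of_node(node));
            }
            return 0;
    }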
@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 
 	msleep_interruptible(map->bparam.seconds * 1000);
 
-	/* wait for the completion of benchmark threads */
+	/* wait for the completion of all started benchmark threads */
 	for (i = 0; i < threads; i++) {
-		ret = kthread_stop(tsk[i]);
-		if (ret)
-			goto out;
+		int kthread_ret = kthread_stop_put(tsk[i]);
+
+		if (kthread_ret)
+			ret = kthread_ret;
 	}
 
+	if (ret)
+		goto out;
+
 	loops = atomic64_read(&map->loops);
 	if (likely(loops > 0)) {
 		u64 map_variance, unmap_variance;
@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 	}
 
 out:
-	for (i = 0; i < threads; i++)
-		put_task_struct(tsk[i]);
 	put_device(map->dev);
 	kfree(tsk);
 	return ret;
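
The two hunks above are one logical change. kthread_stop_put(), added in v6.7, stops a kthread and then drops the reference taken with get_task_struct() in a single call, which is what makes the put_task_struct() loop at the out: label redundant. Just as important, the loop no longer jumps to out: on the first error: every thread is stopped and released, the last failure is recorded in ret, and only then does the function bail out. A small sketch contrasting the two shapes (helper names are made up):

    #include <linux/kthread.h>
    #include <linux/sched/task.h>

    /* Old shape: two calls, and an early goto on error could leave
     * later threads running with their references never dropped. */
    static int stop_worker_two_step(struct task_struct *tsk)
    {
            int ret = kthread_stop(tsk);  /* wait for thread exit */

            put_task_struct(tsk);         /* drop get_task_struct() ref */
            return ret;
    }

    /* New shape: kthread_stop_put() folds stop + put into one call. */
    static int stop_worker_one_step(struct task_struct *tsk)
    {
            return kthread_stop_put(tsk);
    }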
@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
 	}
 
 	if (map->bparam.node != NUMA_NO_NODE &&
-	    !node_possible(map->bparam.node)) {
+	    (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+	     !node_possible(map->bparam.node))) {
 		pr_err("invalid numa node\n");
 		return -EINVAL;
 	}
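
bparam.node arrives from userspace through the ioctl, and node_possible() is only well defined for node ids within [0, MAX_NUMNODES), so the range check must come first (short-circuit evaluation guarantees the order). A sketch of the validation as a standalone predicate, with an invented helper name:

    #include <linux/nodemask.h>
    #include <linux/numa.h>

    /* Sketch: accept NUMA_NO_NODE (-1) as "no binding requested";
     * any other value must be a valid index before node_possible()
     * may test the possible-nodes mask. */
    static bool bench_node_valid(int node)  /* hypothetical helper */
    {
            if (node == NUMA_NO_NODE)
                    return true;
            return node >= 0 && node < MAX_NUMNODES && node_possible(node);
    }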
@@ -252,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
 		 * dma_mask changed by benchmark
 		 */
 		dma_set_mask(map->dev, old_dma_mask);
+
+		if (ret)
+			return ret;
 		break;
 	default:
 		return -EINVAL;
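
Finally, the ioctl path: the DMA mask that the benchmark may have altered is restored unconditionally, but on failure the handler now returns before the (meaningless) results are copied back to userspace; that is the "avoid needless copy_to_user" fix. A simplified sketch of the control flow, with a hypothetical wrapper name:

    #include <linux/dma-mapping.h>

    /* Sketch: run the benchmark, always restore the saved mask, and
     * short-circuit the error path before the caller reaches its
     * copy_to_user() of the result fields. */
    static long run_benchmark_case(struct map_benchmark_data *map,
                                   u64 old_dma_mask)
    {
            int ret = do_map_benchmark(map);

            dma_set_mask(map->dev, old_dma_mask);  /* restore regardless */
            if (ret)
                    return ret;  /* failed run: skip copying results */
            return 0;            /* caller copies results to userspace */
    }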