mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 21:23:23 +00:00
virtio-mem: support CONFIG_PROC_VMCORE_DEVICE_RAM
Let's implement the get_device_ram() vmcore callback, so architectures that select NEED_PROC_VMCORE_NEED_DEVICE_RAM, like s390 soon, can include that memory in a crash dump. Merge ranges, and process ranges that might contain a mixture of plugged and unplugged, to reduce the total number of ranges. Link: https://lkml.kernel.org/r/20241204125444.1734652-12-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Baoquan He <bhe@redhat.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Claudio Imbrenda <imbrenda@linux.ibm.com> Cc: Cornelia Huck <cohuck@redhat.com> Cc: Dave Young <dyoung@redhat.com> Cc: Eric Farman <farman@linux.ibm.com> Cc: Eugenio Pérez <eperezma@redhat.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Janosch Frank <frankja@linux.ibm.com> Cc: Jason Wang <jasowang@redhat.com> Cc: Michael S. Tsirkin <mst@redhat.com> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Huth <thuth@redhat.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vivek Goyal <vgoyal@redhat.com> Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
3a40a409af
commit
361469d853
@@ -2728,6 +2728,91 @@ static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
|
||||
mutex_unlock(&vm->hotplug_mutex);
|
||||
return is_ram;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
|
||||
/*
 * Add one device RAM range [start, end) to the given vmcore range list.
 * Logs an error on allocation failure; returns 0 on success or a negative
 * errno from vmcore_alloc_add_range().
 */
static int virtio_mem_vmcore_add_device_ram(struct virtio_mem *vm,
		struct list_head *list, uint64_t start, uint64_t end)
{
	const int rc = vmcore_alloc_add_range(list, start, end - start);

	if (!rc)
		return 0;

	dev_err(&vm->vdev->dev,
		"Error adding device RAM range: %d\n", rc);
	return rc;
}
|
||||
|
||||
static int virtio_mem_vmcore_get_device_ram(struct vmcore_cb *cb,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct virtio_mem *vm = container_of(cb, struct virtio_mem,
|
||||
vmcore_cb);
|
||||
const uint64_t device_start = vm->addr;
|
||||
const uint64_t device_end = vm->addr + vm->usable_region_size;
|
||||
uint64_t chunk_size, cur_start, cur_end, plugged_range_start = 0;
|
||||
LIST_HEAD(tmp_list);
|
||||
int rc;
|
||||
|
||||
if (!vm->plugged_size)
|
||||
return 0;
|
||||
|
||||
/* Process memory sections, unless the device block size is bigger. */
|
||||
chunk_size = max_t(uint64_t, PFN_PHYS(PAGES_PER_SECTION),
|
||||
vm->device_block_size);
|
||||
|
||||
mutex_lock(&vm->hotplug_mutex);
|
||||
|
||||
/*
|
||||
* We process larger chunks and indicate the complete chunk if any
|
||||
* block in there is plugged. This reduces the number of pfn_is_ram()
|
||||
* callbacks and mimic what is effectively being done when the old
|
||||
* kernel would add complete memory sections/blocks to the elfcore hdr.
|
||||
*/
|
||||
cur_start = device_start;
|
||||
for (cur_start = device_start; cur_start < device_end; cur_start = cur_end) {
|
||||
cur_end = ALIGN_DOWN(cur_start + chunk_size, chunk_size);
|
||||
cur_end = min_t(uint64_t, cur_end, device_end);
|
||||
|
||||
rc = virtio_mem_send_state_request(vm, cur_start,
|
||||
cur_end - cur_start);
|
||||
|
||||
if (rc < 0) {
|
||||
dev_err(&vm->vdev->dev,
|
||||
"Error querying block states: %d\n", rc);
|
||||
goto out;
|
||||
} else if (rc != VIRTIO_MEM_STATE_UNPLUGGED) {
|
||||
/* Merge ranges with plugged memory. */
|
||||
if (!plugged_range_start)
|
||||
plugged_range_start = cur_start;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Flush any plugged range. */
|
||||
if (plugged_range_start) {
|
||||
rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
|
||||
plugged_range_start,
|
||||
cur_start);
|
||||
if (rc)
|
||||
goto out;
|
||||
plugged_range_start = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Flush any plugged range. */
|
||||
if (plugged_range_start)
|
||||
rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
|
||||
plugged_range_start,
|
||||
cur_start);
|
||||
out:
|
||||
mutex_unlock(&vm->hotplug_mutex);
|
||||
if (rc < 0) {
|
||||
vmcore_free_ranges(&tmp_list);
|
||||
return rc;
|
||||
}
|
||||
list_splice_tail(&tmp_list, list);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
|
||||
#endif /* CONFIG_PROC_VMCORE */
|
||||
|
||||
static int virtio_mem_init_kdump(struct virtio_mem *vm)
|
||||
@ -2737,6 +2822,9 @@ static int virtio_mem_init_kdump(struct virtio_mem *vm)
|
||||
#ifdef CONFIG_PROC_VMCORE
|
||||
dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
|
||||
vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
|
||||
#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
|
||||
vm->vmcore_cb.get_device_ram = virtio_mem_vmcore_get_device_ram;
|
||||
#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
|
||||
register_vmcore_cb(&vm->vmcore_cb);
|
||||
return 0;
|
||||
#else /* CONFIG_PROC_VMCORE */
|
||||
|
@ -67,6 +67,7 @@ config NEED_PROC_VMCORE_DEVICE_RAM
|
||||
config PROC_VMCORE_DEVICE_RAM
|
||||
def_bool y
|
||||
depends on PROC_VMCORE && NEED_PROC_VMCORE_DEVICE_RAM
|
||||
depends on VIRTIO_MEM
|
||||
help
|
||||
If the elfcore hdr is allocated and prepared by the dump kernel
|
||||
("2nd kernel") instead of the crashed kernel, RAM provided by memory
|
||||
|
Loading…
x
Reference in New Issue
Block a user