diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 02071f213c58..7af84bac6fc2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1303,7 +1303,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		/*
 		 * Record which node the original page is from and save this
 		 * information to khugepaged_node_load[].
-		 * Khupaged will allocate hugepage from the node has the max
+		 * Khugepaged will allocate a hugepage from the node that has the max
 		 * hit record.
 		 */
 		node = page_to_nid(page);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6a2b4b86b679..373837bb94cb 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1306,7 +1306,7 @@ static int __get_unpoison_page(struct page *page)
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle memory
 * error on it, after checking that the error page is in a well-defined state
- * (defined as a page-type we can successfully handle the memor error on it,
+ * (defined as a page-type on which we can successfully handle the memory error,
 * such as LRU page and hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1f75bd4e95d6..9513244457e6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -819,7 +819,7 @@ void __init setup_kmalloc_cache_index_table(void)
 
 	if (KMALLOC_MIN_SIZE >= 64) {
 		/*
-		 * The 96 byte size cache is not used if the alignment
+		 * The 96 byte sized cache is not used if the alignment
 		 * is 64 byte.
 		 */
 		for (i = 64 + 8; i <= 96; i += 8)
diff --git a/mm/swap.c b/mm/swap.c
index e8c9dc6d0377..b461814ce0cb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -882,7 +882,7 @@ void lru_cache_disable(void)
 	 * all online CPUs so any calls of lru_cache_disabled wrapped by
 	 * local_lock or preemption disabled would be ordered by that.
 	 * The atomic operation doesn't need to have stronger ordering
-	 * requirements because that is enforeced by the scheduling
+	 * requirements because that is enforced by the scheduling
 	 * guarantees.
 	 */
 	__lru_add_drain_all(true);
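
For readers unfamiliar with the comment corrected in the khugepaged.c hunk: while
scanning a candidate range, khugepaged counts which NUMA node each page resides
on, then allocates the collapsed hugepage on the node with the highest count.
Below is a minimal, self-contained sketch of that voting scheme, not the
kernel's actual code; MAX_NODES and the two helpers are illustrative stand-ins
for MAX_NUMNODES, khugepaged_node_load[], and the target-node lookup.

#define MAX_NODES 8	/* illustrative stand-in for MAX_NUMNODES */

static int node_load[MAX_NODES];	/* analogous to khugepaged_node_load[] */

/* Called once per scanned page: the page "votes" for its node. */
static void record_node(int nid)
{
	node_load[nid]++;
}

/* Return the node with the max hit record; the hugepage is allocated there. */
static int find_target_node(void)
{
	int nid, target = 0;

	for (nid = 1; nid < MAX_NODES; nid++)
		if (node_load[nid] > node_load[target])
			target = nid;
	return target;
}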
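
Similarly, the slab_common.c hunk concerns kmalloc size classes: with a 64-byte
minimum alignment, the 96-byte cache cannot be handed out (96 is not a multiple
of 64), so the loop in that hunk redirects requests between 65 and 96 bytes to
the 128-byte cache. A rough standalone model of that rounding, assuming
power-of-two classes from KMALLOC_MIN_SIZE upward; this is a simplification for
illustration, not the kernel's real size_index[] lookup.

#include <stddef.h>
#include <stdio.h>

#define KMALLOC_MIN_SIZE 64	/* the case the patched comment describes */

/* Simplified size-class selection: only power-of-two caches are usable
 * when the minimum alignment is 64, so the 96-byte cache is skipped. */
static size_t size_class(size_t size)
{
	size_t c = KMALLOC_MIN_SIZE;

	while (c < size)
		c <<= 1;
	return c;
}

int main(void)
{
	printf("%zu\n", size_class(72));	/* 128, not 96 */
	printf("%zu\n", size_class(96));	/* 128, not 96 */
	printf("%zu\n", size_class(64));	/* 64 */
	return 0;
}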