Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-04 04:06:26 +00:00)
mm: Reduce the number of slab->folio casts
Mark a few more folio functions as taking a const folio pointer, which
allows us to remove a few places in slab which cast away the const.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
Parent: a0a44d9175
Commit: 4d2bcefa96
@@ -1105,7 +1105,7 @@ static inline unsigned int compound_order(struct page *page)
  *
  * Return: The order of the folio.
  */
-static inline unsigned int folio_order(struct folio *folio)
+static inline unsigned int folio_order(const struct folio *folio)
 {
 	if (!folio_test_large(folio))
 		return 0;
@@ -2145,7 +2145,7 @@ static inline struct folio *folio_next(struct folio *folio)
  * it from being split. It is not necessary for the folio to be locked.
  * Return: The base-2 logarithm of the size of this folio.
  */
-static inline unsigned int folio_shift(struct folio *folio)
+static inline unsigned int folio_shift(const struct folio *folio)
 {
 	return PAGE_SHIFT + folio_order(folio);
 }
@@ -2158,7 +2158,7 @@ static inline unsigned int folio_shift(struct folio *folio)
  * it from being split. It is not necessary for the folio to be locked.
  * Return: The number of bytes in this folio.
  */
-static inline size_t folio_size(struct folio *folio)
+static inline size_t folio_size(const struct folio *folio)
 {
 	return PAGE_SIZE << folio_order(folio);
 }
@@ -166,7 +166,7 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 static inline bool slab_test_pfmemalloc(const struct slab *slab)
 {
-	return folio_test_active((struct folio *)slab_folio(slab));
+	return folio_test_active(slab_folio(slab));
 }
 
 static inline void slab_set_pfmemalloc(struct slab *slab)
@@ -211,7 +211,7 @@ static inline struct slab *virt_to_slab(const void *addr)
 
 static inline int slab_order(const struct slab *slab)
 {
-	return folio_order((struct folio *)slab_folio(slab));
+	return folio_order(slab_folio(slab));
 }
 
 static inline size_t slab_size(const struct slab *slab)
@@ -962,11 +962,9 @@ void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_slab_info(const struct slab *slab)
 {
-	struct folio *folio = (struct folio *)slab_folio(slab);
-
 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
 	       slab, slab->objects, slab->inuse, slab->freelist,
-	       folio_flags(folio, 0));
+	       &slab->__page_flags);
 }
 
 /*
@@ -2532,7 +2530,7 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
  */
 static inline bool slab_test_node_partial(const struct slab *slab)
 {
-	return folio_test_workingset((struct folio *)slab_folio(slab));
+	return folio_test_workingset(slab_folio(slab));
 }
 
 static inline void slab_set_node_partial(struct slab *slab)
Loading…
Reference in New Issue
Block a user