author     Matthew Wilcox (Oracle) <willy@infradead.org>  2025-08-05 18:22:51 +0100
committer  Andrew Morton <akpm@linux-foundation.org>      2025-09-13 16:55:07 -0700
commit     53fbef56e07df822ea3029109ffca25328c2e5ac
tree       8f68e237755cc342682e0515e5b691536859312e
parent     4e915656a38afe8aeebb283493f49c22d675a9fc
mm: introduce memdesc_flags_t
Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct
folio.  This is a step towards that by introducing a type for the 'flags'
word of all three structures.  This gives us a certain amount of type
safety by establishing that some of these unsigned longs are different
from other unsigned longs, in that they contain things like node ID,
section number and zone number in the upper bits.  That lets us have
functions that can be easily called by anyone who has a slab, folio or
page (but not easily by anyone else) to get the node or zone.

There are going to be some unusual merge problems with this, as some odd
bits of the kernel decide they want to print out the flags value or
something similar by writing page->flags, and now they'll need to write
page->flags.f instead.  That's most of the churn here.  Maybe we should
be removing these things from the debug output?

This patch (of 11):

Wrap the unsigned long flags in a typedef.  In upcoming patches, this
will provide a strong hint that you can't just pass a random unsigned
long to functions which take this as an argument.

[willy@infradead.org: s/flags/flags.f/ in several architectures]
  Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
  Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
  Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
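For readers skimming the churn below, the whole series rests on one small
typedef.  Here is a minimal, self-contained sketch of the pattern: the
kernel definition this patch adds to include/linux/mm_types.h wraps the
word in a struct whose field is named 'f', which is why every call site
changes from ->flags to ->flags.f.  The read_flags() helper and userspace
main() are illustrative only, not part of the patch:

	#include <stdio.h>

	typedef struct {
		unsigned long f;	/* the old 'unsigned long flags' word */
	} memdesc_flags_t;

	struct page {
		memdesc_flags_t flags;	/* was: unsigned long flags; */
	};

	/* Accepts only a wrapped flags word, not any random unsigned long. */
	static unsigned long read_flags(memdesc_flags_t flags)
	{
		return flags.f;
	}

	int main(void)
	{
		struct page page = { .flags = { .f = 0x1UL } };

		/*
		 * page.flags is no longer an unsigned long, so code that
		 * used to say page->flags must now say page->flags.f;
		 * that is most of the churn in the diff below.
		 */
		printf("flags word: %#lx\n", read_flags(page.flags));
		return 0;
	}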
-rw-r--r--  arch/arc/mm/cache.c | 8
-rw-r--r--  arch/arc/mm/tlb.c | 2
-rw-r--r--  arch/arm/include/asm/hugetlb.h | 2
-rw-r--r--  arch/arm/mm/copypage-v4mc.c | 2
-rw-r--r--  arch/arm/mm/copypage-v6.c | 2
-rw-r--r--  arch/arm/mm/copypage-xscale.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/mm/fault-armv.c | 2
-rw-r--r--  arch/arm/mm/flush.c | 10
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 6
-rw-r--r--  arch/arm64/include/asm/mte.h | 16
-rw-r--r--  arch/arm64/mm/flush.c | 8
-rw-r--r--  arch/csky/abiv1/cacheflush.c | 6
-rw-r--r--  arch/mips/include/asm/cacheflush.h | 6
-rw-r--r--  arch/nios2/mm/cacheflush.c | 6
-rw-r--r--  arch/openrisc/include/asm/cacheflush.h | 2
-rw-r--r--  arch/openrisc/mm/cache.c | 2
-rw-r--r--  arch/parisc/kernel/cache.c | 6
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 4
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 4
-rw-r--r--  arch/powerpc/mm/pgtable.c | 12
-rw-r--r--  arch/riscv/include/asm/cacheflush.h | 4
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 2
-rw-r--r--  arch/riscv/mm/cacheflush.c | 4
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 2
-rw-r--r--  arch/s390/kernel/uv.c | 12
-rw-r--r--  arch/s390/mm/gmap.c | 2
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 2
-rw-r--r--  arch/sh/include/asm/hugetlb.h | 2
-rw-r--r--  arch/sh/mm/cache-sh4.c | 2
-rw-r--r--  arch/sh/mm/cache-sh7705.c | 2
-rw-r--r--  arch/sh/mm/cache.c | 14
-rw-r--r--  arch/sh/mm/kmap.c | 2
-rw-r--r--  arch/sparc/mm/init_64.c | 10
-rw-r--r--  arch/x86/mm/pat/memtype.c | 6
-rw-r--r--  arch/xtensa/mm/cache.c | 12
-rw-r--r--  fs/fuse/dev.c | 2
-rw-r--r--  fs/gfs2/glops.c | 2
-rw-r--r--  fs/jffs2/file.c | 4
-rw-r--r--  fs/nilfs2/page.c | 2
-rw-r--r--  fs/proc/page.c | 4
-rw-r--r--  fs/ubifs/file.c | 6
-rw-r--r--  include/linux/mm.h | 32
-rw-r--r--  include/linux/mm_inline.h | 12
-rw-r--r--  include/linux/mm_types.h | 8
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/page-flags.h | 40
-rw-r--r--  include/linux/pgalloc_tag.h | 7
-rw-r--r--  include/trace/events/page_ref.h | 4
-rw-r--r--  mm/filemap.c | 8
-rw-r--r--  mm/huge_memory.c | 4
-rw-r--r--  mm/memory-failure.c | 12
-rw-r--r--  mm/mmzone.c | 4
-rw-r--r--  mm/page_alloc.c | 12
-rw-r--r--  mm/swap.c | 8
-rw-r--r--  mm/vmscan.c | 18
-rw-r--r--  mm/workingset.c | 2
58 files changed, 195 insertions, 190 deletions
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9106ceac323c..7d2f93dc1e91 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -704,7 +704,7 @@ static inline void arc_slc_enable(void)
void flush_dcache_folio(struct folio *folio)
{
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -889,8 +889,8 @@ void copy_user_highpage(struct page *to, struct page *from,
copy_page(kto, kfrom);
- clear_bit(PG_dc_clean, &dst->flags);
- clear_bit(PG_dc_clean, &src->flags);
+ clear_bit(PG_dc_clean, &dst->flags.f);
+ clear_bit(PG_dc_clean, &src->flags.f);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -900,7 +900,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
struct folio *folio = page_folio(page);
clear_page(to);
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
}
EXPORT_SYMBOL(clear_user_page);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index cae4a7aae0ed..ed6915ba76ec 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -488,7 +488,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
*/
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
- int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
if (dirty) {
unsigned long offset = offset_in_folio(folio, paddr);
nr = folio_nr_pages(folio);
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index b766c4b373f6..700055b1ccb3 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -17,7 +17,7 @@
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7ddd82b9fe8b..ed843bb22020 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -67,7 +67,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a1a71f36d850..0710dba5c0bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -73,7 +73,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index f1e29d3e8193..e16af68d709f 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -87,7 +87,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 88c2d68a69c9..08641a936394 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -718,7 +718,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (size < sz)
break;
if (!offset)
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
offset = 0;
size -= sz;
if (!size)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 39fd5df73317..91e488767783 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -203,7 +203,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
folio = page_folio(pfn_to_page(pfn));
mapping = folio_flush_mapping(folio);
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 5219158d54cf..19470d938b23 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -304,7 +304,7 @@ void __sync_icache_dcache(pte_t pteval)
else
mapping = NULL;
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(mapping, folio);
if (pte_exec(pteval))
@@ -343,8 +343,8 @@ void flush_dcache_folio(struct folio *folio)
return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
return;
}
@@ -352,14 +352,14 @@ void flush_dcache_folio(struct folio *folio)
if (!cache_ops_need_broadcast() &&
mapping && !folio_mapped(folio))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
else {
__flush_dcache_folio(mapping, folio);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, folio);
else if (mapping)
__flush_icache_all();
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2a8155c4a882..44c1f757bfcf 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
- clear_bit(PG_mte_tagged, &folio->flags);
- clear_bit(PG_mte_lock, &folio->flags);
+ clear_bit(PG_mte_tagged, &folio->flags.f);
+ clear_bit(PG_mte_lock, &folio->flags.f);
}
#endif
}
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 6567df8ec8ca..3b5069f4683d 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
* before the page flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &page->flags);
+ set_bit(PG_mte_tagged, &page->flags.f);
}
static inline bool page_mte_tagged(struct page *page)
{
- bool ret = test_bit(PG_mte_tagged, &page->flags);
+ bool ret = test_bit(PG_mte_tagged, &page->flags.f);
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
- if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
return true;
/*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
* before the folio flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &folio->flags);
+ set_bit(PG_mte_tagged, &folio->flags.f);
}
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
- bool ret = test_bit(PG_mte_tagged, &folio->flags);
+ bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
- if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+ if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
return true;
/*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 013eead9b695..fbf08b543c3f 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -53,11 +53,11 @@ void __sync_icache_dcache(pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
sync_icache_aliases((unsigned long)folio_address(folio),
(unsigned long)folio_address(folio) +
folio_size(folio));
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -69,8 +69,8 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
*/
void flush_dcache_folio(struct folio *folio)
{
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
EXPORT_SYMBOL(flush_dcache_folio);
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 171e8fb32285..4bc0aad3cf8a 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -25,12 +25,12 @@ void flush_dcache_folio(struct folio *folio)
mapping = folio_flush_mapping(folio);
if (mapping && !folio_mapped(folio))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -56,7 +56,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
return;
folio = page_folio(pfn_to_page(pfn));
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
dcache_wbinv_all();
if (folio_flush_mapping(folio)) {
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 1f14132b3fc9..5d283ef89d90 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -37,11 +37,11 @@
#define PG_dcache_dirty PG_arch_1
#define folio_test_dcache_dirty(folio) \
- test_bit(PG_dcache_dirty, &(folio)->flags)
+ test_bit(PG_dcache_dirty, &(folio)->flags.f)
#define folio_set_dcache_dirty(folio) \
- set_bit(PG_dcache_dirty, &(folio)->flags)
+ set_bit(PG_dcache_dirty, &(folio)->flags.f)
#define folio_clear_dcache_dirty(folio) \
- clear_bit(PG_dcache_dirty, &(folio)->flags)
+ clear_bit(PG_dcache_dirty, &(folio)->flags.f)
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 0ee9c5f02e08..8321182eb927 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -187,7 +187,7 @@ void flush_dcache_folio(struct folio *folio)
/* Flush this page if there are aliases. */
if (mapping && !mapping_mapped(mapping)) {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
} else {
__flush_dcache_folio(folio);
if (mapping) {
@@ -195,7 +195,7 @@ void flush_dcache_folio(struct folio *folio)
flush_aliases(mapping, folio);
flush_icache_range(start, start + folio_size(folio));
}
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -227,7 +227,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
return;
folio = page_folio(pfn_to_page(pfn));
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(folio);
mapping = folio_flush_mapping(folio);
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 0e60af486ec1..cd8f971c0fec 100644
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -75,7 +75,7 @@ static inline void sync_icache_dcache(struct page *page)
static inline void flush_dcache_folio(struct folio *folio)
{
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 0f265b8e73ec..f33df46dae4e 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -83,7 +83,7 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
{
unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
struct folio *folio = page_folio(pfn_to_page(pfn));
- int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
/*
* Since icaches do not snoop for updated data on OpenRISC, we
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 37ca484cc495..4c5240d3a3c7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -122,10 +122,10 @@ void __update_cache(pte_t pte)
pfn = folio_pfn(folio);
nr = folio_nr_pages(folio);
if (folio_flush_mapping(folio) &&
- test_bit(PG_dcache_dirty, &folio->flags)) {
+ test_bit(PG_dcache_dirty, &folio->flags.f)) {
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
- clear_bit(PG_dcache_dirty, &folio->flags);
+ clear_bit(PG_dcache_dirty, &folio->flags.f);
} else if (parisc_requires_coherency())
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
@@ -481,7 +481,7 @@ void flush_dcache_folio(struct folio *folio)
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &folio->flags);
+ set_bit(PG_dcache_dirty, &folio->flags.f);
return;
}
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index f2656774aaa9..1fea42928f64 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -40,8 +40,8 @@ static inline void flush_dcache_folio(struct folio *folio)
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ca3829d47ab7..0953f2daa466 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -939,9 +939,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
/* Clear i-cache for new pages */
folio = page_folio(pfn_to_page(pfn));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 4693c464fc5a..3aee3af614af 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1562,11 +1562,11 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
folio = page_folio(pte_page(pte));
/* page is dirty */
- if (!test_bit(PG_dcache_clean, &folio->flags) &&
+ if (!test_bit(PG_dcache_clean, &folio->flags.f) &&
!folio_test_reserved(folio)) {
if (trap == INTERRUPT_INST_STORAGE) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
} else
pp |= HPTE_R_N;
}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index dfaa9fd86f7e..56d7e8960e77 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -87,9 +87,9 @@ static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
struct folio *folio = maybe_pte_to_folio(pte);
if (!folio)
return pte;
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
return pte;
@@ -127,13 +127,13 @@ static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
return pte;
/* If the page clean, we move on */
- if (test_bit(PG_dcache_clean, &folio->flags))
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
return pte;
/* If it's an exec fault, we flush the cache and make it clean */
if (is_exec_fault()) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
return pte;
}
@@ -175,12 +175,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
goto bail;
/* If the page is already clean, we move on */
- if (test_bit(PG_dcache_clean, &folio->flags))