From 3d0186bb068e6cc6c23dc1d2f0b1cf64894c40ea Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Sat, 16 Jun 2018 17:32:07 -0400
Subject: Update email address

Redirect some older email addresses that are in the git logs.

Signed-off-by: Matthew Wilcox
---
 .mailmap                           | 7 +++++++
 MAINTAINERS                        | 6 +++---
 arch/parisc/kernel/syscall.S       | 2 +-
 drivers/input/keyboard/hilkbd.c    | 2 +-
 drivers/pci/hotplug/acpiphp.h      | 2 +-
 drivers/pci/hotplug/acpiphp_core.c | 4 ++--
 drivers/pci/hotplug/acpiphp_glue.c | 2 +-
 fs/isofs/dir.c                     | 2 +-
 include/linux/xarray.h             | 2 +-
 9 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/.mailmap b/.mailmap
index 285e09645b31..89f532caf639 100644
--- a/.mailmap
+++ b/.mailmap
@@ -119,6 +119,13 @@ Mark Brown
 Mark Yao
 Martin Kepplinger
 Martin Kepplinger
+Matthew Wilcox
+Matthew Wilcox
+Matthew Wilcox
+Matthew Wilcox
+Matthew Wilcox
+Matthew Wilcox
+Matthew Wilcox
 Matthieu CASTET
 Mauro Carvalho Chehab
 Mauro Carvalho Chehab
diff --git a/MAINTAINERS b/MAINTAINERS
index a255240d1452..612e5dbf3431 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -536,7 +536,7 @@ F:	Documentation/hwmon/adt7475
 F:	drivers/hwmon/adt7475.c
 
 ADVANSYS SCSI DRIVER
-M:	Matthew Wilcox
+M:	Matthew Wilcox
 M:	Hannes Reinecke
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
@@ -4364,7 +4364,7 @@ S:	Maintained
 F:	drivers/i2c/busses/i2c-diolan-u2c.c
 
 FILESYSTEM DIRECT ACCESS (DAX)
-M:	Matthew Wilcox
+M:	Matthew Wilcox
 M:	Ross Zwisler
 M:	Jan Kara
 L:	linux-fsdevel@vger.kernel.org
@@ -8626,7 +8626,7 @@ F:	drivers/message/fusion/
 F:	drivers/scsi/mpt3sas/
 
 LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers
-M:	Matthew Wilcox
+M:	Matthew Wilcox
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/sym53c8xx_2/
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f453997a7b8f..a9bc90dc4ae7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -2,7 +2,7 @@
  * Linux/PA-RISC Project (http://www.parisc-linux.org/)
  *
  * System call entry code / Linux gateway page
- * Copyright (c) Matthew Wilcox 1999
+ * Copyright (c) Matthew Wilcox 1999
  * Licensed under the GNU GPL.
  * thanks to Philipp Rumpf, Mike Shaver and various others
  * sorry about the wall, puffin..
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 5c7afdec192c..f5c5ae8b6c06 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -2,7 +2,7 @@
  * linux/drivers/hil/hilkbd.c
  *
  * Copyright (C) 1998 Philip Blundell
- * Copyright (C) 1999 Matthew Wilcox
+ * Copyright (C) 1999 Matthew Wilcox
  * Copyright (C) 1999-2007 Helge Deller
  *
  * Very basic HP Human Interface Loop (HIL) driver.
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e438a2d734f2..14cef4cf592b 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -8,7 +8,7 @@
  * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
  * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
  * Copyright (C) 2002,2003 NEC Corporation
- * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
+ * Copyright (C) 2003-2005 Matthew Wilcox (willy@infradead.org)
  * Copyright (C) 2003-2005 Hewlett Packard
  *
  * All rights reserved.
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index ad32ffbc4b91..0447b169e7ff 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -8,7 +8,7 @@
  * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
  * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
  * Copyright (C) 2002,2003 NEC Corporation
- * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
+ * Copyright (C) 2003-2005 Matthew Wilcox (willy@infradead.org)
  * Copyright (C) 2003-2005 Hewlett Packard
  *
  * All rights reserved.
@@ -40,7 +40,7 @@ bool acpiphp_disabled;
 static struct acpiphp_attention_info *attention_info;
 
 #define DRIVER_VERSION	"0.5"
-#define DRIVER_AUTHOR	"Greg Kroah-Hartman , Takayoshi Kochi , Matthew Wilcox "
+#define DRIVER_AUTHOR	"Greg Kroah-Hartman , Takayoshi Kochi , Matthew Wilcox "
 #define DRIVER_DESC	"ACPI Hot Plug PCI Controller Driver"
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 12afa7fdf77e..e4c46637f32f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -5,7 +5,7 @@
  * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
  * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
  * Copyright (C) 2002,2003 NEC Corporation
- * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
+ * Copyright (C) 2003-2005 Matthew Wilcox (willy@infradead.org)
  * Copyright (C) 2003-2005 Hewlett Packard
  * Copyright (C) 2005 Rajesh Shah (rajesh.shah@intel.com)
  * Copyright (C) 2005 Intel Corporation
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index 947ce22f5b3c..f0fe641893a5 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -46,7 +46,7 @@ int isofs_name_translate(struct iso_directory_record *de, char *new, struct inod
 	return i;
 }
 
-/* Acorn extensions written by Matthew Wilcox 1998 */
+/* Acorn extensions written by Matthew Wilcox 1998 */
 int get_acorn_filename(struct iso_directory_record *de, char *retname,
 	struct inode *inode)
 {
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 2dfc8006fe64..9e4c86853fa4 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -4,7 +4,7 @@
 /*
  * eXtensible Arrays
  * Copyright (c) 2017 Microsoft Corporation
- * Author: Matthew Wilcox
+ * Author: Matthew Wilcox
  */
 
 #include
-- 
cgit v1.2.3


From 66ee620f06f99d72475db6eb638559ba608c7dee Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Mon, 25 Jun 2018 06:56:50 -0400
Subject: idr: Permit any valid kernel pointer to be stored

An upcoming change to the encoding of internal entries will set the
bottom two bits to 0b10.  Unfortunately, m68k only aligns some data
structures to 2 bytes, so the IDR will interpret them as internal
entries and things will go badly wrong.

Change the radix tree so that it stops either when the node indicates
that it's the bottom of the tree (shift == 0) or when the entry is not
an internal entry.  This means we cannot insert an arbitrary kernel
pointer as a multiorder entry, but the IDR does not permit multiorder
entries.

Annoyingly, this means the IDR can no longer take advantage of the
radix tree's ability to store a single entry at offset 0 without
allocating memory.  A pointer which is 2-byte aligned cannot be stored
directly in the root as it would be indistinguishable from a node, so
we must allocate a node in order to store a 2-byte pointer at index 0.
The idr_replace() function does not take a GFP flags argument, so
cannot allocate memory.  If a user inserts a 4-byte aligned pointer at
index 0 and then replaces it with a 2-byte aligned pointer, we must be
able to store it.

Arbitrary pointer values are still not permitted; pointers of the form
2 + (i * 4) for values of i between 0 and 1023 are reserved for the
implementation.  These are not valid kernel pointers as they would
point into the zero page.

This change does cause a runtime memory consumption regression for
the IDA.  I will recover that later.

Signed-off-by: Matthew Wilcox
Tested-by: Guenter Roeck
---
 lib/idr.c                           |  4 ---
 lib/radix-tree.c                    | 21 +++++++++----
 tools/testing/radix-tree/idr-test.c | 63 +++++++++++++++++++++++++++++++++++++
 3 files changed, 78 insertions(+), 10 deletions(-)

diff --git a/lib/idr.c b/lib/idr.c
index fab2fd5bc326..729e381e23b4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -39,8 +39,6 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 	unsigned int base = idr->idr_base;
 	unsigned int id = *nextid;
 
-	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
-		return -EINVAL;
 	if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
 		idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
 
@@ -295,8 +293,6 @@ void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
 	void __rcu **slot = NULL;
 	void *entry;
 
-	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
-		return ERR_PTR(-EINVAL);
 	id -= idr->idr_base;
 
 	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bc03ecc4dfd2..a904a8ddd174 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -703,6 +703,14 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
 		if (!radix_tree_is_internal_node(child) && node->shift)
 			break;
 
+		/*
+		 * For an IDR, we must not shrink entry 0 into the root in
+		 * case somebody calls idr_replace() with a pointer that
+		 * appears to be an internal entry
+		 */
+		if (!node->shift && is_idr(root))
+			break;
+
 		if (radix_tree_is_internal_node(child))
 			entry_to_node(child)->parent = NULL;
 
@@ -875,8 +883,8 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 	for (;;) {
 		void *entry = rcu_dereference_raw(child->slots[offset]);
 
-		if (radix_tree_is_internal_node(entry) &&
-		    !is_sibling_entry(child, entry)) {
+		if (radix_tree_is_internal_node(entry) && child->shift &&
+				!is_sibling_entry(child, entry)) {
 			child = entry_to_node(entry);
 			offset = 0;
 			continue;
@@ -1049,6 +1057,8 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
 		parent = entry_to_node(node);
 		offset = radix_tree_descend(parent, &node, index);
 		slot = parent->slots + offset;
+		if (parent->shift == 0)
+			break;
 	}
 
 	if (nodep)
@@ -1123,9 +1133,6 @@ static inline void replace_sibling_entries(struct radix_tree_node *node,
 static void replace_slot(void __rcu **slot, void *item,
 		struct radix_tree_node *node, int count, int exceptional)
 {
-	if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
-		return;
-
 	if (node && (count || exceptional)) {
 		node->count += count;
 		node->exceptional += exceptional;
@@ -1784,7 +1791,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
 			goto restart;
 		if (child == RADIX_TREE_RETRY)
 			break;
-	} while (radix_tree_is_internal_node(child));
+	} while (node->shift && radix_tree_is_internal_node(child));
 
 	/* Update the iterator state */
 	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
@@ -2150,6 +2157,8 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
 		shift = error;
 		child = rcu_dereference_raw(root->rnode);
 	}
+	if (start == 0 && shift == 0)
+		shift = RADIX_TREE_MAP_SHIFT;
 
 	while (shift) {
 		shift -= RADIX_TREE_MAP_SHIFT;
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 321ba92c70d2..f620c831a4b5 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -227,6 +227,66 @@ void idr_u32_test(int base)
 	idr_u32_test1(&idr, 0xffffffff);
 }
 
+static void idr_align_test(struct idr *idr)
+{
+	char name[] = "Motorola 68000";
+	int i, id;
+	void *entry;
+
+	for (i = 0; i < 9; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 1; i < 10; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 2; i < 11; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 3; i < 12; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 0; i < 8; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
+		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
+		idr_for_each_entry(idr, entry, id);
+		idr_remove(idr, 1);
+		idr_for_each_entry(idr, entry, id);
+		idr_remove(idr, 0);
+		BUG_ON(!idr_is_empty(idr));
+	}
+
+	for (i = 0; i < 8; i++) {
+		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
+		idr_for_each_entry(idr, entry, id);
+		idr_replace(idr, &name[i], 0);
+		idr_for_each_entry(idr, entry, id);
+		BUG_ON(idr_find(idr, 0) != &name[i]);
+		idr_remove(idr, 0);
+	}
+
+	for (i = 0; i < 8; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
+		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
+		idr_remove(idr, 1);
+		idr_for_each_entry(idr, entry, id);
+		idr_replace(idr, &name[i + 1], 0);
+		idr_for_each_entry(idr, entry, id);
+		idr_remove(idr, 0);
+	}
+}
+
 void idr_checks(void)
 {
 	unsigned long i;
@@ -307,6 +367,7 @@ void idr_checks(void)
 	idr_u32_test(4);
 	idr_u32_test(1);
 	idr_u32_test(0);
+	idr_align_test(&idr);
 }
 
 #define module_init(x)
@@ -341,6 +402,7 @@ void ida_check_nomem(void)
  */
 void ida_check_conv_user(void)
 {
+#if 0
 	DEFINE_IDA(ida);
 	unsigned long i;
 
@@ -358,6 +420,7 @@ void ida_check_conv_user(void)
 		IDA_BUG_ON(&ida, id != i);
 	}
 	ida_destroy(&ida);
+#endif
 }
 
 void ida_check_random(void)
-- 
cgit v1.2.3


From 3159f943aafdbacb2f94c38fdaadabf2bbde2a14 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 3 Nov 2017 13:30:42 -0400
Subject: xarray: Replace exceptional entries

Introduce xarray value entries and tagged pointers to replace radix
tree exceptional entries.  This is a slight change in encoding to allow
the use of an extra bit (we can now store BITS_PER_LONG - 1 bits in a
value entry).  It is also a change in emphasis; exceptional entries
are intimidating and different.  As the comment explains, you can
choose to store values or pointers in the xarray and they are both
first-class citizens.
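To make the new encoding concrete, here is a minimal user-space sketch
of the value-entry helpers this patch adds to include/linux/xarray.h.
The three helper bodies mirror the definitions in the patch below; the
main() harness around them is illustrative only and not part of the
patch:

#include <assert.h>
#include <stdbool.h>

/* Mirrors xa_mk_value(): shift the integer up and set bit 0. */
static inline void *xa_mk_value(unsigned long v)
{
	return (void *)((v << 1) | 1);
}

/* Mirrors xa_to_value(): recover the integer by shifting bit 0 away. */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/* Mirrors xa_is_value(): bit 0 set means "value entry", never a pointer. */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}

int main(void)
{
	void *entry = xa_mk_value(42);
	int obj;

	assert(xa_is_value(entry));		/* distinguishable from a pointer */
	assert(xa_to_value(entry) == 42);	/* round-trips the integer */
	assert(!xa_is_value(&obj));		/* real pointers have bit 0 clear */
	return 0;
}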
Signed-off-by: Matthew Wilcox Reviewed-by: Josef Bacik --- arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +- arch/powerpc/include/asm/nohash/64/pgtable.h | 4 +- drivers/gpu/drm/i915/i915_gem.c | 17 ++-- drivers/staging/erofs/utils.c | 18 ++--- fs/btrfs/compression.c | 2 +- fs/dax.c | 112 +++++++++++++-------------- fs/proc/task_mmu.c | 2 +- include/linux/radix-tree.h | 36 ++------- include/linux/swapops.h | 19 ++--- include/linux/xarray.h | 102 ++++++++++++++++++++++++ lib/idr.c | 60 ++++++-------- lib/radix-tree.c | 21 +++-- mm/filemap.c | 10 +-- mm/khugepaged.c | 2 +- mm/madvise.c | 2 +- mm/memcontrol.c | 2 +- mm/mincore.c | 2 +- mm/readahead.c | 2 +- mm/shmem.c | 10 +-- mm/swap.c | 2 +- mm/truncate.c | 12 +-- mm/workingset.c | 13 ++-- tools/testing/radix-tree/idr-test.c | 6 +- tools/testing/radix-tree/linux/radix-tree.h | 1 - tools/testing/radix-tree/multiorder.c | 47 ++++++----- tools/testing/radix-tree/test.c | 2 +- 26 files changed, 278 insertions(+), 232 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2fdc865ca374..62039e557ac0 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -723,9 +723,7 @@ static inline bool pte_user(pte_t pte) BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \ BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \ } while (0) -/* - * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT; - */ + #define SWP_TYPE_BITS 5 #define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \ & ((1UL << SWP_TYPE_BITS) - 1)) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 7cd6809f4d33..05765c2d2c1f 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -313,9 +313,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define MAX_SWAPFILES_CHECK() do { \ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \ } while (0) -/* - * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT; - */ + #define SWP_TYPE_BITS 5 #define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \ & ((1UL << SWP_TYPE_BITS) - 1)) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fcc73a6ab503..316730b45f84 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -5996,7 +5996,8 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj, count = __sg_page_count(sg); while (idx + count <= n) { - unsigned long exception, i; + void *entry; + unsigned long i; int ret; /* If we cannot allocate and insert this entry, or the @@ -6011,12 +6012,9 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj, if (ret && ret != -EEXIST) goto scan; - exception = - RADIX_TREE_EXCEPTIONAL_ENTRY | - idx << RADIX_TREE_EXCEPTIONAL_SHIFT; + entry = xa_mk_value(idx); for (i = 1; i < count; i++) { - ret = radix_tree_insert(&iter->radix, idx + i, - (void *)exception); + ret = radix_tree_insert(&iter->radix, idx + i, entry); if (ret && ret != -EEXIST) goto scan; } @@ -6054,15 +6052,14 @@ lookup: GEM_BUG_ON(!sg); /* If this index is in the middle of multi-page sg entry, - * the radixtree will contain an exceptional entry that points + * the radix tree will contain a value entry that points * to the start of that range. We will return the pointer to * the base page and the offset of this page within the * sg entry's range. 
*/ *offset = 0; - if (unlikely(radix_tree_exception(sg))) { - unsigned long base = - (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; + if (unlikely(xa_is_value(sg))) { + unsigned long base = xa_to_value(sg); sg = radix_tree_lookup(&iter->radix, base); GEM_BUG_ON(!sg); diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c index 595cf90af9bb..bdee9bd09f11 100644 --- a/drivers/staging/erofs/utils.c +++ b/drivers/staging/erofs/utils.c @@ -35,7 +35,6 @@ static atomic_long_t erofs_global_shrink_cnt; #ifdef CONFIG_EROFS_FS_ZIP -/* radix_tree and the future XArray both don't use tagptr_t yet */ struct erofs_workgroup *erofs_find_workgroup( struct super_block *sb, pgoff_t index, bool *tag) { @@ -47,9 +46,8 @@ repeat: rcu_read_lock(); grp = radix_tree_lookup(&sbi->workstn_tree, index); if (grp != NULL) { - *tag = radix_tree_exceptional_entry(grp); - grp = (void *)((unsigned long)grp & - ~RADIX_TREE_EXCEPTIONAL_ENTRY); + *tag = xa_pointer_tag(grp); + grp = xa_untag_pointer(grp); if (erofs_workgroup_get(grp, &oldcount)) { /* prefer to relax rcu read side */ @@ -83,9 +81,7 @@ int erofs_register_workgroup(struct super_block *sb, sbi = EROFS_SB(sb); erofs_workstn_lock(sbi); - if (tag) - grp = (void *)((unsigned long)grp | - 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT); + grp = xa_tag_pointer(grp, tag); err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp); @@ -131,9 +127,7 @@ repeat: for (i = 0; i < found; ++i) { int cnt; - struct erofs_workgroup *grp = (void *) - ((unsigned long)batch[i] & - ~RADIX_TREE_EXCEPTIONAL_ENTRY); + struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); first_index = grp->index + 1; @@ -150,8 +144,8 @@ repeat: #endif continue; - if (radix_tree_delete(&sbi->workstn_tree, - grp->index) != grp) { + if (xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, + grp->index)) != grp) { #ifdef EROFS_FS_HAS_MANAGED_CACHE skip: erofs_workgroup_unfreeze(grp, 1); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 9bfa66592aa7..fd25e125303c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -440,7 +440,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, rcu_read_lock(); page = radix_tree_lookup(&mapping->i_pages, pg_index); rcu_read_unlock(); - if (page && !radix_tree_exceptional_entry(page)) { + if (page && !xa_is_value(page)) { misses++; if (misses > 4) break; diff --git a/fs/dax.c b/fs/dax.c index b68ce484e1be..ebcec36335eb 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -59,56 +59,57 @@ static int __init init_dax_wait_table(void) fs_initcall(init_dax_wait_table); /* - * We use lowest available bit in exceptional entry for locking, one bit for - * the entry size (PMD) and two more to tell us if the entry is a zero page or - * an empty entry that is just used for locking. In total four special bits. + * DAX pagecache entries use XArray value entries so they can't be mistaken + * for pages. We use one bit for locking, one bit for the entry size (PMD) + * and two more to tell us if the entry is a zero page or an empty entry that + * is just used for locking. In total four special bits. * * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem * block allocation. 
*/ -#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) -#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) -#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) -#define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) -#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) +#define DAX_SHIFT (4) +#define DAX_LOCKED (1UL << 0) +#define DAX_PMD (1UL << 1) +#define DAX_ZERO_PAGE (1UL << 2) +#define DAX_EMPTY (1UL << 3) static unsigned long dax_radix_pfn(void *entry) { - return (unsigned long)entry >> RADIX_DAX_SHIFT; + return xa_to_value(entry) >> DAX_SHIFT; } static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags) { - return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | - (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK); + return xa_mk_value(flags | ((unsigned long)pfn << DAX_SHIFT) | + DAX_LOCKED); } static unsigned int dax_radix_order(void *entry) { - if ((unsigned long)entry & RADIX_DAX_PMD) + if (xa_to_value(entry) & DAX_PMD) return PMD_SHIFT - PAGE_SHIFT; return 0; } static int dax_is_pmd_entry(void *entry) { - return (unsigned long)entry & RADIX_DAX_PMD; + return xa_to_value(entry) & DAX_PMD; } static int dax_is_pte_entry(void *entry) { - return !((unsigned long)entry & RADIX_DAX_PMD); + return !(xa_to_value(entry) & DAX_PMD); } static int dax_is_zero_entry(void *entry) { - return (unsigned long)entry & RADIX_DAX_ZERO_PAGE; + return xa_to_value(entry) & DAX_ZERO_PAGE; } static int dax_is_empty_entry(void *entry) { - return (unsigned long)entry & RADIX_DAX_EMPTY; + return xa_to_value(entry) & DAX_EMPTY; } /* @@ -186,9 +187,9 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping, */ static inline int slot_locked(struct address_space *mapping, void **slot) { - unsigned long entry = (unsigned long) - radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); - return entry & RADIX_DAX_ENTRY_LOCK; + unsigned long entry = xa_to_value( + radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock)); + return entry & DAX_LOCKED; } /* @@ -196,12 +197,11 @@ static inline int slot_locked(struct address_space *mapping, void **slot) */ static inline void *lock_slot(struct address_space *mapping, void **slot) { - unsigned long entry = (unsigned long) - radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); - - entry |= RADIX_DAX_ENTRY_LOCK; - radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); - return (void *)entry; + unsigned long v = xa_to_value( + radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock)); + void *entry = xa_mk_value(v | DAX_LOCKED); + radix_tree_replace_slot(&mapping->i_pages, slot, entry); + return entry; } /* @@ -209,17 +209,16 @@ static inline void *lock_slot(struct address_space *mapping, void **slot) */ static inline void *unlock_slot(struct address_space *mapping, void **slot) { - unsigned long entry = (unsigned long) - radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); - - entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK; - radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); - return (void *)entry; + unsigned long v = xa_to_value( + radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock)); + void *entry = xa_mk_value(v & ~DAX_LOCKED); + radix_tree_replace_slot(&mapping->i_pages, slot, entry); + return entry; } /* * Lookup entry in radix tree, wait for it to become unlocked if it is - * exceptional entry and return it. The caller must call + * a DAX entry and return it. 
The caller must call * put_unlocked_mapping_entry() when he decided not to lock the entry or * put_locked_mapping_entry() when he locked the entry and now wants to * unlock it. @@ -242,7 +241,7 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping, entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot); if (!entry || - WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) || + WARN_ON_ONCE(!xa_is_value(entry)) || !slot_locked(mapping, slot)) { if (slotp) *slotp = slot; @@ -283,7 +282,7 @@ static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index) xa_lock_irq(&mapping->i_pages); entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot); - if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) || + if (WARN_ON_ONCE(!entry || !xa_is_value(entry) || !slot_locked(mapping, slot))) { xa_unlock_irq(&mapping->i_pages); return; @@ -472,12 +471,11 @@ void dax_unlock_mapping_entry(struct page *page) } /* - * Find radix tree entry at given index. If it points to an exceptional entry, - * return it with the radix tree entry locked. If the radix tree doesn't - * contain given index, create an empty exceptional entry for the index and - * return with it locked. + * Find radix tree entry at given index. If it is a DAX entry, return it + * with the radix tree entry locked. If the radix tree doesn't contain the + * given index, create an empty entry for the index and return with it locked. * - * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will + * When requesting an entry with size DAX_PMD, grab_mapping_entry() will * either return that locked entry or will return an error. This error will * happen if there are any 4k entries within the 2MiB range that we are * requesting. @@ -507,13 +505,13 @@ restart: xa_lock_irq(&mapping->i_pages); entry = get_unlocked_mapping_entry(mapping, index, &slot); - if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) { + if (WARN_ON_ONCE(entry && !xa_is_value(entry))) { entry = ERR_PTR(-EIO); goto out_unlock; } if (entry) { - if (size_flag & RADIX_DAX_PMD) { + if (size_flag & DAX_PMD) { if (dax_is_pte_entry(entry)) { put_unlocked_mapping_entry(mapping, index, entry); @@ -584,7 +582,7 @@ restart: true); } - entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY); + entry = dax_radix_locked_entry(0, size_flag | DAX_EMPTY); err = __radix_tree_insert(&mapping->i_pages, index, dax_radix_order(entry), entry); @@ -673,8 +671,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping) if (index >= end) break; - if (WARN_ON_ONCE( - !radix_tree_exceptional_entry(pvec_ent))) + if (WARN_ON_ONCE(!xa_is_value(pvec_ent))) continue; xa_lock_irq(&mapping->i_pages); @@ -713,7 +710,7 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping, xa_lock_irq(pages); entry = get_unlocked_mapping_entry(mapping, index, NULL); - if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry))) + if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto out; if (!trunc && (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) || @@ -729,8 +726,8 @@ out: return ret; } /* - * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree - * entry to get unlocked before deleting it. + * Delete DAX entry at @index from @mapping. Wait for it + * to be unlocked before deleting it. 
*/ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) { @@ -740,7 +737,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) * This gets called from truncate / punch_hole path. As such, the caller * must hold locks protecting against concurrent modifications of the * radix tree (usually fs-private i_mmap_sem for writing). Since the - * caller has seen exceptional entry for this index, we better find it + * caller has seen a DAX entry for this index, we better find it * at that index as well... */ WARN_ON_ONCE(!ret); @@ -748,7 +745,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) } /* - * Invalidate exceptional DAX entry if it is clean. + * Invalidate DAX entry if it is clean. */ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index) @@ -802,7 +799,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping, if (dirty) __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); - if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) { + if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { /* we are replacing a zero page with block mapping */ if (dax_is_pmd_entry(entry)) unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, @@ -940,13 +937,13 @@ static int dax_writeback_one(struct dax_device *dax_dev, * A page got tagged dirty in DAX mapping? Something is seriously * wrong. */ - if (WARN_ON(!radix_tree_exceptional_entry(entry))) + if (WARN_ON(!xa_is_value(entry))) return -EIO; xa_lock_irq(pages); entry2 = get_unlocked_mapping_entry(mapping, index, &slot); /* Entry got punched out / reallocated? */ - if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2))) + if (!entry2 || WARN_ON_ONCE(!xa_is_value(entry2))) goto put_unlocked; /* * Entry got reallocated elsewhere? No need to writeback. We have to @@ -1123,8 +1120,9 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); vm_fault_t ret; - dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, - false); + dax_insert_mapping_entry(mapping, vmf, entry, pfn, + DAX_ZERO_PAGE, false); + ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); trace_dax_load_hole(inode, vmf, ret); return ret; @@ -1514,7 +1512,7 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap, pfn = page_to_pfn_t(zero_page); ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn, - RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false); + DAX_PMD | DAX_ZERO_PAGE, false); ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); if (!pmd_none(*(vmf->pmd))) { @@ -1597,7 +1595,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, * is already in the tree, for instance), it will return -EEXIST and * we just fall back to 4k entries. 
*/ - entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); + entry = grab_mapping_entry(mapping, pgoff, DAX_PMD); if (IS_ERR(entry)) goto fallback; @@ -1635,7 +1633,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, goto finish_iomap; entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, - RADIX_DAX_PMD, write && !sync); + DAX_PMD, write && !sync); /* * If we are doing synchronous page fault and inode needs fsync, diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 5ea1d64cb0b4..669abb617321 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -521,7 +521,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (!page) return; - if (radix_tree_exceptional_entry(page)) + if (xa_is_value(page)) mss->swap += PAGE_SIZE; else put_page(page); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 34149e8b5f73..e9e76ab4fbbf 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -28,34 +28,26 @@ #include #include #include +#include /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: * * 00 - data pointer - * 01 - internal entry - * 10 - exceptional entry - * 11 - this bit combination is currently unused/reserved + * 10 - internal entry + * x1 - value entry * * The internal entry may be a pointer to the next level in the tree, a * sibling entry, or an indicator that the entry in this slot has been moved * to another location in the tree and the lookup should be restarted. While * NULL fits the 'data pointer' pattern, it means that there is no entry in * the tree for this index (no matter what level of the tree it is found at). - * This means that you cannot store NULL in the tree as a value for the index. + * This means that storing a NULL entry in the tree is the same as deleting + * the entry from the tree. */ #define RADIX_TREE_ENTRY_MASK 3UL -#define RADIX_TREE_INTERNAL_NODE 1UL - -/* - * Most users of the radix tree store pointers but shmem/tmpfs stores swap - * entries in the same tree. They are marked as exceptional entries to - * distinguish them from pointers to struct page. - * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. - */ -#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 -#define RADIX_TREE_EXCEPTIONAL_SHIFT 2 +#define RADIX_TREE_INTERNAL_NODE 2UL static inline bool radix_tree_is_internal_node(void *ptr) { @@ -83,11 +75,10 @@ static inline bool radix_tree_is_internal_node(void *ptr) /* * @count is the count of every non-NULL element in the ->slots array - * whether that is an exceptional entry, a retry entry, a user pointer, + * whether that is a value entry, a retry entry, a user pointer, * a sibling entry or a pointer to the next level of the tree. * @exceptional is the count of every element in ->slots which is - * either radix_tree_exceptional_entry() or is a sibling entry for an - * exceptional entry. + * either a value entry or a sibling of a value entry. */ struct radix_tree_node { unsigned char shift; /* Bits remaining in each slot */ @@ -268,17 +259,6 @@ static inline int radix_tree_deref_retry(void *arg) return unlikely(radix_tree_is_internal_node(arg)); } -/** - * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry? - * @arg: value returned by radix_tree_deref_slot - * Returns: 0 if well-aligned pointer, non-0 if exceptional entry. 
- */ -static inline int radix_tree_exceptional_entry(void *arg) -{ - /* Not unlikely because radix_tree_exception often tested first */ - return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY; -} - /** * radix_tree_exception - radix_tree_deref_slot returned either exception? * @arg: value returned by radix_tree_deref_slot diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 22af9d8a84ae..4d961668e5fc 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -18,9 +18,8 @@ * * swp_entry_t's are *never* stored anywhere in their arch-dependent format. */ -#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ - (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) -#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) +#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT) +#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) /* * Store a type+offset into a swp_entry_t in an arch-independent format @@ -29,8 +28,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) { swp_entry_t ret; - ret.val = (type << SWP_TYPE_SHIFT(ret)) | - (offset & SWP_OFFSET_MASK(ret)); + ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK); return ret; } @@ -40,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) */ static inline unsigned swp_type(swp_entry_t entry) { - return (entry.val >> SWP_TYPE_SHIFT(entry)); + return (entry.val >> SWP_TYPE_SHIFT); } /* @@ -49,7 +47,7 @@ static inline unsigned swp_type(swp_entry_t entry) */ static inline pgoff_t swp_offset(swp_entry_t entry) { - return entry.val & SWP_OFFSET_MASK(entry); + return entry.val & SWP_OFFSET_MASK; } #ifdef CONFIG_MMU @@ -90,16 +88,13 @@ static inline swp_entry_t radix_to_swp_entry(void *arg) { swp_entry_t entry; - entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; + entry.val = xa_to_value(arg); return entry; } static inline void *swp_to_radix_entry(swp_entry_t entry) { - unsigned long value; - - value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; - return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY); + return xa_mk_value(entry.val); } #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 9e4c86853fa4..5c8acfc4ff55 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -5,9 +5,111 @@ * eXtensible Arrays * Copyright (c) 2017 Microsoft Corporation * Author: Matthew Wilcox + * + * See Documentation/core-api/xarray.rst for how to use the XArray. */ +#include #include +#include + +/* + * The bottom two bits of the entry determine how the XArray interprets + * the contents: + * + * 00: Pointer entry + * 10: Internal entry + * x1: Value entry or tagged pointer + * + * Attempting to store internal entries in the XArray is a bug. + */ + +#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1) + +/** + * xa_mk_value() - Create an XArray entry from an integer. + * @v: Value to store in XArray. + * + * Context: Any context. + * Return: An entry suitable for storing in the XArray. + */ +static inline void *xa_mk_value(unsigned long v) +{ + WARN_ON((long)v < 0); + return (void *)((v << 1) | 1); +} + +/** + * xa_to_value() - Get value stored in an XArray entry. + * @entry: XArray entry. + * + * Context: Any context. + * Return: The value stored in the XArray entry. + */ +static inline unsigned long xa_to_value(const void *entry) +{ + return (unsigned long)entry >> 1; +} + +/** + * xa_is_value() - Determine if an entry is a value. + * @entry: XArray entry. + * + * Context: Any context. 
+ * Return: True if the entry is a value, false if it is a pointer. + */ +static inline bool xa_is_value(const void *entry) +{ + return (unsigned long)entry & 1; +} + +/** + * xa_tag_pointer() - Create an XArray entry for a tagged pointer. + * @p: Plain pointer. + * @tag: Tag value (0, 1 or 3). + * + * If the user of the XArray prefers, they can tag their pointers instead + * of storing value entries. Three tags are available (0, 1 and 3). + * These are distinct from the xa_mark_t as they are not replicated up + * through the array and cannot be searched for. + * + * Context: Any context. + * Return: An XArray entry. + */ +static inline void *xa_tag_pointer(void *p, unsigned long tag) +{ + return (void *)((unsigned long)p | tag); +} + +/** + * xa_untag_pointer() - Turn an XArray entry into a plain pointer. + * @entry: XArray entry. + * + * If you have stored a tagged pointer in the XArray, call this function + * to get the untagged version of the pointer. + * + * Context: Any context. + * Return: A pointer. + */ +static inline void *xa_untag_pointer(void *entry) +{ + return (void *)((unsigned long)entry & ~3UL); +} + +/** + * xa_pointer_tag() - Get the tag stored in an XArray entry. + * @entry: XArray entry. + * + * If you have stored a tagged pointer in the XArray, call this function + * to get the tag of that pointer. + * + * Context: Any context. + * Return: A tag. + */ +static inline unsigned int xa_pointer_tag(void *entry) +{ + return (unsigned long)entry & 3UL; +} #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) #define xa_lock(xa) spin_lock(&(xa)->xa_lock) diff --git a/lib/idr.c b/lib/idr.c index 729e381e23b4..88419fbc5737 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -338,11 +338,8 @@ EXPORT_SYMBOL(idr_replace); * by the number of bits in the leaf bitmap before doing a radix tree lookup. * * As an optimisation, if there are only a few low bits set in any given - * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional - * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits - * directly in the entry. By being really tricksy, we could store - * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising - * for 0-3 allocated IDs. + * leaf, instead of allocating a 128-byte bitmap, we store the bits + * directly in the entry. * * We allow the radix tree 'exceptional' count to get out of date. Nothing * in the IDA nor the radix tree code checks it. 
If it becomes important @@ -366,12 +363,11 @@ static int ida_get_new_above(struct ida *ida, int start) struct radix_tree_iter iter; struct ida_bitmap *bitmap; unsigned long index; - unsigned bit, ebit; + unsigned bit; int new; index = start / IDA_BITMAP_BITS; bit = start % IDA_BITMAP_BITS; - ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT; slot = radix_tree_iter_init(&iter, index); for (;;) { @@ -386,25 +382,23 @@ static int ida_get_new_above(struct ida *ida, int start) return PTR_ERR(slot); } } - if (iter.index > index) { + if (iter.index > index) bit = 0; - ebit = RADIX_TREE_EXCEPTIONAL_SHIFT; - } new = iter.index * IDA_BITMAP_BITS; bitmap = rcu_dereference_raw(*slot); - if (radix_tree_exception(bitmap)) { - unsigned long tmp = (unsigned long)bitmap; - ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit); - if (ebit < BITS_PER_LONG) { - tmp |= 1UL << ebit; - rcu_assign_pointer(*slot, (void *)tmp); - return new + ebit - - RADIX_TREE_EXCEPTIONAL_SHIFT; + if (xa_is_value(bitmap)) { + unsigned long tmp = xa_to_value(bitmap); + int vbit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, + bit); + if (vbit < BITS_PER_XA_VALUE) { + tmp |= 1UL << vbit; + rcu_assign_pointer(*slot, xa_mk_value(tmp)); + return new + vbit; } bitmap = this_cpu_xchg(ida_bitmap, NULL); if (!bitmap) return -EAGAIN; - bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; + bitmap->bitmap[0] = tmp; rcu_assign_pointer(*slot, bitmap); } @@ -425,17 +419,14 @@ static int ida_get_new_above(struct ida *ida, int start) new += bit; if (new < 0) return -ENOSPC; - if (ebit < BITS_PER_LONG) { - bitmap = (void *)((1UL << ebit) | - RADIX_TREE_EXCEPTIONAL_ENTRY); - radix_tree_iter_replace(root, &iter, slot, - bitmap); - return new; + if (bit < BITS_PER_XA_VALUE) { + bitmap = xa_mk_value(1UL << bit); + } else { + bitmap = this_cpu_xchg(ida_bitmap, NULL); + if (!bitmap) + return -EAGAIN; + __set_bit(bit, bitmap->bitmap); } - bitmap = this_cpu_xchg(ida_bitmap, NULL); - if (!bitmap) - return -EAGAIN; - __set_bit(bit, bitmap->bitmap); radix_tree_iter_replace(root, &iter, slot, bitmap); } @@ -457,9 +448,9 @@ static void ida_remove(struct ida *ida, int id) goto err; bitmap = rcu_dereference_raw(*slot); - if (radix_tree_exception(bitmap)) { + if (xa_is_value(bitmap)) { btmp = (unsigned long *)slot; - offset += RADIX_TREE_EXCEPTIONAL_SHIFT; + offset += 1; /* Intimate knowledge of the value encoding */ if (offset >= BITS_PER_LONG) goto err; } else { @@ -470,9 +461,8 @@ static void ida_remove(struct ida *ida, int id) __clear_bit(offset, btmp); radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE); - if (radix_tree_exception(bitmap)) { - if (rcu_dereference_raw(*slot) == - (void *)RADIX_TREE_EXCEPTIONAL_ENTRY) + if (xa_is_value(bitmap)) { + if (xa_to_value(rcu_dereference_raw(*slot)) == 0) radix_tree_iter_delete(&ida->ida_rt, &iter, slot); } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) { kfree(bitmap); @@ -503,7 +493,7 @@ void ida_destroy(struct ida *ida) xa_lock_irqsave(&ida->ida_rt, flags); radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { struct ida_bitmap *bitmap = rcu_dereference_raw(*slot); - if (!radix_tree_exception(bitmap)) + if (!xa_is_value(bitmap)) kfree(bitmap); radix_tree_iter_delete(&ida->ida_rt, &iter, slot); } diff --git a/lib/radix-tree.c b/lib/radix-tree.c index a904a8ddd174..b6c0e7f3a894 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -340,14 +340,12 @@ static void dump_ida_node(void *entry, unsigned long index) for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) dump_ida_node(node->slots[i], index | (i << node->shift)); - } 
else if (radix_tree_exceptional_entry(entry)) { + } else if (xa_is_value(entry)) { pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n", entry, (int)(index & RADIX_TREE_MAP_MASK), index * IDA_BITMAP_BITS, - index * IDA_BITMAP_BITS + BITS_PER_LONG - - RADIX_TREE_EXCEPTIONAL_SHIFT, - (unsigned long)entry >> - RADIX_TREE_EXCEPTIONAL_SHIFT); + index * IDA_BITMAP_BITS + BITS_PER_XA_VALUE, + xa_to_value(entry)); } else { struct ida_bitmap *bitmap = entry; @@ -656,7 +654,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, BUG_ON(shift > BITS_PER_LONG); if (radix_tree_is_internal_node(entry)) { entry_to_node(entry)->parent = node; - } else if (radix_tree_exceptional_entry(entry)) { + } else if (xa_is_value(entry)) { /* Moving an exceptional root->rnode to a node */ node->exceptional = 1; } @@ -955,12 +953,12 @@ static inline int insert_entries(struct radix_tree_node *node, !is_sibling_entry(node, old) && (old != RADIX_TREE_RETRY)) radix_tree_free_nodes(old); - if (radix_tree_exceptional_entry(old)) + if (xa_is_value(old)) node->exceptional--; } if (node) { node->count += n; - if (radix_tree_exceptional_entry(item)) + if (xa_is_value(item)) node->exceptional += n; } return n; @@ -974,7 +972,7 @@ static inline int insert_entries(struct radix_tree_node *node, rcu_assign_pointer(*slot, item); if (node) { node->count++; - if (radix_tree_exceptional_entry(item)) + if (xa_is_value(item)) node->exceptional++; } return 1; @@ -1190,8 +1188,7 @@ void __radix_tree_replace(struct radix_tree_root *root, radix_tree_update_node_t update_node) { void *old = rcu_dereference_raw(*slot); - int exceptional = !!radix_tree_exceptional_entry(item) - - !!radix_tree_exceptional_entry(old); + int exceptional = !!xa_is_value(item) - !!xa_is_value(old); int count = calculate_count(root, node, slot, item, old); /* @@ -1992,7 +1989,7 @@ static bool __radix_tree_delete(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); - int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; + int exceptional = xa_is_value(old) ? 
-1 : 0; unsigned offset = get_slot_offset(node, slot); int tag; diff --git a/mm/filemap.c b/mm/filemap.c index 52517f28e6f4..4de14e75c4ec 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -127,7 +127,7 @@ static int page_cache_tree_insert(struct address_space *mapping, p = radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); - if (!radix_tree_exceptional_entry(p)) + if (!xa_is_value(p)) return -EEXIST; mapping->nrexceptional--; @@ -336,7 +336,7 @@ page_cache_tree_delete_batch(struct address_space *mapping, break; page = radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); - if (radix_tree_exceptional_entry(page)) + if (xa_is_value(page)) continue; if (!tail_pages) { /* @@ -1355,7 +1355,7 @@ pgoff_t page_cache_next_hole(struct address_space *mapping, struct page *page; page = radix_tree_lookup(&mapping->i_pages, index); - if (!page || radix_tree_exceptional_entry(page)) + if (!page || xa_is_value(page)) break; index++; if (index == 0) @@ -1396,7 +1396,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping, struct page *page; page = radix_tree_lookup(&mapping->i_pages, index); - if (!page || radix_tree_exceptional_entry(page)) + if (!page || xa_is_value(page)) break; index--; if (index == ULONG_MAX) @@ -1539,7 +1539,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, repeat: page = find_get_entry(mapping, offset); - if (radix_tree_exceptional_entry(page)) + if (xa_is_value(page)) page = NULL; if (!page) goto no_page; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a31d740e6cd1..4820e4adf853 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1369,7 +1369,7 @@ static void collapse_shmem(struct mm_struct *mm, page = radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); - if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) { + if (xa_is_value(page) || !PageUptodate(page)) { xa_unlock_irq(&mapping->i_pages); /* swap in or instantiate fallocated page */ if (shmem_getpage(mapping->host, index, &page, diff --git a/mm/madvise.c b/mm/madvise.c index 972a9eaa898b..9d802566c494 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -251,7 +251,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; page = find_get_entry(mapping, index); - if (!radix_tree_exceptional_entry(page)) { + if (!xa_is_value(page)) { if (page) put_page(page); continue; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e79cb59552d9..29d9d1a69b36 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4750,7 +4750,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma, /* shmem/tmpfs may report page out on swap: account for that too. */ if (shmem_mapping(mapping)) { page = find_get_entry(mapping, pgoff); - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { swp_entry_t swp = radix_to_swp_entry(page); if (do_memsw_account()) *entry = swp; diff --git a/mm/mincore.c b/mm/mincore.c index fc37afe226e6..4985965aa20a 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -66,7 +66,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) * shmem/tmpfs may return swap: account for swapcache * page too. 
*/ - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { swp_entry_t swp = radix_to_swp_entry(page); page = find_get_page(swap_address_space(swp), swp_offset(swp)); diff --git a/mm/readahead.c b/mm/readahead.c index 4e630143a0ba..de657077d41d 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -179,7 +179,7 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping, rcu_read_lock(); page = radix_tree_lookup(&mapping->i_pages, page_offset); rcu_read_unlock(); - if (page && !radix_tree_exceptional_entry(page)) { + if (page && !xa_is_value(page)) { /* * Page already present? Kick off the current batch of * contiguous pages before continuing with the next diff --git a/mm/shmem.c b/mm/shmem.c index 446942677cd4..c1062760fe41 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -709,7 +709,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, continue; } - if (radix_tree_exceptional_entry(page)) + if (xa_is_value(page)) swapped++; if (need_resched()) { @@ -824,7 +824,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (index >= end) break; - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { if (unfalloc) continue; nr_swaps_freed += !shmem_free_swap(mapping, @@ -921,7 +921,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (index >= end) break; - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { if (unfalloc) continue; if (shmem_free_swap(mapping, index, page)) { @@ -1643,7 +1643,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, repeat: swap.val = 0; page = find_lock_entry(mapping, index); - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { swap = radix_to_swp_entry(page); page = NULL; } @@ -2578,7 +2578,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping, index = indices[i]; } page = pvec.pages[i]; - if (page && !radix_tree_exceptional_entry(page)) { + if (page && !xa_is_value(page)) { if (!PageUptodate(page)) page = NULL; } diff --git a/mm/swap.c b/mm/swap.c index 26fc9b5f1b6c..4c5c7fcc6e46 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -965,7 +965,7 @@ void pagevec_remove_exceptionals(struct pagevec *pvec) for (i = 0, j = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; - if (!radix_tree_exceptional_entry(page)) + if (!xa_is_value(page)) pvec->pages[j++] = page; } pvec->nr = j; diff --git a/mm/truncate.c b/mm/truncate.c index 1d2fb2dca96f..ed778555c9f3 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -70,7 +70,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, return; for (j = 0; j < pagevec_count(pvec); j++) - if (radix_tree_exceptional_entry(pvec->pages[j])) + if (xa_is_value(pvec->pages[j])) break; if (j == pagevec_count(pvec)) @@ -85,7 +85,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, struct page *page = pvec->pages[i]; pgoff_t index = indices[i]; - if (!radix_tree_exceptional_entry(page)) { + if (!xa_is_value(page)) { pvec->pages[j++] = page; continue; } @@ -347,7 +347,7 @@ void truncate_inode_pages_range(struct address_space *mapping, if (index >= end) break; - if (radix_tree_exceptional_entry(page)) + if (xa_is_value(page)) continue; if (!trylock_page(page)) @@ -442,7 +442,7 @@ void truncate_inode_pages_range(struct address_space *mapping, break; } - if (radix_tree_exceptional_entry(page)) + if (xa_is_value(page)) continue; lock_page(page); @@ -561,7 +561,7 @@ unsigned long 
invalidate_mapping_pages(struct address_space *mapping, if (index > end) break; - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { invalidate_exceptional_entry(mapping, index, page); continue; @@ -692,7 +692,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, if (index > end) break; - if (radix_tree_exceptional_entry(page)) { + if (xa_is_value(page)) { if (!invalidate_exceptional_entry2(mapping, index, page)) ret = -EBUSY; diff --git a/mm/workingset.c b/mm/workingset.c index 4516dd790129..bb109b3afac2 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -155,8 +155,8 @@ * refault distance will immediately activate the refaulting page. */ -#define EVICTION_SHIFT (RADIX_TREE_EXCEPTIONAL_ENTRY + \ - NODES_SHIFT + \ +#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \ + NODES_SHIFT + \ MEM_CGROUP_ID_SHIFT) #define EVICTION_MASK (~0UL >> EVICTION_SHIFT) @@ -173,20 +173,19 @@ static unsigned int bucket_order __read_mostly; static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction) { eviction >>= bucket_order; + eviction &= EVICTION_MASK; eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; eviction = (eviction << NODES_SHIFT) | pgdat->node_id; - eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT); - return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY); + return xa_mk_value(eviction); } static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, unsigned long *evictionp) { - unsigned long entry = (unsigned long)shadow; + unsigned long entry = xa_to_value(shadow); int memcgid, nid; - entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT; nid = entry & ((1UL << NODES_SHIFT) - 1); entry >>= NODES_SHIFT; memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); @@ -453,7 +452,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, goto out_invalid; for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { if (node->slots[i]) { - if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i]))) + if (WARN_ON_ONCE(!xa_is_value(node->slots[i]))) goto out_invalid; if (WARN_ON_ONCE(!node->exceptional)) goto out_invalid; diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index f620c831a4b5..a5a4494922a6 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -19,7 +19,7 @@ #include "test.h" -#define DUMMY_PTR ((void *)0x12) +#define DUMMY_PTR ((void *)0x10) int item_idr_free(int id, void *p, void *data) { @@ -411,11 +411,11 @@ void ida_check_conv_user(void) int id = ida_alloc(&ida, GFP_NOWAIT); if (id == -ENOMEM) { IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) != - BITS_PER_LONG - 2); + BITS_PER_XA_VALUE); id = ida_alloc(&ida, GFP_KERNEL); } else { IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) == - BITS_PER_LONG - 2); + BITS_PER_XA_VALUE); } IDA_BUG_ON(&ida, id != i); } diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h index 24f13d27a8da..d1635a5bef02 100644 --- a/tools/testing/radix-tree/linux/radix-tree.h +++ b/tools/testing/radix-tree/linux/radix-tree.h @@ -2,7 +2,6 @@ #ifndef _TEST_RADIX_TREE_H #define _TEST_RADIX_TREE_H -#include "generated/map-shift.h" #include "../../../../include/linux/radix-tree.h" extern int kmalloc_verbose; diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 7bf405638b0b..2b4f4dba1882 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -39,12 +39,11 @@ static void __multiorder_tag_test(int index, int 
order) /* * Verify we get collisions for covered indices. We try and fail to - * insert an exceptional entry so we don't leak memory via + * insert a value entry so we don't leak memory via * item_insert_order(). */ for_each_index(i, base, order) { - err = __radix_tree_insert(&tree, i, order, - (void *)(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY)); + err = __radix_tree_insert(&tree, i, order, xa_mk_value(0xA0)); assert(err == -EEXIST); } @@ -380,8 +379,8 @@ static void multiorder_join1(unsigned long index, } /* - * Check that the accounting of exceptional entries is handled correctly - * by joining an exceptional entry to a normal pointer. + * Check that the accounting of value entries is handled correctly + * by joining a value entry to a normal pointer. */ static void multiorder_join2(unsigned order1, unsigned order2) { @@ -391,9 +390,9 @@ static void multiorder_join2(unsigned order1, unsigned order2) void *item2; item_insert_order(&tree, 0, order2); - radix_tree_insert(&tree, 1 << order2, (void *)0x12UL); + radix_tree_insert(&tree, 1 << order2, xa_mk_value(5)); item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL); - assert(item2 == (void *)0x12UL); + assert(item2 == xa_mk_value(5)); assert(node->exceptional == 1); item2 = radix_tree_lookup(&tree, 0); @@ -407,7 +406,7 @@ static void multiorder_join2(unsigned order1, unsigned order2) } /* - * This test revealed an accounting bug for exceptional entries at one point. + * This test revealed an accounting bug for value entries at one point. * Nodes were being freed back into the pool with an elevated exception count * by radix_tree_join() and then radix_tree_split() was failing to zero the * count of exceptional entries. @@ -421,16 +420,16 @@ static void multiorder_join3(unsigned int order) unsigned long i; for (i = 0; i < (1 << order); i++) { - radix_tree_insert(&tree, i, (void *)0x12UL); + radix_tree_insert(&tree, i, xa_mk_value(5)); } - radix_tree_join(&tree, 0, order, (void *)0x16UL); + radix_tree_join(&tree, 0, order, xa_mk_value(7)); rcu_barrier(); radix_tree_split(&tree, 0, 0); radix_tree_for_each_slot(slot, &tree, &iter, 0) { - radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL); + radix_tree_iter_replace(&tree, &iter, slot, xa_mk_value(5)); } __radix_tree_lookup(&tree, 0, &node, NULL); @@ -517,10 +516,10 @@ static void __multiorder_split2(int old_order, int new_order) struct radix_tree_node *node; void *item; - __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + __radix_tree_insert(&tree, 0, old_order, xa_mk_value(5)); item = __radix_tree_lookup(&tree, 0, &node, NULL); - assert(item == (void *)0x12); + assert(item == xa_mk_value(5)); assert(node->exceptional > 0); radix_tree_split(&tree, 0, new_order); @@ -530,7 +529,7 @@ static void __multiorder_split2(int old_order, int new_order) } item = __radix_tree_lookup(&tree, 0, &node, NULL); - assert(item != (void *)0x12); + assert(item != xa_mk_value(5)); assert(node->exceptional == 0); item_kill_tree(&tree); @@ -544,40 +543,40 @@ static void __multiorder_split3(int old_order, int new_order) struct radix_tree_node *node; void *item; - __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + __radix_tree_insert(&tree, 0, old_order, xa_mk_value(5)); item = __radix_tree_lookup(&tree, 0, &node, NULL); - assert(item == (void *)0x12); + assert(item == xa_mk_value(5)); assert(node->exceptional > 0); radix_tree_split(&tree, 0, new_order); radix_tree_for_each_slot(slot, &tree, &iter, 0) { - radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16); + radix_tree_iter_replace(&tree, 
&iter, slot, xa_mk_value(7)); } item = __radix_tree_lookup(&tree, 0, &node, NULL); - assert(item == (void *)0x16); + assert(item == xa_mk_value(7)); assert(node->exceptional > 0); item_kill_tree(&tree); - __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + __radix_tree_insert(&tree, 0, old_order, xa_mk_value(5)); item = __radix_tree_lookup(&tree, 0, &node, NULL); - assert(item == (void *)0x12); + assert(item == xa_mk_value(5)); assert(node->exceptional > 0); radix_tree_split(&tree, 0, new_order); radix_tree_for_each_slot(slot, &tree, &iter, 0) { if (iter.index == (1 << new_order)) radix_tree_iter_replace(&tree, &iter, slot, - (void *)0x16); + xa_mk_value(7)); else radix_tree_iter_replace(&tree, &iter, slot, NULL); } item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL); - assert(item == (void *)0x16); + assert(item == xa_mk_value(7)); assert(node->count == node->exceptional); do { node = node->parent; @@ -610,13 +609,13 @@ static void multiorder_account(void) item_insert_order(&tree, 0, 5); - __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12); + __radix_tree_insert(&tree, 1 << 5, 5, xa_mk_value(5)); __radix_tree_lookup(&tree, 0, &node, NULL); assert(node->count == node->exceptional * 2); radix_tree_delete(&tree, 1 << 5); assert(node->exceptional == 0); - __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12); + __radix_tree_insert(&tree, 1 << 5, 5, xa_mk_value(5)); __radix_tree_lookup(&tree, 1 << 5, &node, &slot); assert(node->count == node->exceptional * 2); __radix_tree_replace(&tree, node, slot, NULL, NULL); diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index def6015570b2..62de66c314b7 100644 --- a/tools/te
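The tagged-pointer side of the conversion (used by the erofs hunks
above) can be sketched the same way.  The helper bodies mirror the
xa_tag_pointer()/xa_untag_pointer()/xa_pointer_tag() definitions this
patch adds to include/linux/xarray.h; the main() harness is again
illustrative only:

#include <assert.h>

/* Mirrors xa_tag_pointer(): store a small tag in the low bits. */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/* Mirrors xa_untag_pointer(): mask the tag off again. */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/* Mirrors xa_pointer_tag(): read back the low two bits. */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}

int main(void)
{
	static long obj;			/* at least 4-byte aligned */
	void *entry = xa_tag_pointer(&obj, 3);	/* valid tags are 0, 1 and 3 */

	assert(xa_untag_pointer(entry) == (void *)&obj);
	assert(xa_pointer_tag(entry) == 3);
	return 0;
}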