author		Linus Torvalds <torvalds@linux-foundation.org>	2022-12-14 12:20:00 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-14 12:20:00 -0800
commit		48ea09cddae0b794cde2070f106ef676703dbcd3 (patch)
tree		d4b76e71ee11468d8a83c852ebdf1405fae73927 /lib
parent		ad76bf1ff18e059d64b70047940d298641d4cc2f (diff)
parent		d272e01fa0a2f15c5c331a37cd99c6875c7b7186 (diff)
Merge tag 'hardening-v6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kernel hardening updates from Kees Cook:

 - Convert flexible array members, fix -Wstringop-overflow warnings, and
   fix KCFI function type mismatches that went ignored by maintainers
   (Gustavo A. R. Silva, Nathan Chancellor, Kees Cook)

 - Remove the remaining side-effect users of ksize() by converting
   dma-buf, btrfs, and coredump to using kmalloc_size_roundup(), add more
   __alloc_size attributes, and introduce full testing of all allocator
   functions. Finally remove the ksize() side-effect so that each
   allocation-aware checker can finally behave without exceptions

 - Introduce oops_limit (default 10,000) and warn_limit (default off) to
   provide greater granularity of control for panic_on_oops and
   panic_on_warn (Jann Horn, Kees Cook)

 - Introduce overflows_type() and castable_to_type() helpers for cleaner
   overflow checking

 - Improve code generation for strscpy() and update str*() kern-doc

 - Convert strscpy and siphash tests to KUnit, and expand memcpy tests

 - Always use a non-NULL argument for prepare_kernel_cred()

 - Disable structleak plugin in FORTIFY KUnit test (Anders Roxell)

 - Adjust orphan linker section checking to respect CONFIG_WERROR (Xin Li)

 - Make sure siginfo is cleared for forced SIGKILL (haifeng.xu)

 - Fix um vs FORTIFY warnings for always-NULL arguments

* tag 'hardening-v6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (31 commits)
  ksmbd: replace one-element arrays with flexible-array members
  hpet: Replace one-element array with flexible-array member
  um: virt-pci: Avoid GCC non-NULL warning
  signal: Initialize the info in ksignal
  lib: fortify_kunit: build without structleak plugin
  panic: Expose "warn_count" to sysfs
  panic: Introduce warn_limit
  panic: Consolidate open-coded panic_on_warn checks
  exit: Allow oops_limit to be disabled
  exit: Expose "oops_count" to sysfs
  exit: Put an upper limit on how often we can oops
  panic: Separate sysctl logic from CONFIG_SMP
  mm/pgtable: Fix multiple -Wstringop-overflow warnings
  mm: Make ksize() a reporting-only function
  kunit/fortify: Validate __alloc_size attribute results
  drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid()
  drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid()
  driver core: Add __alloc_size hint to devm allocators
  overflow: Introduce overflows_type() and castable_to_type()
  coredump: Proactively round up to kmalloc bucket size
  ...
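
Two of the changes above are easiest to see in a short sketch. The fragment below is not part of this merge; it is a hypothetical driver-style example assuming the overflows_type() helper from <linux/overflow.h> and the kmalloc_size_roundup() pattern that replaces side-effect uses of ksize(). example_grow_buffer() and example_check_len() are made-up names.

	#include <linux/errno.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	/* Hypothetical: size a buffer up front instead of querying ksize() later. */
	static void *example_grow_buffer(size_t want, size_t *usable, gfp_t gfp)
	{
		/* Round up to the bucket kmalloc() would use, then ask for all of it. */
		size_t full = kmalloc_size_roundup(want);
		void *buf = kmalloc(full, gfp);

		if (buf)
			*usable = full;
		return buf;
	}

	/* Hypothetical: reject a u64 length that cannot be represented as an int. */
	static int example_check_len(u64 len)
	{
		if (overflows_type(len, int))
			return -EINVAL;
		return 0;
	}

The point of the ksize() work is that the usable size is decided before the allocation instead of being discovered afterwards, which is what lets allocation-aware checkers stop special-casing it.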
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                                      |  28
-rw-r--r--  lib/Makefile                                           |   7
-rw-r--r--  lib/fortify_kunit.c                                    | 255
-rw-r--r--  lib/memcpy_kunit.c                                     | 205
-rw-r--r--  lib/overflow_kunit.c                                   | 381
-rw-r--r--  lib/siphash_kunit.c (renamed from lib/test_siphash.c)  | 165
-rw-r--r--  lib/string.c                                           |  82
-rw-r--r--  lib/strscpy_kunit.c                                    | 142
-rw-r--r--  lib/test_strscpy.c                                     | 150
-rw-r--r--  lib/ubsan.c                                            |   3
10 files changed, 1075 insertions, 343 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1b2bdc02abf4..6831b798152d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2234,9 +2234,6 @@ config STRING_SELFTEST
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
-config TEST_STRSCPY
- tristate "Test strscpy*() family of functions at runtime"
-
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
@@ -2271,15 +2268,6 @@ config TEST_RHASHTABLE
If unsure, say N.
-config TEST_SIPHASH
- tristate "Perform selftest on siphash functions"
- help
- Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
- functions on boot (or module load).
-
- This is intended to help people writing architecture-specific
- optimized versions. If unsure, say N.
-
config TEST_IDA
tristate "Perform selftest on IDA functions"
@@ -2607,6 +2595,22 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
+config STRSCPY_KUNIT_TEST
+ tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+
+config SIPHASH_KUNIT_TEST
+ tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
+ functions on boot (or module load).
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
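
Both new entries default to KUNIT_ALL_TESTS, so an all-tests KUnit run picks them up automatically. To build only the suites touched by this series, a minimal .kunitconfig along these lines should work with the usual tools/testing/kunit/kunit.py run workflow; this fragment is illustrative, not part of the patch, and the fortify suite may additionally need CONFIG_FORTIFY_SOURCE=y.

	CONFIG_KUNIT=y
	CONFIG_FORTIFY_KUNIT_TEST=y
	CONFIG_MEMCPY_KUNIT_TEST=y
	CONFIG_OVERFLOW_KUNIT_TEST=y
	CONFIG_STRSCPY_KUNIT_TEST=y
	CONFIG_SIPHASH_KUNIT_TEST=y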
diff --git a/lib/Makefile b/lib/Makefile
index 59bd7c2f793a..4d9461bfea42 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -62,7 +62,6 @@ obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
@@ -82,7 +81,6 @@ obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
-obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
@@ -377,10 +375,15 @@ obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 409af07f340a..c8c33cbaae9e 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -16,7 +16,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
@@ -60,9 +63,261 @@ static void control_flow_split_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
+#define KUNIT_EXPECT_BOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bos on " name "\n")
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ /* Silence "unused variable 'expected'" warning. */ \
+ KUNIT_EXPECT_EQ(test, expected, expected)
+#else
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bdos on " name "\n")
+#endif
+
+/* If the expected size is a constant value, __bos can see it. */
+#define check_const(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* If the expected size is NOT a constant value, __bos CANNOT see it. */
+#define check_dynamic(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* Assortment of constant-value kinda-edge cases. */
+#define CONST_TEST_BODY(TEST_alloc) do { \
+ /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
+ if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
+ TEST_alloc(check_const, 0, 0); \
+ TEST_alloc(check_const, 1, 1); \
+ TEST_alloc(check_const, 128, 128); \
+ TEST_alloc(check_const, 1023, 1023); \
+ TEST_alloc(check_const, 1025, 1025); \
+ TEST_alloc(check_const, 4096, 4096); \
+ TEST_alloc(check_const, 4097, 4097); \
+} while (0)
+
+static volatile size_t zero_size;
+static volatile size_t unknown_size = 50;
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define DYNAMIC_TEST_BODY(TEST_alloc) \
+ kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
+#else
+#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
+ size_t size = unknown_size; \
+ \
+ /* \
+ * Expected size is "size" in each test, before it is then \
+ * internally incremented in each test. Requires we disable \
+ * -Wunsequenced. \
+ */ \
+ TEST_alloc(check_dynamic, size, size++); \
+ /* Make sure incrementing actually happened. */ \
+ KUNIT_EXPECT_NE(test, size, unknown_size); \
+} while (0)
+#endif
+
+#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
+static void alloc_size_##allocator##_const_test(struct kunit *test) \
+{ \
+ CONST_TEST_BODY(TEST_##allocator); \
+} \
+static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+{ \
+ DYNAMIC_TEST_BODY(TEST_##allocator); \
+}
+
+#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ void *orig; \
+ size_t len; \
+ \
+ checker(expected_size, kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kzalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, __kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc(orig, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
+ kfree(p)); \
+ \
+ len = 11; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
+
+/* Sizes are in pages, not bytes. */
+#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ checker((expected_pages) * PAGE_SIZE, \
+ vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+
+/* Sizes are in pages (and open-coded for side-effects), not bytes. */
+#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ size_t prev_size; \
+ void *orig; \
+ \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ \
+ prev_size = (expected_pages) * PAGE_SIZE; \
+ orig = kvmalloc(prev_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker(((expected_pages) * PAGE_SIZE) * 2, \
+ kvrealloc(orig, prev_size, \
+ ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
+
+#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ const char dev_name[] = "fortify-test"; \
+ struct device *dev; \
+ void *orig; \
+ size_t len; \
+ \
+ /* Create dummy device for devm_kmalloc()-family tests. */ \
+ dev = root_device_register(dev_name); \
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
+ "Cannot register test device\n"); \
+ \
+ checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ orig = devm_kmalloc(dev, alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ len = 4; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ device_unregister(dev); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+
static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(known_sizes_test),
KUNIT_CASE(control_flow_split_test),
+ KUNIT_CASE(alloc_size_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_vmalloc_const_test),
+ KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_kvmalloc_const_test),
+ KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
{}
};
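
What these new cases exercise is the contract between __alloc_size() annotations and the compiler's object-size builtins. As a stand-alone sketch (not from the patch; my_alloc() is invented and userspace malloc() stands in for the kernel allocators), a recent GCC or Clang lets __builtin_dynamic_object_size() see through an annotated allocator:

	#include <stdio.h>
	#include <stdlib.h>

	/* Invented example allocator carrying the same attribute the kernel's
	 * kmalloc() family gained: argument 1 is the allocation size.
	 */
	__attribute__((alloc_size(1)))
	static void *my_alloc(size_t size)
	{
		return malloc(size);
	}

	int main(int argc, char *argv[])
	{
		size_t n = (size_t)argc * 50;	/* not a compile-time constant */
		void *p = my_alloc(n);

		/* With __builtin_dynamic_object_size() support this prints n;
		 * __builtin_object_size(p, 1) would report SIZE_MAX because n is
		 * not known at compile time, which is exactly the split the
		 * check_const()/check_dynamic() macros above encode.
		 */
		printf("%zu\n", (size_t)__builtin_dynamic_object_size(p, 1));
		free(p);
		return 0;
	}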
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 7513e6d5dc90..89128551448d 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -292,6 +292,208 @@ static void memset_test(struct kunit *test)
#undef TEST_OP
}
+static u8 large_src[1024];
+static u8 large_dst[2048];
+static const u8 large_zero[2048];
+
+static void set_random_nonzero(struct kunit *test, u8 *byte)
+{
+ int failed_rng = 0;
+
+ while (*byte == 0) {
+ get_random_bytes(byte, 1);
+ KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100,
+ "Is the RNG broken?");
+ }
+}
+
+static void init_large(struct kunit *test)
+{
+
+ /* Get many bit patterns. */
+ get_random_bytes(large_src, ARRAY_SIZE(large_src));
+
+ /* Make sure we have non-zero edges. */
+ set_random_nonzero(test, &large_src[0]);
+ set_random_nonzero(test, &large_src[ARRAY_SIZE(large_src) - 1]);
+
+ /* Explicitly zero the entire destination. */
+ memset(large_dst, 0, ARRAY_SIZE(large_dst));
+}
+
+/*
+ * Instead of an indirect function call for "copy" or a giant macro,
+ * use a bool to pick memcpy or memmove.
+ */
+static void copy_large_test(struct kunit *test, bool use_memmove)
+{
+ init_large(test);
+
+ /* Copy a growing number of non-overlapping bytes ... */
+ for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
+ /* Over a shifting destination window ... */
+ for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) {
+ int right_zero_pos = offset + bytes;
+ int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ /* Copy! */
+ if (use_memmove)
+ memmove(large_dst + offset, large_src, bytes);
+ else
+ memcpy(large_dst + offset, large_src, bytes);
+
+ /* Did we touch anything before the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst, large_zero, offset), 0,
+ "with size %d at offset %d", bytes, offset);
+ /* Did we touch anything after the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Are we byte-for-byte exact across the copy? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst + offset, large_src, bytes), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Zero out what we copied for the next cycle. */
+ memset(large_dst + offset, 0, bytes);
+ }
+ /* Avoid stall warnings if this loop gets slow. */
+ cond_resched();
+ }
+}
+
+static void memcpy_large_test(struct kunit *test)
+{
+ copy_large_test(test, false);
+}
+
+static void memmove_large_test(struct kunit *test)
+{
+ copy_large_test(test, true);
+}
+
+/*
+ * On the assumption that boundary conditions are going to be the most
+ * sensitive, instead of taking a full step (inc) each iteration,
+ * take single index steps for at least the first "inc"-many indexes
+ * from the "start" and at least the last "inc"-many indexes before
+ * the "end". When in the middle, take full "inc"-wide steps. For
+ * example, calling next_step(idx, 1, 15, 3) with idx starting at 0
+ * would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15.
+ */
+static int next_step(int idx, int start, int end, int inc)
+{
+ start += inc;
+ end -= inc;
+
+ if (idx < start || idx + inc > end)
+ inc = 1;
+ return idx + inc;
+}
+
+static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off)
+{
+ int left_zero_pos, left_zero_size;
+ int right_zero_pos, right_zero_size;
+ int src_pos, src_orig_pos, src_size;
+ int pos;
+
+ /* Place the source in the destination buffer. */
+ memcpy(&large_dst[s_off], large_src, bytes);
+
+ /* Copy to destination offset. */
+ memmove(&large_dst[d_off], &large_dst[s_off], bytes);
+
+ /* Make sure destination entirely matches. */
+ KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Calculate the expected zero spans. */
+ if (s_off < d_off) {
+ left_zero_pos = 0;
+ left_zero_size = s_off;
+
+ right_zero_pos = d_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = s_off;
+ src_orig_pos = 0;
+ src_size = d_off - s_off;
+ } else {
+ left_zero_pos = 0;
+ left_zero_size = d_off;
+
+ right_zero_pos = s_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = d_off + bytes;
+ src_orig_pos = src_pos - s_off;
+ src_size = right_zero_pos - src_pos;
+ }
+
+ /* Check non-overlapping source is unchanged.*/
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Check leading buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+ /* Check trailing buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Zero out everything not already zeroed.*/
+ pos = left_zero_pos + left_zero_size;
+ memset(&large_dst[pos], 0, right_zero_pos - pos);
+}
+
+static void memmove_overlap_test(struct kunit *test)
+{
+ /*
+ * Running all possible offset and overlap combinations takes a
+ * very long time. Instead, only check up to 128 bytes offset
+ * into the destination buffer (which should result in crossing
+ * cachelines), with a step size of 1 through 7 to try to skip some
+ * redundancy.
+ */
+ static const int offset_max = 128; /* less than ARRAY_SIZE(large_src); */
+ static const int bytes_step = 7;
+ static const int window_step = 7;
+
+ static const int bytes_start = 1;
+ static const int bytes_end = ARRAY_SIZE(large_src) + 1;
+
+ init_large(test);
+
+ /* Copy a growing number of overlapping bytes ... */
+ for (int bytes = bytes_start; bytes < bytes_end;
+ bytes = next_step(bytes, bytes_start, bytes_end, bytes_step)) {
+
+ /* Over a shifting destination window ... */
+ for (int d_off = 0; d_off < offset_max; d_off++) {
+ int s_start = max(d_off - bytes, 0);
+ int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src));
+
+ /* Over a shifting source window ... */
+ for (int s_off = s_start; s_off < s_end;
+ s_off = next_step(s_off, s_start, s_end, window_step))
+ inner_loop(test, bytes, d_off, s_off);
+
+ /* Avoid stall warnings. */
+ cond_resched();
+ }
+ }
+}
+
static void strtomem_test(struct kunit *test)
{
static const char input[sizeof(unsigned long)] = "hi";
@@ -347,7 +549,10 @@ static void strtomem_test(struct kunit *test)
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
+ KUNIT_CASE(memcpy_large_test),
KUNIT_CASE(memmove_test),
+ KUNIT_CASE(memmove_large_test),
+ KUNIT_CASE(memmove_overlap_test),
KUNIT_CASE(strtomem_test),
{}
};
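
The boundary-biased stepping documented above for next_step() can be replayed outside the kernel. This stand-alone snippet (not part of the patch) reproduces the worked example from the comment:

	#include <stdio.h>

	/* Same logic as the test's next_step(): single steps near the start
	 * and end of the range, full "inc"-wide steps in the middle.
	 */
	static int next_step(int idx, int start, int end, int inc)
	{
		start += inc;
		end -= inc;

		if (idx < start || idx + inc > end)
			inc = 1;
		return idx + inc;
	}

	int main(void)
	{
		int idx = 0;

		/* Prints: 1 2 3 4 7 10 11 12 13 14 15 */
		while ((idx = next_step(idx, 1, 15, 3)) <= 15)
			printf("%d ", idx);
		printf("\n");
		return 0;
	}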
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index b8556a2e7bb1..dcd3ba102db6 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -736,6 +736,384 @@ static void overflow_size_helpers_test(struct kunit *test)
#undef check_one_size_helper
}
+static void overflows_type_test(struct kunit *test)
+{
+ int count = 0;
+ unsigned int var;
+
+#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \
+ bool __of = func(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\
+ of ? "" : " not"); \
+ count++; \
+} while (0)
+
+/* Args are: first type, second type, value, overflow expected */
+#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \
+ __t1 t1 = (v); \
+ __t2 t2; \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\
+} while (0)
+
+ TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true);
+ TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false);
+#endif
+
+ /* Check for macro side-effects. */
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true);
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true);
+
+ kunit_info(test, "%d overflows_type() tests finished\n", count);
+#undef TEST_OVERFLOWS_TYPE
+#undef __TEST_OVERFLOWS_TYPE
+}
+
+static void same_type_test(struct kunit *test)
+{
+ int count = 0;
+ int var;
+
+#define TEST_SAME_TYPE(t1, t2, same) do { \
+ typeof(t1) __t1h = type_max(t1); \
+ typeof(t1) __t1l = type_min(t1); \
+ typeof(t2) __t2h = type_max(t2); \
+ typeof(t2) __t2l = type_min(t2); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \
+} while (0)
+
+#if BITS_PER_LONG == 64
+# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m)
+#else
+# define TEST_SAME_TYPE64(base, t, m) do { } while (0)
+#endif
+
+#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \
+do { \
+ TEST_SAME_TYPE(base, u8, mu8); \
+ TEST_SAME_TYPE(base, u16, mu16); \
+ TEST_SAME_TYPE(base, u32, mu32); \
+ TEST_SAME_TYPE(base, s8, ms8); \
+ TEST_SAME_TYPE(base, s16, ms16); \
+ TEST_SAME_TYPE(base, s32, ms32); \
+ TEST_SAME_TYPE64(base, u64, mu64); \
+ TEST_SAME_TYPE64(base, s64, ms64); \
+} while (0)