mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/
synced 2025-04-19 20:58:31 +09:00

Merge tag 'mm-stable-2025-03-30-16-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:

- The series "Enable strict percpu address space checks" from Uros Bizjak uses x86 named address space qualifiers to provide compile-time checking of percpu area accesses. This has caused a small amount of fallout - two or three issues were reported. In all cases the calling code was found to be incorrect.
- The series "Some cleanup for memcg" from Chen Ridong implements some relatively monir cleanups for the memcontrol code. - The series "mm: fixes for device-exclusive entries (hmm)" from David Hildenbrand fixes a boatload of issues which David found then using device-exclusive PTE entries when THP is enabled. More work is needed, but this makes thins better - our own HMM selftests now succeed. - The series "mm: zswap: remove z3fold and zbud" from Yosry Ahmed remove the z3fold and zbud implementations. They have been deprecated for half a year and nobody has complained. - The series "mm: further simplify VMA merge operation" from Lorenzo Stoakes implements numerous simplifications in this area. No runtime effects are anticipated. - The series "mm/madvise: remove redundant mmap_lock operations from process_madvise()" from SeongJae Park rationalizes the locking in the madvise() implementation. Performance gains of 20-25% were observed in one MADV_DONTNEED microbenchmark. - The series "Tiny cleanup and improvements about SWAP code" from Baoquan He contains a number of touchups to issues which Baoquan noticed when working on the swap code. - The series "mm: kmemleak: Usability improvements" from Catalin Marinas implements a couple of improvements to the kmemleak user-visible output. - The series "mm/damon/paddr: fix large folios access and schemes handling" from Usama Arif provides a couple of fixes for DAMON's handling of large folios. - The series "mm/damon/core: fix wrong and/or useless damos_walk() behaviors" from SeongJae Park fixes a few issues with the accuracy of kdamond's walking of DAMON regions. - The series "expose mapping wrprotect, fix fb_defio use" from Lorenzo Stoakes changes the interaction between framebuffer deferred-io and core MM. No functional changes are anticipated - this is preparatory work for the future removal of page structure fields. - The series "mm/damon: add support for hugepage_size DAMOS filter" from Usama Arif adds a DAMOS filter which permits the filtering by huge page sizes. - The series "mm: permit guard regions for file-backed/shmem mappings" from Lorenzo Stoakes extends the guard region feature from its present "anon mappings only" state. The feature now covers shmem and file-backed mappings. - The series "mm: batched unmap lazyfree large folios during reclamation" from Barry Song cleans up and speeds up the unmapping for pte-mapped large folios. - The series "reimplement per-vma lock as a refcount" from Suren Baghdasaryan puts the vm_lock back into the vma. Our reasons for pulling it out were largely bogus and that change made the code more messy. This patchset provides small (0-10%) improvements on one microbenchmark. - The series "Docs/mm/damon: misc DAMOS filters documentation fixes and improves" from SeongJae Park does some maintenance work on the DAMON docs. - The series "hugetlb/CMA improvements for large systems" from Frank van der Linden addresses a pile of issues which have been observed when using CMA on large machines. - The series "mm/damon: introduce DAMOS filter type for unmapped pages" from SeongJae Park enables users of DMAON/DAMOS to filter my the page's mapped/unmapped status. - The series "zsmalloc/zram: there be preemption" from Sergey Senozhatsky teaches zram to run its compression and decompression operations preemptibly. - The series "selftests/mm: Some cleanups from trying to run them" from Brendan Jackman fixes a pile of unrelated issues which Brendan encountered while runnimg our selftests. 
- The series "fs/proc/task_mmu: add guard region bit to pagemap" from Lorenzo Stoakes permits userspace to use /proc/pid/pagemap to determine whether a particular page is a guard page. - The series "mm, swap: remove swap slot cache" from Kairui Song removes the swap slot cache from the allocation path - it simply wasn't being effective. - The series "mm: cleanups for device-exclusive entries (hmm)" from David Hildenbrand implements a number of unrelated cleanups in this code. - The series "mm: Rework generic PTDUMP configs" from Anshuman Khandual implements a number of preparatoty cleanups to the GENERIC_PTDUMP Kconfig logic. - The series "mm/damon: auto-tune aggregation interval" from SeongJae Park implements a feedback-driven automatic tuning feature for DAMON's aggregation interval tuning. - The series "Fix lazy mmu mode" from Ryan Roberts fixes some issues in powerpc, sparc and x86 lazy MMU implementations. Ryan did this in preparation for implementing lazy mmu mode for arm64 to optimize vmalloc. - The series "mm/page_alloc: Some clarifications for migratetype fallback" from Brendan Jackman reworks some commentary to make the code easier to follow. - The series "page_counter cleanup and size reduction" from Shakeel Butt cleans up the page_counter code and fixes a size increase which we accidentally added late last year. - The series "Add a command line option that enables control of how many threads should be used to allocate huge pages" from Thomas Prescher does that. It allows the careful operator to significantly reduce boot time by tuning the parallalization of huge page initialization. - The series "Fix calculations in trace_balance_dirty_pages() for cgwb" from Tang Yizhou fixes the tracing output from the dirty page balancing code. - The series "mm/damon: make allow filters after reject filters useful and intuitive" from SeongJae Park improves the handling of allow and reject filters. Behaviour is made more consistent and the documention is updated accordingly. - The series "Switch zswap to object read/write APIs" from Yosry Ahmed updates zswap to the new object read/write APIs and thus permits the removal of some legacy code from zpool and zsmalloc. - The series "Some trivial cleanups for shmem" from Baolin Wang does as it claims. - The series "fs/dax: Fix ZONE_DEVICE page reference counts" from Alistair Popple regularizes the weird ZONE_DEVICE page refcount handling in DAX, permittig the removal of a number of special-case checks. - The series "refactor mremap and fix bug" from Lorenzo Stoakes is a preparatoty refactoring and cleanup of the mremap() code. - The series "mm: MM owner tracking for large folios (!hugetlb) + CONFIG_NO_PAGE_MAPCOUNT" from David Hildenbrand reworks the manner in which we determine whether a large folio is known to be mapped exclusively into a single MM. - The series "mm/damon: add sysfs dirs for managing DAMOS filters based on handling layers" from SeongJae Park adds a couple of new sysfs directories to ease the management of DAMON/DAMOS filters. - The series "arch, mm: reduce code duplication in mem_init()" from Mike Rapoport consolidates many per-arch implementations of mem_init() into code generic code, where that is practical. - The series "mm/damon/sysfs: commit parameters online via damon_call()" from SeongJae Park continues the cleaning up of sysfs access to DAMON internal data. 
- The series "mm: page_ext: Introduce new iteration API" from Luiz Capitulino reworks the page_ext initialization to fix a boot-time crash which was observed with an unusual combination of compile and cmdline options. - The series "Buddy allocator like (or non-uniform) folio split" from Zi Yan reworks the code to split a folio into smaller folios. The main benefit is lessened memory consumption: fewer post-split folios are generated. - The series "Minimize xa_node allocation during xarry split" from Zi Yan reduces the number of xarray xa_nodes which are generated during an xarray split. - The series "drivers/base/memory: Two cleanups" from Gavin Shan performs some maintenance work on the drivers/base/memory code. - The series "Add tracepoints for lowmem reserves, watermarks and totalreserve_pages" from Martin Liu adds some more tracepoints to the page allocator code. - The series "mm/madvise: cleanup requests validations and classifications" from SeongJae Park cleans up some warts which SeongJae observed during his earlier madvise work. - The series "mm/hwpoison: Fix regressions in memory failure handling" from Shuai Xue addresses two quite serious regressions which Shuai has observed in the memory-failure implementation. - The series "mm: reliable huge page allocator" from Johannes Weiner makes huge page allocations cheaper and more reliable by reducing fragmentation. - The series "Minor memcg cleanups & prep for memdescs" from Matthew Wilcox is preparatory work for the future implementation of memdescs. - The series "track memory used by balloon drivers" from Nico Pache introduces a way to track memory used by our various balloon drivers. - The series "mm/damon: introduce DAMOS filter type for active pages" from Nhat Pham permits users to filter for active/inactive pages, separately for file and anon pages. - The series "Adding Proactive Memory Reclaim Statistics" from Hao Jia separates the proactive reclaim statistics from the direct reclaim statistics. - The series "mm/vmscan: don't try to reclaim hwpoison folio" from Jinjiang Tu fixes our handling of hwpoisoned pages within the reclaim code. * tag 'mm-stable-2025-03-30-16-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (431 commits) mm/page_alloc: remove unnecessary __maybe_unused in order_to_pindex() x86/mm: restore early initialization of high_memory for 32-bits mm/vmscan: don't try to reclaim hwpoison folio mm/hwpoison: introduce folio_contain_hwpoisoned_page() helper cgroup: docs: add pswpin and pswpout items in cgroup v2 doc mm: vmscan: split proactive reclaim statistics from direct reclaim statistics selftests/mm: speed up split_huge_page_test selftests/mm: uffd-unit-tests support for hugepages > 2M docs/mm/damon/design: document active DAMOS filter type mm/damon: implement a new DAMOS filter type for active pages fs/dax: don't disassociate zero page entries MM documentation: add "Unaccepted" meminfo entry selftests/mm: add commentary about 9pfs bugs fork: use __vmalloc_node() for stack allocation docs/mm: Physical Memory: Populate the "Zones" section xen: balloon: update the NR_BALLOON_PAGES state hv_balloon: update the NR_BALLOON_PAGES state balloon_compaction: update the NR_BALLOON_PAGES state meminfo: add a per node counter for balloon drivers mm: remove references to folio in __memcg_kmem_uncharge_page() ...
743 lines
18 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/min_heap.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "mean_and_variance.h"

#include "darray.h"
#include "time_stats.h"

struct closure;

#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

/* type hackery */

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}

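/*
 * kvmalloc(), with two tweaks: allocations of INT_MAX bytes or more go
 * straight to vmalloc(), and __GFP_ZERO is stripped and handled with an
 * explicit memset() so zeroing works on both paths.
 */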
static inline void *bch2_kvmalloc(size_t n, gfp_t flags)
{
	void *p = unlikely(n >= INT_MAX)
		? vmalloc(n)
		: kvmalloc(n, flags & ~__GFP_ZERO);
	if (p && (flags & __GFP_ZERO))
		memset(p, 0, n);
	return p;
}

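/* Allocate/free backing storage for a heap of @_size elements: */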
#define init_heap(heap, _size, gfp)					\
({									\
	(heap)->nr = 0;							\
	(heap)->size = (_size);						\
	(heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
				(gfp));					\
})

#define free_heap(heap)							\
do {									\
	kvfree((heap)->data);						\
	(heap)->data = NULL;						\
} while (0)

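/*
 * Maximum value representable by any signed integer type t, computed without
 * overflowing: (2^(n-2) - 1) * 2 + 1 == 2^(n-1) - 1 for an n-bit type.
 */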
#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

#include "printbuf.h"

#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)

#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)

#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
#define printbuf_indent_add_nextline(_out, _n) bch2_printbuf_indent_add_nextline(_out, _n)
#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)

#define prt_newline(_out)		bch2_prt_newline(_out)
#define prt_tab(_out)			bch2_prt_tab(_out)
#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)

#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
#define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
#define prt_bitflags_vector(...)	bch2_prt_bitflags_vector(__VA_ARGS__)

void bch2_pr_time_units(struct printbuf *, u64);
void bch2_prt_datetime(struct printbuf *, time64_t);

#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
{
	sprintf(out, "%pUb", uuid);
}
#else
#include <uuid/uuid.h>
#endif

static inline void pr_uuid(struct printbuf *out, u8 *uuid)
{
	char uuid_str[40];

	uuid_unparse_lower(uuid, uuid_str);
	prt_printf(out, "%s", uuid_str);
}

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}

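/*
 * Parse a string into *res, dispatching to the right parser based on the
 * type of *res at compile time via type_is(); evaluates to -EINVAL for
 * unsupported types.
 */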
#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define strtoul_safe_restrict(cp, var, min, max)			\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
	else								\
		_r = -EINVAL;						\
	_r;								\
})

#define snprint(out, var)						\
	prt_printf(out,							\
		   type_is(var, int)		? "%i\n"		\
		 : type_is(var, unsigned)	? "%u\n"		\
		 : type_is(var, long)		? "%li\n"		\
		 : type_is(var, unsigned long)	? "%lu\n"		\
		 : type_is(var, s64)		? "%lli\n"		\
		 : type_is(var, u64)		? "%llu\n"		\
		 : type_is(var, char *)		? "%s\n"		\
		 : "%i\n", var)

bool bch2_is_zero(const void *, size_t);

u64 bch2_read_flag_list(const char *, const char * const[]);

void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);

void bch2_print_string_as_lines(const char *prefix, const char *lines);
void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines);

typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);

static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
{
#ifdef __KERNEL__
	prt_printf(out, "%pg", bdev);
#else
	prt_str(out, bdev->name);
#endif
}

void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);

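/*
 * Exponentially weighted moving average: evaluates to the previous average
 * updated with @val, where @weight is the log2 of the decay factor.
 */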
#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);

struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/*
	 * If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough.
	 */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,		(var)->rate.rate);	\
	sysfs_print(name##_rate_bytes,		(var)->rate.rate);	\
	sysfs_print(name##_rate_d_term,		(var)->d_term);		\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		bch2_pd_controller_debug_to_text(out, var);		\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)

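/* container_of(), but evaluates to NULL when given a NULL pointer: */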
#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

static inline struct list_head *list_pop(struct list_head *head)
{
	if (list_empty(head))
		return NULL;

	struct list_head *ret = head->next;
	list_del_init(ret);
	return ret;
}

#define list_pop_entry(head, type, member)				\
	container_of_or_null(list_pop(head), type, member)

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}

void bch2_bio_map(struct bio *bio, void *base, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

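/*
 * Submit a bio accounted against a closure: takes a ref on @cl, which the
 * bio's completion path is expected to drop with closure_put().
 */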
#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

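/*
 * Sleep until @cond is true or kthread_should_stop() returns true;
 * evaluates to 0 if the condition became true, -1 if the thread should
 * exit. The _freezable variant below additionally calls try_to_freeze()
 * after each schedule().
 */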
#define kthread_wait(cond)						\
({									\
	int _ret = 0;							\
									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

u64 bch2_get_random_u64_below(u64);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_corrupt_bio(struct bio *);

static inline void bch2_maybe_corrupt_bio(struct bio *bio, unsigned ratio)
{
	if (ratio && !get_random_u32_below(ratio))
		bch2_corrupt_bio(bio);
}
#else
#define bch2_maybe_corrupt_bio(...)	do {} while (0)
#endif

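/*
 * Copy/move helpers that work in units of u64s; on x86-64 the bulk variants
 * use inline rep movsq. Callers must ensure the regions don't overlap in the
 * unsupported direction - see the EBUG_ON()s.
 */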
static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#if defined(CONFIG_X86_64) && !defined(CONFIG_KMSAN)
	long d0, d1, d2;

	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_down_small(void *dst, const void *src,
					     unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}

static inline void memmove_u64s_down_small(void *dst, const void *src,
					   unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down_small(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#if defined(CONFIG_X86_64) && !defined(CONFIG_KMSAN)
	long d0, d1, d2;

	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}

/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)

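/*
 * Helper for move_gap() below: @nr elements are live and the gap of
 * @size - @nr elements currently starts at index @old_gap; shift elements
 * so that the gap starts at @new_gap instead.
 */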
static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

/* Move the gap in a gap buffer: */
#define move_gap(_d, _new_gap)						\
do {									\
	BUG_ON(_new_gap > (_d)->nr);					\
	BUG_ON((_d)->gap > (_d)->nr);					\
									\
	__move_gap((_d)->data, sizeof((_d)->data[0]),			\
		   (_d)->nr, (_d)->size, (_d)->gap, _new_gap);		\
	(_d)->gap = _new_gap;						\
} while (0)

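/*
 * In-place bubble sort with early exit when a pass makes no swaps; only
 * suitable for small arrays, but unlike sort() @_cmp compares elements by
 * value rather than through a comparison function pointer.
 */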
#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _last;						\
	bool _swapped = true;						\
									\
	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
		_swapped = false;					\
		for (_i = 0; _i < _last; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)

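/* Sum a percpu counter across all possible CPUs: */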
#define per_cpu_sum(_p)							\
({									\
	TYPEOF_UNQUAL(*_p) _ret = 0;					\
									\
	int cpu;							\
	for_each_possible_cpu(cpu)					\
		_ret += *per_cpu_ptr(_p, cpu);				\
	_ret;								\
})

static inline u64 percpu_u64_get(u64 __percpu *src)
{
	return per_cpu_sum(src);
}

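/*
 * Set a distributed percpu counter to @src: zero every CPU's slot, then
 * store the full value in the current CPU's slot. Not atomic with respect
 * to concurrent updates.
 */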
static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, src);
}

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);

#define cmp_int(l, r)		((l > r) - (l < r))

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

static inline int cmp_le32(__le32 l, __le32 r)
{
	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
}

#include <linux/uuid.h>

static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
	return l.len == r.len && !memcmp(l.name, r.name, l.len);
}

void bch2_darray_str_exit(darray_str *);
int bch2_split_devs(const char *, darray_str *);

#ifdef __KERNEL__

__must_check
static inline int copy_to_user_errcode(void __user *to, const void *from, unsigned long n)
{
	return copy_to_user(to, from, n) ? -EFAULT : 0;
}

__must_check
static inline int copy_from_user_errcode(void *to, const void __user *from, unsigned long n)
{
	return copy_from_user(to, from, n) ? -EFAULT : 0;
}

#endif

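/* Set or clear a bit depending on @v, using the atomic bitops: */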
static inline void mod_bit(long nr, volatile unsigned long *addr, bool v)
{
	if (v)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static inline void __set_bit_le64(size_t bit, __le64 *addr)
{
	addr[bit / 64] |= cpu_to_le64(BIT_ULL(bit % 64));
}

static inline void __clear_bit_le64(size_t bit, __le64 *addr)
{
	addr[bit / 64] &= ~cpu_to_le64(BIT_ULL(bit % 64));
}

static inline bool test_bit_le64(size_t bit, __le64 *addr)
{
	return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
}

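/* Copy @len bytes from @_src to @_dst, reversing the byte order: */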
static inline void memcpy_swab(void *_dst, void *_src, size_t len)
{
	u8 *dst = _dst + len;
	u8 *src = _src;

	while (len--)
		*--dst = *src++;
}

#endif /* _BCACHEFS_UTIL_H */