mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/
synced 2025-04-19 20:58:31 +09:00
bcachefs: bch_dev_usage_full
All the fastpaths that need device usage don't need the sector totals or fragmentation, just bucket counts. Split bch_dev_usage up into two different versions, the normal one with just bucket counts. This is also a stack usage improvement, since we have a bch_dev_usage on the stack in the allocation path. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
9180ad2e16
commit
955ba7b5ea
@ -321,11 +321,11 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
|
||||
{
|
||||
u64 want_free = ca->mi.nbuckets >> 7;
|
||||
u64 free = max_t(s64, 0,
|
||||
u.d[BCH_DATA_free].buckets
|
||||
+ u.d[BCH_DATA_need_discard].buckets
|
||||
u.buckets[BCH_DATA_free]
|
||||
+ u.buckets[BCH_DATA_need_discard]
|
||||
- bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
|
||||
|
||||
return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
|
||||
return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]);
|
||||
}
|
||||
|
||||
void bch2_dev_do_invalidates(struct bch_dev *);
|
||||
|
@ -469,7 +469,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
|
||||
prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
|
||||
prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
|
||||
prt_printf(&buf, "blocking\t%u\n", cl != NULL);
|
||||
prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
|
||||
prt_printf(&buf, "free\t%llu\n", usage->buckets[BCH_DATA_free]);
|
||||
prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
|
||||
prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
|
||||
bch2_copygc_wait_amount(c),
|
||||
@ -524,10 +524,10 @@ again:
|
||||
bch2_dev_usage_read_fast(ca, usage);
|
||||
avail = dev_buckets_free(ca, *usage, watermark);
|
||||
|
||||
if (usage->d[BCH_DATA_need_discard].buckets > avail)
|
||||
if (usage->buckets[BCH_DATA_need_discard] > avail)
|
||||
bch2_dev_do_discards(ca);
|
||||
|
||||
if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
|
||||
if (usage->buckets[BCH_DATA_need_gc_gens] > avail)
|
||||
bch2_gc_gens_async(c);
|
||||
|
||||
if (should_invalidate_buckets(ca, *usage))
|
||||
@ -1669,7 +1669,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
|
||||
void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
|
||||
{
|
||||
struct bch_fs *c = ca->fs;
|
||||
struct bch_dev_usage stats = bch2_dev_usage_read(ca);
|
||||
struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca);
|
||||
unsigned nr[BCH_DATA_NR];
|
||||
|
||||
memset(nr, 0, sizeof(nr));
|
||||
@ -1692,7 +1692,8 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
|
||||
printbuf_tabstop_push(out, 16);
|
||||
|
||||
prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
|
||||
prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
|
||||
prt_printf(out, "buckets to invalidate\t%llu\r\n",
|
||||
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)));
|
||||
}
|
||||
|
||||
static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
|
||||
|
@ -562,7 +562,8 @@ struct bch_dev {
|
||||
unsigned long *bucket_backpointer_mismatches;
|
||||
unsigned long *bucket_backpointer_empty;
|
||||
|
||||
struct bch_dev_usage __percpu *usage;
|
||||
struct bch_dev_usage_full __percpu
|
||||
*usage;
|
||||
|
||||
/* Allocator: */
|
||||
u64 alloc_cursor[3];
|
||||
|
@ -29,6 +29,12 @@
|
||||
#include <linux/preempt.h>
|
||||
|
||||
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
|
||||
{
|
||||
for (unsigned i = 0; i < BCH_DATA_NR; i++)
|
||||
usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
|
||||
}
|
||||
|
||||
void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
|
||||
{
|
||||
memset(usage, 0, sizeof(*usage));
|
||||
acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
|
||||
@ -75,7 +81,7 @@ bch2_fs_usage_read_short(struct bch_fs *c)
|
||||
|
||||
void bch2_dev_usage_to_text(struct printbuf *out,
|
||||
struct bch_dev *ca,
|
||||
struct bch_dev_usage *usage)
|
||||
struct bch_dev_usage_full *usage)
|
||||
{
|
||||
if (out->nr_tabstops < 5) {
|
||||
printbuf_tabstops_reset(out);
|
||||
@ -1331,7 +1337,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
|
||||
|
||||
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
|
||||
{
|
||||
ca->usage = alloc_percpu(struct bch_dev_usage);
|
||||
ca->usage = alloc_percpu(struct bch_dev_usage_full);
|
||||
if (!ca->usage)
|
||||
return -BCH_ERR_ENOMEM_usage_init;
|
||||
|
||||
|
@ -172,7 +172,16 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
|
||||
void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
|
||||
static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
|
||||
{
|
||||
struct bch_dev_usage_full ret;
|
||||
|
||||
bch2_dev_usage_full_read_fast(ca, &ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);
|
||||
|
||||
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
|
||||
{
|
||||
@ -207,7 +216,7 @@ static inline u64 dev_buckets_free(struct bch_dev *ca,
|
||||
enum bch_watermark watermark)
|
||||
{
|
||||
return max_t(s64, 0,
|
||||
usage.d[BCH_DATA_free].buckets -
|
||||
usage.buckets[BCH_DATA_free] -
|
||||
ca->nr_open_buckets -
|
||||
bch2_dev_buckets_reserved(ca, watermark));
|
||||
}
|
||||
@ -217,10 +226,10 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
|
||||
enum bch_watermark watermark)
|
||||
{
|
||||
return max_t(s64, 0,
|
||||
usage.d[BCH_DATA_free].buckets
|
||||
+ usage.d[BCH_DATA_cached].buckets
|
||||
+ usage.d[BCH_DATA_need_gc_gens].buckets
|
||||
+ usage.d[BCH_DATA_need_discard].buckets
|
||||
usage.buckets[BCH_DATA_free]
|
||||
+ usage.buckets[BCH_DATA_cached]
|
||||
+ usage.buckets[BCH_DATA_need_gc_gens]
|
||||
+ usage.buckets[BCH_DATA_need_discard]
|
||||
- ca->nr_open_buckets
|
||||
- bch2_dev_buckets_reserved(ca, watermark));
|
||||
}
|
||||
|
@ -54,7 +54,12 @@ struct bucket_gens {
|
||||
u8 b[] __counted_by(nbuckets);
|
||||
};
|
||||
|
||||
/* Only info on bucket counts: */
|
||||
struct bch_dev_usage {
|
||||
u64 buckets[BCH_DATA_NR];
|
||||
};
|
||||
|
||||
struct bch_dev_usage_full {
|
||||
struct bch_dev_usage_type {
|
||||
u64 buckets;
|
||||
u64 sectors; /* _compressed_ sectors: */
|
||||
|
@ -350,8 +350,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
|
||||
if (ctx->arg.op == BCH_DATA_OP_scrub) {
|
||||
struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev);
|
||||
if (ca) {
|
||||
struct bch_dev_usage u;
|
||||
bch2_dev_usage_read_fast(ca, &u);
|
||||
struct bch_dev_usage_full u;
|
||||
bch2_dev_usage_full_read_fast(ca, &u);
|
||||
for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
|
||||
if (ctx->arg.scrub.data_types & BIT(i))
|
||||
e.p.sectors_total += u.d[i].sectors;
|
||||
@ -473,7 +473,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
|
||||
struct bch_ioctl_dev_usage __user *user_arg)
|
||||
{
|
||||
struct bch_ioctl_dev_usage arg;
|
||||
struct bch_dev_usage src;
|
||||
struct bch_dev_usage_full src;
|
||||
struct bch_dev *ca;
|
||||
unsigned i;
|
||||
|
||||
@ -493,7 +493,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
|
||||
if (IS_ERR(ca))
|
||||
return PTR_ERR(ca);
|
||||
|
||||
src = bch2_dev_usage_read(ca);
|
||||
src = bch2_dev_usage_full_read(ca);
|
||||
|
||||
arg.state = ca->mi.state;
|
||||
arg.bucket_size = ca->mi.bucket_size;
|
||||
@ -514,7 +514,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
|
||||
struct bch_ioctl_dev_usage_v2 __user *user_arg)
|
||||
{
|
||||
struct bch_ioctl_dev_usage_v2 arg;
|
||||
struct bch_dev_usage src;
|
||||
struct bch_dev_usage_full src;
|
||||
struct bch_dev *ca;
|
||||
int ret = 0;
|
||||
|
||||
@ -534,7 +534,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
|
||||
if (IS_ERR(ca))
|
||||
return PTR_ERR(ca);
|
||||
|
||||
src = bch2_dev_usage_read(ca);
|
||||
src = bch2_dev_usage_full_read(ca);
|
||||
|
||||
arg.state = ca->mi.state;
|
||||
arg.bucket_size = ca->mi.bucket_size;
|
||||
|
@ -280,7 +280,11 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
|
||||
s64 wait = S64_MAX, fragmented_allowed, fragmented;
|
||||
|
||||
for_each_rw_member(c, ca) {
|
||||
struct bch_dev_usage usage = bch2_dev_usage_read(ca);
|
||||
struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca);
|
||||
struct bch_dev_usage usage;
|
||||
|
||||
for (unsigned i = 0; i < BCH_DATA_NR; i++)
|
||||
usage.buckets[i] = usage_full.d[i].buckets;
|
||||
|
||||
fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
|
||||
ca->mi.bucket_size) >> 1);
|
||||
@ -288,7 +292,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
|
||||
|
||||
for (unsigned i = 0; i < BCH_DATA_NR; i++)
|
||||
if (data_type_movable(i))
|
||||
fragmented += usage.d[i].fragmented;
|
||||
fragmented += usage_full.d[i].fragmented;
|
||||
|
||||
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user