diff --git a/usr/src/lib/libumem/common/mapfile-vers b/usr/src/lib/libumem/common/mapfile-vers index 045495cd07c5..888a1570f25f 100644 --- a/usr/src/lib/libumem/common/mapfile-vers +++ b/usr/src/lib/libumem/common/mapfile-vers @@ -61,7 +61,6 @@ SYMBOL_VERSION SUNW_1.1 { umem_cache_alloc; umem_cache_create; umem_cache_destroy; - umem_cache_bufsize; umem_cache_free; umem_free; umem_nofail_callback; diff --git a/usr/src/lib/libumem/common/umem.c b/usr/src/lib/libumem/common/umem.c index 1fe50284651f..dbc738a049c1 100644 --- a/usr/src/lib/libumem/common/umem.c +++ b/usr/src/lib/libumem/common/umem.c @@ -2997,12 +2997,6 @@ umem_cache_destroy(umem_cache_t *cp) vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus)); } -size_t -umem_cache_bufsize(umem_cache_t *cp) -{ - return (cp->cache_bufsize); -} - void umem_alloc_sizes_clear(void) { diff --git a/usr/src/lib/libumem/common/umem.h b/usr/src/lib/libumem/common/umem.h index 56c1670b76f8..579429bc1250 100644 --- a/usr/src/lib/libumem/common/umem.h +++ b/usr/src/lib/libumem/common/umem.h @@ -71,7 +71,6 @@ extern umem_cache_t *umem_cache_create(char *, size_t, size_t, umem_constructor_t *, umem_destructor_t *, umem_reclaim_t *, void *, vmem_t *, int); extern void umem_cache_destroy(umem_cache_t *); -extern size_t umem_cache_bufsize(umem_cache_t *); extern void *umem_cache_alloc(umem_cache_t *, int); extern void umem_cache_free(umem_cache_t *, void *); diff --git a/usr/src/lib/libzpool/common/sys/zfs_context.h b/usr/src/lib/libzpool/common/sys/zfs_context.h index feac1cd81c32..434827d7b0c6 100644 --- a/usr/src/lib/libzpool/common/sys/zfs_context.h +++ b/usr/src/lib/libzpool/common/sys/zfs_context.h @@ -329,7 +329,6 @@ extern void kstat_runq_back_to_waitq(kstat_io_t *); #define kmem_cache_destroy(_c) umem_cache_destroy(_c) #define kmem_cache_alloc(_c, _f) umem_cache_alloc(_c, _f) #define kmem_cache_free(_c, _b) umem_cache_free(_c, _b) -#define kmem_cache_bufsize(_c) umem_cache_bufsize(_c) #define kmem_debugging() 0 #define kmem_cache_reap_now(_c) /* nothing */ #define kmem_cache_set_move(_c, _cb) /* nothing */ diff --git a/usr/src/uts/common/fs/zfs/dsl_scan.c b/usr/src/uts/common/fs/zfs/dsl_scan.c index be4106c70ee0..e47d4a0f035a 100644 --- a/usr/src/uts/common/fs/zfs/dsl_scan.c +++ b/usr/src/uts/common/fs/zfs/dsl_scan.c @@ -124,7 +124,7 @@ boolean_t zfs_scan_direct = B_FALSE; /* don't queue & sort zios, go direct */ uint64_t zfs_scan_max_ext_gap = 2 << 20; /* bytes */ /* See scan_io_queue_mem_lim for details on the memory limit tunables */ uint64_t zfs_scan_mem_lim_fact = 20; /* fraction of physmem */ -uint64_t zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim */ +uint64_t zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */ uint64_t zfs_scan_checkpoint_intval = 7200; /* seconds */ /* * fill_weight is non-tunable at runtime, so we copy it at module init from @@ -161,8 +161,6 @@ extern int zfs_txg_timeout; */ boolean_t zfs_free_bpobj_enabled = B_TRUE; -static kmem_cache_t *scan_seg_cache; - /* the order has to match pool_scan_type */ static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { NULL, @@ -170,12 +168,6 @@ static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */ }; -typedef struct { - range_seg_t ss_rs; - avl_node_t ss_size_node; - uint64_t ss_fill; -} scan_seg_t; - typedef struct scan_io { uint64_t sio_blk_prop; uint64_t sio_phys_birth; @@ -206,18 +198,17 @@ struct dsl_scan_io_queue { range_seg_t q_issuing_rs; uint64_t q_num_issuing_zios; - - scan_io_t *q_adding_io; - uint64_t 
q_removed_tmp; }; -#define SCAN_IO_GET_OFFSET(sio) \ +#define SCAN_IO_GET_OFFSET(sio) \ BF64_GET_SB((sio)->sio_dva_word1, 0, 63, SPA_MINBLOCKSHIFT, 0) #define SCAN_IO_SET_OFFSET(sio, offset) \ BF64_SET_SB((sio)->sio_dva_word1, 0, 63, SPA_MINBLOCKSHIFT, 0, offset) -static void scan_io_queue_insert_cb(range_tree_t *rt, void *rs, void *arg); -static void scan_io_queue_remove_cb(range_tree_t *rt, void *rs, void *arg); +static void scan_io_queue_insert_cb(range_tree_t *rt, range_seg_t *rs, + void *arg); +static void scan_io_queue_remove_cb(range_tree_t *rt, range_seg_t *rs, + void *arg); static void scan_io_queue_vacate_cb(range_tree_t *rt, void *arg); static int ext_size_compar(const void *x, const void *y); static int io_addr_compar(const void *x, const void *y); @@ -287,20 +278,9 @@ bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i) void dsl_scan_global_init() { - ASSERT3P(scan_seg_cache, ==, NULL); - scan_seg_cache = kmem_cache_create("scan_seg_cache", - sizeof (scan_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0); fill_weight = zfs_scan_fill_weight; } -void -dsl_scan_global_fini() -{ - ASSERT(scan_seg_cache != NULL); - kmem_cache_destroy(scan_seg_cache); - scan_seg_cache = NULL; -} - int dsl_scan_init(dsl_pool_t *dp, uint64_t txg) { @@ -2585,88 +2565,39 @@ scan_io_queue_insert(dsl_scan_t *scn, dsl_scan_io_queue_t *queue, mutex_exit(&scn->scn_status_lock); range_tree_set_gap(queue->q_exts_by_addr, zfs_scan_max_ext_gap); - queue->q_adding_io = sio; - range_tree_add(queue->q_exts_by_addr, offset, asize); - queue->q_adding_io = NULL; + range_tree_add_fill(queue->q_exts_by_addr, offset, asize, asize); } -/* - * Callback invoked when a scan_seg_t is being added to the q_exts_by_addr - * range tree. Inserting a new segment might have removed a previously - * existing segment due to the range tree automatically joining two - * adjacent segments. In that case, scan_io_queue_remove_cb will have saved - * the old segment's ss_fill value in q_removed_tmp, and the caller of - * range_tree_add() has placed the newly adding scan_io_t into q_adding_io. - * In order to properly track fill for the new segment, we need to add - * the previously saved q_removed_tmp value and the size of the adding - * scan_io_t to the ss_fill of the newly added scan_seg_t. - */ +/* q_exts_by_addr segment add callback. */ /*ARGSUSED*/ static void -scan_io_queue_insert_cb(range_tree_t *rt, void *rs, void *arg) +scan_io_queue_insert_cb(range_tree_t *rt, range_seg_t *rs, void *arg) { dsl_scan_io_queue_t *queue = arg; - scan_seg_t *ss = rs; - scan_seg_t *old_ss; avl_index_t idx; - ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); - ASSERT(queue->q_adding_io != NULL); - - old_ss = avl_find(&queue->q_exts_by_size, ss, &idx); - if (old_ss != NULL) { - ASSERT3P(old_ss, ==, ss); - avl_remove(&queue->q_exts_by_size, ss); - } - - ss->ss_fill += queue->q_adding_io->sio_asize + queue->q_removed_tmp; - ASSERT3U(ss->ss_fill, <=, ss->ss_rs.rs_end - ss->ss_rs.rs_start); - queue->q_removed_tmp = 0; - - VERIFY3P(avl_find(&queue->q_exts_by_size, ss, &idx), ==, NULL); - avl_insert(&queue->q_exts_by_size, ss, idx); + VERIFY3P(avl_find(&queue->q_exts_by_size, rs, &idx), ==, NULL); + avl_insert(&queue->q_exts_by_size, rs, idx); } -/* - * Callback invoked from the q_exts_by_addr range tree when a scan_seg_t - * is being removed. 
This can happen either when we are properly removing - * a scan_seg_t, because we've consumed the extents in it, or because we - * are adding a new scan_io_t and it happens to bridge the gap between two - * scan_seg_t's. In that case, we need to memorize how well the adjacent - * scan_seg_t's were filled (their ss_fill values), so that we can add - * that in the insertion callback above. This information is conveyed to - * this function by having the caller who invoked the range_tree_add() - * place the io to be added into the q_adding_io temporary pointer. - * That tells us that we are being called back due to a range_tree_add() - * instead of a simple range_tree_remove(). - */ +/* q_exts_by_addr segment remove callback. */ /*ARGSUSED*/ static void -scan_io_queue_remove_cb(range_tree_t *rt, void *rs, void *arg) +scan_io_queue_remove_cb(range_tree_t *rt, range_seg_t *rs, void *arg) { dsl_scan_io_queue_t *queue = arg; - scan_seg_t *ss = rs; - - avl_remove(&queue->q_exts_by_size, ss); - if (queue->q_adding_io != NULL) { - queue->q_removed_tmp += ss->ss_fill; - ss->ss_fill = 0; - } + avl_remove(&queue->q_exts_by_size, rs); } -/* - * Callback invoked from the q_exts_by_addr range tree when the tree is - * being vacated (during queue destruction). We don't do any queue counter - * manipulations, since the queue is being destroyed anyway. - */ +/* q_exts_by_addr vacate callback. */ /*ARGSUSED*/ static void scan_io_queue_vacate_cb(range_tree_t *rt, void *arg) { dsl_scan_io_queue_t *queue = arg; - scan_seg_t *ss; - while ((ss = avl_first(&queue->q_exts_by_size)) != NULL) - avl_remove(&queue->q_exts_by_size, ss); + void *cookie = NULL; + while (avl_destroy_nodes(&queue->q_exts_by_size, &cookie) != NULL) + ; } /* @@ -2702,22 +2633,22 @@ scan_io_queue_vacate_cb(range_tree_t *rt, void *arg) static int ext_size_compar(const void *x, const void *y) { - const scan_seg_t *ssa = x, *ssb = y; - uint64_t sa = ssa->ss_rs.rs_end - ssa->ss_rs.rs_start, - sb = ssb->ss_rs.rs_end - ssb->ss_rs.rs_start; + const range_seg_t *rsa = x, *rsb = y; + uint64_t sa = rsa->rs_end - rsa->rs_start, + sb = rsb->rs_end - rsb->rs_start; uint64_t score_a, score_b; - score_a = ssa->ss_fill + (((ssa->ss_fill * 100) / sa) * - fill_weight * ssa->ss_fill) / 100; - score_b = ssb->ss_fill + (((ssb->ss_fill * 100) / sb) * - fill_weight * ssb->ss_fill) / 100; + score_a = rsa->rs_fill + (((rsa->rs_fill * 100) / sa) * + fill_weight * rsa->rs_fill) / 100; + score_b = rsb->rs_fill + (((rsb->rs_fill * 100) / sb) * + fill_weight * rsb->rs_fill) / 100; if (score_a > score_b) return (-1); if (score_a == score_b) { - if (ssa->ss_rs.rs_start < ssb->ss_rs.rs_start) + if (rsa->rs_start < rsb->rs_start) return (-1); - if (ssa->ss_rs.rs_start == ssb->ss_rs.rs_start) + if (rsa->rs_start == rsb->rs_start) return (0); return (1); } @@ -2750,11 +2681,10 @@ scan_io_queue_create(vdev_t *vd) q->q_scn = scn; q->q_vd = vd; cv_init(&q->q_cv, NULL, CV_DEFAULT, NULL); - ASSERT(scan_seg_cache != NULL); - q->q_exts_by_addr = range_tree_create_custom(&scan_io_queue_ops, q, - &q->q_vd->vdev_scan_io_queue_lock, scan_seg_cache); + q->q_exts_by_addr = range_tree_create(&scan_io_queue_ops, q, + &q->q_vd->vdev_scan_io_queue_lock); avl_create(&q->q_exts_by_size, ext_size_compar, - sizeof (scan_seg_t), offsetof(scan_seg_t, ss_size_node)); + sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); avl_create(&q->q_zios_by_addr, io_addr_compar, sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); @@ -2787,6 +2717,7 @@ dsl_scan_io_queue_destroy(dsl_scan_io_queue_t 
*queue) ASSERT(range_tree_contains(queue->q_exts_by_addr, SCAN_IO_GET_OFFSET(sio), sio->sio_asize)); bytes_dequeued += sio->sio_asize; + avl_remove(&queue->q_zios_by_addr, sio); kmem_free(sio, sizeof (*sio)); } #ifdef DEBUG @@ -2908,7 +2839,7 @@ scan_io_queue_mem_lim(dsl_scan_t *scn) if (queue != NULL) { /* #extents in exts_by_size = # in exts_by_addr */ mused += avl_numnodes(&queue->q_exts_by_size) * - sizeof (scan_seg_t) + + sizeof (range_seg_t) + (avl_numnodes(&queue->q_zios_by_addr) + queue->q_num_issuing_zios) * sizeof (scan_io_t); } @@ -2964,7 +2895,7 @@ scan_io_queue_issue(list_t *io_list, dsl_scan_io_queue_t *queue) } /* - * Given a scan_seg_t (extent) and a list, this function passes over a + * Given a range_seg_t (extent) and a list, this function passes over a * scan queue and gathers up the appropriate ios which fit into that * scan seg (starting from lowest LBA). During this, we observe that we * don't go over the `limit' in the total amount of scan_io_t bytes that @@ -2978,21 +2909,21 @@ scan_io_queue_issue(list_t *io_list, dsl_scan_io_queue_t *queue) * q_exts_by_size tree. */ static uint64_t -scan_io_queue_gather(scan_seg_t *ss, list_t *list, +scan_io_queue_gather(const range_seg_t *rs, list_t *list, dsl_scan_io_queue_t *queue, uint64_t limit) { scan_io_t srch_sio, *sio, *next_sio; avl_index_t idx; - uint64_t num_zios = 0, bytes = 0; + int64_t num_zios = 0, bytes = 0; boolean_t size_limited = B_FALSE; - ASSERT(ss != NULL); + ASSERT(rs != NULL); ASSERT3U(limit, !=, 0); ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); list_create(list, sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_list_node)); - SCAN_IO_SET_OFFSET(&srch_sio, ss->ss_rs.rs_start); + SCAN_IO_SET_OFFSET(&srch_sio, rs->rs_start); /* * The exact start of the extent might not contain any matching zios, @@ -3002,52 +2933,36 @@ scan_io_queue_gather(scan_seg_t *ss, list_t *list, if (sio == NULL) sio = avl_nearest(&queue->q_zios_by_addr, idx, AVL_AFTER); - while (sio != NULL && SCAN_IO_GET_OFFSET(sio) < ss->ss_rs.rs_end) { + while (sio != NULL && SCAN_IO_GET_OFFSET(sio) < rs->rs_end) { if (bytes >= limit) { size_limited = B_TRUE; break; } - ASSERT3U(SCAN_IO_GET_OFFSET(sio), >=, ss->ss_rs.rs_start); + ASSERT3U(SCAN_IO_GET_OFFSET(sio), >=, rs->rs_start); ASSERT3U(SCAN_IO_GET_OFFSET(sio) + sio->sio_asize, <=, - ss->ss_rs.rs_end); + rs->rs_end); + next_sio = AVL_NEXT(&queue->q_zios_by_addr, sio); avl_remove(&queue->q_zios_by_addr, sio); list_insert_tail(list, sio); num_zios++; bytes += sio->sio_asize; - ss->ss_fill -= sio->sio_asize; sio = next_sio; } if (size_limited) { - scan_io_t *zero_io = kmem_zalloc(sizeof (*zero_io), KM_SLEEP); - /* - * Only remove what's consumed. The ss_fill field will have - * already been adjusted above, so when the range tree - * internally recycles the value in scan_io_queue_insert_cb - * and scan_io_queue_remove_cb, it will work out OK. - */ + uint64_t end; sio = list_tail(list); - /* - * We stick a zero-length io into q_adding_io, so that - * scan_io_queue_remove_cb saves the ss_fill field of the - * old scan seg (which has already been decremented by the - * amount we lifted from it in the loop above) and will be - * restored in the new scan seg representing the new extent - * tail. 
- */ - queue->q_adding_io = zero_io; - range_tree_remove(queue->q_exts_by_addr, ss->ss_rs.rs_start, - (SCAN_IO_GET_OFFSET(sio) + sio->sio_asize) - - ss->ss_rs.rs_start); - kmem_free(zero_io, sizeof (*zero_io)); + end = SCAN_IO_GET_OFFSET(sio) + sio->sio_asize; + range_tree_remove_fill(queue->q_exts_by_addr, rs->rs_start, + end - rs->rs_start, bytes, 0); } else { /* * Whole extent consumed, remove it all, including any head * or tail overhang. */ - range_tree_remove(queue->q_exts_by_addr, ss->ss_rs.rs_start, - ss->ss_rs.rs_end - ss->ss_rs.rs_start); + range_tree_remove_fill(queue->q_exts_by_addr, rs->rs_start, + rs->rs_end - rs->rs_start, bytes, 0); } atomic_add_64(&queue->q_num_issuing_zios, num_zios); @@ -3066,7 +2981,7 @@ scan_io_queue_gather(scan_seg_t *ss, list_t *list, * memory limit. * 3) Otherwise we don't select any extents. */ -static scan_seg_t * +static const range_seg_t * scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) { dsl_scan_t *scn = queue->q_scn; @@ -3106,7 +3021,7 @@ scan_io_queues_run_one(io_queue_run_info_t *info) dsl_scan_t *scn = queue->q_scn; kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; list_t zio_list; - scan_seg_t *ss; + const range_seg_t *rs; uint64_t issued = 0; ASSERT(scn->scn_is_sorted); @@ -3117,10 +3032,10 @@ scan_io_queues_run_one(io_queue_run_info_t *info) mutex_enter(q_lock); /* First we select the extent we'll be issuing from next. */ - ss = scan_io_queue_fetch_ext(queue); - DTRACE_PROBE2(queue_fetch_ext, scan_seg_t *, ss, + rs = scan_io_queue_fetch_ext(queue); + DTRACE_PROBE2(queue_fetch_ext, range_seg_t *, rs, dsl_scan_io_queue_t *, queue); - if (ss == NULL) { + if (rs == NULL) { mutex_exit(q_lock); break; } @@ -3130,7 +3045,7 @@ scan_io_queues_run_one(io_queue_run_info_t *info) * gather up the corresponding zio's, taking care not to step * over the limit. */ - issued += scan_io_queue_gather(ss, &zio_list, queue, + issued += scan_io_queue_gather(rs, &zio_list, queue, limit - issued); first_io = list_head(&zio_list); last_io = list_tail(&zio_list); @@ -3372,10 +3287,10 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) * 1) Cold, just sitting in the queue of zio's to be issued at * some point in the future. In this case, all we do is * remove the zio from the q_zios_by_addr tree, decrement - * its data volume from the containing scan_seg_t and + * its data volume from the containing range_seg_t and * resort the q_exts_by_size tree to reflect that the - * scan_seg_t has lost some of its 'fill'. We don't shorten - * the scan_seg_t - this is usually rare enough not to be + * range_seg_t has lost some of its 'fill'. We don't shorten + * the range_seg_t - this is usually rare enough not to be * worth the extra hassle of trying keep track of precise * extent boundaries. * 2) Hot, where the zio is currently in-flight in @@ -3387,7 +3302,7 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) */ sio = avl_find(&queue->q_zios_by_addr, &srch, &idx); if (sio != NULL) { - scan_seg_t *ss; + range_seg_t *rs; /* Got it while it was cold in the queue */ ASSERT3U(srch.sio_asize, ==, sio->sio_asize); @@ -3400,20 +3315,19 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) /* * Since we're taking this scan_io_t out of its parent - * scan_seg, we need to alter the scan_seg_t's ss_fill value, - * so this changes its ordering position. We need to reinsert - * in its appropriate place in q_exts_by_size. + * range_seg_t, we need to alter the range_seg_t's rs_fill + * value, so this changes its ordering position. 
We need + * to reinsert in its appropriate place in q_exts_by_size. */ - ss = range_tree_find(queue->q_exts_by_addr, + rs = range_tree_find(queue->q_exts_by_addr, SCAN_IO_GET_OFFSET(sio), sio->sio_asize); - ASSERT(ss != NULL); - avl_remove(&queue->q_exts_by_size, ss); - ASSERT3U(ss->ss_fill, <=, - ss->ss_rs.rs_end - ss->ss_rs.rs_start); - ASSERT3U(ss->ss_fill, >=, sio->sio_asize); - ss->ss_fill -= sio->sio_asize; - VERIFY3P(avl_find(&queue->q_exts_by_size, ss, &idx), ==, NULL); - avl_insert(&queue->q_exts_by_size, ss, idx); + ASSERT(rs != NULL); + ASSERT3U(rs->rs_fill, >=, sio->sio_asize); + avl_remove(&queue->q_exts_by_size, rs); + ASSERT3U(rs->rs_fill, >=, sio->sio_asize); + rs->rs_fill -= sio->sio_asize; + VERIFY3P(avl_find(&queue->q_exts_by_size, rs, &idx), ==, NULL); + avl_insert(&queue->q_exts_by_size, rs, idx); /* * We only update the queue byte counter in the cold path, diff --git a/usr/src/uts/common/fs/zfs/metaslab.c b/usr/src/uts/common/fs/zfs/metaslab.c index a82efe172193..f9a691c7d893 100644 --- a/usr/src/uts/common/fs/zfs/metaslab.c +++ b/usr/src/uts/common/fs/zfs/metaslab.c @@ -198,29 +198,6 @@ static void metaslab_set_fragmentation(metaslab_t *); kmem_cache_t *metaslab_alloc_trace_cache; -typedef struct ms_seg { - range_seg_t ms_rs; - avl_node_t ms_pp_node; /* AVL picker-private node */ -} ms_seg_t; - -static kmem_cache_t *ms_seg_cache; - -void -metaslab_global_init(void) -{ - ASSERT3P(ms_seg_cache, ==, NULL); - ms_seg_cache = kmem_cache_create("metaslab_seg_cache", - sizeof (ms_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0); -} - -void -metaslab_global_fini(void) -{ - ASSERT(ms_seg_cache != NULL); - kmem_cache_destroy(ms_seg_cache); - ms_seg_cache = NULL; -} - /* * ========================================================================== * Metaslab classes @@ -982,20 +959,20 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, static int metaslab_rangesize_compare(const void *x1, const void *x2) { - const ms_seg_t *r1 = x1; - const ms_seg_t *r2 = x2; - uint64_t rs_size1 = r1->ms_rs.rs_end - r1->ms_rs.rs_start; - uint64_t rs_size2 = r2->ms_rs.rs_end - r2->ms_rs.rs_start; + const range_seg_t *r1 = x1; + const range_seg_t *r2 = x2; + uint64_t rs_size1 = r1->rs_end - r1->rs_start; + uint64_t rs_size2 = r2->rs_end - r2->rs_start; if (rs_size1 < rs_size2) return (-1); if (rs_size1 > rs_size2) return (1); - if (r1->ms_rs.rs_start < r2->ms_rs.rs_start) + if (r1->rs_start < r2->rs_start) return (-1); - if (r1->ms_rs.rs_start > r2->ms_rs.rs_start) + if (r1->rs_start > r2->rs_start) return (1); return (0); @@ -1014,7 +991,7 @@ metaslab_rt_create(range_tree_t *rt, void *arg) ASSERT(msp->ms_tree == NULL); avl_create(&msp->ms_size_tree, metaslab_rangesize_compare, - sizeof (ms_seg_t), offsetof(ms_seg_t, ms_pp_node)); + sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); } /* @@ -1033,27 +1010,25 @@ metaslab_rt_destroy(range_tree_t *rt, void *arg) } static void -metaslab_rt_add(range_tree_t *rt, void *rs, void *arg) +metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) { - ms_seg_t *mss = rs; metaslab_t *msp = arg; ASSERT3P(rt->rt_arg, ==, msp); ASSERT3P(msp->ms_tree, ==, rt); VERIFY(!msp->ms_condensing); - avl_add(&msp->ms_size_tree, mss); + avl_add(&msp->ms_size_tree, rs); } static void -metaslab_rt_remove(range_tree_t *rt, void *rs, void *arg) +metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) { - ms_seg_t *mss = rs; metaslab_t *msp = arg; ASSERT3P(rt->rt_arg, ==, msp); ASSERT3P(msp->ms_tree, ==, rt); VERIFY(!msp->ms_condensing); - 
avl_remove(&msp->ms_size_tree, mss); + avl_remove(&msp->ms_size_tree, rs); } static void @@ -1071,7 +1046,7 @@ metaslab_rt_vacate(range_tree_t *rt, void *arg) * will be freed by the range tree, so we don't want to free them here. */ avl_create(&msp->ms_size_tree, metaslab_rangesize_compare, - sizeof (ms_seg_t), offsetof(ms_seg_t, ms_pp_node)); + sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); } static range_tree_ops_t metaslab_rt_ops = { @@ -1095,29 +1070,29 @@ uint64_t metaslab_block_maxsize(metaslab_t *msp) { avl_tree_t *t = &msp->ms_size_tree; - ms_seg_t *mss; + range_seg_t *rs; - if (t == NULL || (mss = avl_last(t)) == NULL) + if (t == NULL || (rs = avl_last(t)) == NULL) return (0ULL); - return (mss->ms_rs.rs_end - mss->ms_rs.rs_start); + return (rs->rs_end - rs->rs_start); } -static ms_seg_t * +static range_seg_t * metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size) { - ms_seg_t *mss, rsearch; + range_seg_t *rs, rsearch; avl_index_t where; - rsearch.ms_rs.rs_start = start; - rsearch.ms_rs.rs_end = start + size; + rsearch.rs_start = start; + rsearch.rs_end = start + size; - mss = avl_find(t, &rsearch, &where); - if (mss == NULL) { - mss = avl_nearest(t, where, AVL_AFTER); + rs = avl_find(t, &rsearch, &where); + if (rs == NULL) { + rs = avl_nearest(t, where, AVL_AFTER); } - return (mss); + return (rs); } /* @@ -1129,16 +1104,16 @@ static uint64_t metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size, uint64_t align) { - ms_seg_t *mss = metaslab_block_find(t, *cursor, size); + range_seg_t *rs = metaslab_block_find(t, *cursor, size); - while (mss != NULL) { - uint64_t offset = P2ROUNDUP(mss->ms_rs.rs_start, align); + while (rs != NULL) { + uint64_t offset = P2ROUNDUP(rs->rs_start, align); - if (offset + size <= mss->ms_rs.rs_end) { + if (offset + size <= rs->rs_end) { *cursor = offset + size; return (offset); } - mss = AVL_NEXT(t, mss); + rs = AVL_NEXT(t, rs); } /* @@ -1250,15 +1225,14 @@ metaslab_cf_alloc(metaslab_t *msp, uint64_t size) ASSERT3U(*cursor_end, >=, *cursor); if ((*cursor + size) > *cursor_end) { - ms_seg_t *mss; + range_seg_t *rs; - mss = avl_last(&msp->ms_size_tree); - if (mss == NULL || - (mss->ms_rs.rs_end - mss->ms_rs.rs_start) < size) + rs = avl_last(&msp->ms_size_tree); + if (rs == NULL || (rs->rs_end - rs->rs_start) < size) return (-1ULL); - *cursor = mss->ms_rs.rs_start; - *cursor_end = mss->ms_rs.rs_end; + *cursor = rs->rs_start; + *cursor_end = rs->rs_end; } offset = *cursor; @@ -1291,7 +1265,7 @@ metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) { avl_tree_t *t = &msp->ms_tree->rt_root; avl_index_t where; - ms_seg_t *mss, rsearch; + range_seg_t *rs, rsearch; uint64_t hbit = highbit64(size); uint64_t *cursor = &msp->ms_lbas[hbit - 1]; uint64_t max_size = metaslab_block_maxsize(msp); @@ -1302,25 +1276,25 @@ metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) if (max_size < size) return (-1ULL); - rsearch.ms_rs.rs_start = *cursor; - rsearch.ms_rs.rs_end = *cursor + size; + rsearch.rs_start = *cursor; + rsearch.rs_end = *cursor + size; - mss = avl_find(t, &rsearch, &where); - if (mss == NULL || (mss->ms_rs.rs_end - mss->ms_rs.rs_start) < size) { + rs = avl_find(t, &rsearch, &where); + if (rs == NULL || (rs->rs_end - rs->rs_start) < size) { t = &msp->ms_size_tree; - rsearch.ms_rs.rs_start = 0; - rsearch.ms_rs.rs_end = MIN(max_size, + rsearch.rs_start = 0; + rsearch.rs_end = MIN(max_size, 1ULL << (hbit + metaslab_ndf_clump_shift)); - mss = avl_find(t, &rsearch, &where); - if (mss == NULL) - mss = avl_nearest(t, where, AVL_AFTER); 
- ASSERT(mss != NULL); + rs = avl_find(t, &rsearch, &where); + if (rs == NULL) + rs = avl_nearest(t, where, AVL_AFTER); + ASSERT(rs != NULL); } - if ((mss->ms_rs.rs_end - mss->ms_rs.rs_start) >= size) { - *cursor = mss->ms_rs.rs_start + size; - return (mss->ms_rs.rs_start); + if ((rs->rs_end - rs->rs_start) >= size) { + *cursor = rs->rs_start + size; + return (rs->rs_start); } return (-1ULL); } @@ -1439,9 +1413,7 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg, * addition of new space; and for debugging, it ensures that we'd * data fault on any attempt to use this metaslab before it's ready. */ - ASSERT(ms_seg_cache != NULL); - ms->ms_tree = range_tree_create_custom(&metaslab_rt_ops, ms, - &ms->ms_lock, ms_seg_cache); + ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock); metaslab_group_add(mg, ms); metaslab_set_fragmentation(ms); @@ -2061,7 +2033,7 @@ static boolean_t metaslab_should_condense(metaslab_t *msp) { space_map_t *sm = msp->ms_sm; - ms_seg_t *mss; + range_seg_t *rs; uint64_t size, entries, segsz, object_size, optimal_size, record_size; dmu_object_info_t doi; uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift; @@ -2075,8 +2047,8 @@ metaslab_should_condense(metaslab_t *msp) * metaslabs that are empty and metaslabs for which a condense * request has been made. */ - mss = avl_last(&msp->ms_size_tree); - if (mss == NULL || msp->ms_condense_wanted) + rs = avl_last(&msp->ms_size_tree); + if (rs == NULL || msp->ms_condense_wanted) return (B_TRUE); /* @@ -2085,7 +2057,7 @@ metaslab_should_condense(metaslab_t *msp) * larger on-disk than the entire current on-disk structure, then * clearly condensing will increase the on-disk structure size. */ - size = (mss->ms_rs.rs_end - mss->ms_rs.rs_start) >> sm->sm_shift; + size = (rs->rs_end - rs->rs_start) >> sm->sm_shift; entries = size / (MIN(size, SM_RUN_MAX)); segsz = entries * sizeof (uint64_t); diff --git a/usr/src/uts/common/fs/zfs/range_tree.c b/usr/src/uts/common/fs/zfs/range_tree.c index abd29efb9ae4..88e741803476 100644 --- a/usr/src/uts/common/fs/zfs/range_tree.c +++ b/usr/src/uts/common/fs/zfs/range_tree.c @@ -198,25 +198,7 @@ range_tree_seg_compare(const void *x1, const void *x2) range_tree_t * range_tree_create(range_tree_ops_t *ops, void *arg, kmutex_t *lp) { - return (range_tree_create_custom(ops, arg, lp, range_seg_cache)); -} - -/* - * Same as range_tree_create, but allows you to pass a custom range tree - * segment allocator. The purpose of this is that you can put additional - * fields into the structure following a first range_seg_t field (which - * will be used by the range tree itself), allowing you to store arbitrary - * additional information with each range tree segment. 
- */ -range_tree_t * -range_tree_create_custom(range_tree_ops_t *ops, void *arg, kmutex_t *lp, - kmem_cache_t *seg_cache) -{ - range_tree_t *rt; - - ASSERT3U(kmem_cache_bufsize(seg_cache), >=, sizeof (range_seg_t)); - - rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP); + range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP); avl_create(&rt->rt_root, range_tree_seg_compare, sizeof (range_seg_t), offsetof(range_seg_t, rs_node)); @@ -224,7 +206,6 @@ range_tree_create_custom(range_tree_ops_t *ops, void *arg, kmutex_t *lp, rt->rt_lock = lp; rt->rt_ops = ops; rt->rt_arg = arg; - rt->rt_seg_cache = seg_cache; if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL) rt->rt_ops->rtop_create(rt, rt->rt_arg); @@ -245,7 +226,7 @@ range_tree_destroy(range_tree_t *rt) } void -range_tree_add(void *arg, uint64_t start, uint64_t size) +range_tree_add_fill(void *arg, uint64_t start, uint64_t size, uint64_t fill) { range_tree_t *rt = arg; avl_index_t where; @@ -271,6 +252,11 @@ range_tree_add(void *arg, uint64_t start, uint64_t size) if (rt->rt_gap != 0) { if (rs != NULL) { if (rs->rs_start <= start && rs->rs_end >= end) { + if (rt->rt_ops != NULL && + rt->rt_ops->rtop_remove != NULL) + rt->rt_ops->rtop_remove(rt, rs, + rt->rt_arg); + rs->rs_fill += fill; if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) { rt->rt_ops->rtop_add(rt, rs, @@ -278,6 +264,7 @@ range_tree_add(void *arg, uint64_t start, uint64_t size) } return; } + ASSERT0(fill); if (rs->rs_start < start) range_tree_add(rt, rs->rs_end, end); else @@ -314,7 +301,8 @@ range_tree_add(void *arg, uint64_t start, uint64_t size) range_tree_stat_decr(rt, rs_after); rs_after->rs_start = rs_before->rs_start; - kmem_cache_free(rt->rt_seg_cache, rs_before); + rs_after->rs_fill += rs_before->rs_fill + fill; + kmem_cache_free(range_seg_cache, rs_before); rs = rs_after; } else if (merge_before) { if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) @@ -323,6 +311,7 @@ range_tree_add(void *arg, uint64_t start, uint64_t size) range_tree_stat_decr(rt, rs_before); rs_before->rs_end = end; + rs_before->rs_fill += fill; rs = rs_before; } else if (merge_after) { if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) @@ -331,12 +320,13 @@ range_tree_add(void *arg, uint64_t start, uint64_t size) range_tree_stat_decr(rt, rs_after); rs_after->rs_start = start; + rs_after->rs_fill += fill; rs = rs_after; } else { - rs = kmem_cache_alloc(rt->rt_seg_cache, KM_SLEEP); - bzero(rs, kmem_cache_bufsize(rt->rt_seg_cache)); + rs = kmem_cache_alloc(range_seg_cache, KM_SLEEP); rs->rs_start = start; rs->rs_end = end; + rs->rs_fill = fill; avl_insert(&rt->rt_root, rs, where); } @@ -348,7 +338,14 @@ range_tree_add(void *arg, uint64_t start, uint64_t size) } void -range_tree_remove(void *arg, uint64_t start, uint64_t size) +range_tree_add(void *arg, uint64_t start, uint64_t size) +{ + range_tree_add_fill(arg, start, size, 0); +} + +static void +range_tree_remove_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill, + uint64_t fill_left, boolean_t partial_overlap) { range_tree_t *rt = arg; avl_index_t where; @@ -358,60 +355,115 @@ range_tree_remove(void *arg, uint64_t start, uint64_t size) ASSERT(MUTEX_HELD(rt->rt_lock)); VERIFY3U(size, !=, 0); - VERIFY3U(size, <=, rt->rt_space); + if (!partial_overlap) { + VERIFY3U(size, <=, rt->rt_space); + } else { + VERIFY0(fill); + VERIFY0(fill_left); + } rsearch.rs_start = start; rsearch.rs_end = end; - rs = avl_find(&rt->rt_root, &rsearch, &where); - /* Make sure we completely overlap with someone */ - if (rs == 
NULL) { - zfs_panic_recover("zfs: freeing free segment " - "(offset=%llu size=%llu)", - (longlong_t)start, (longlong_t)size); - return; - } - VERIFY3U(rs->rs_start, <=, start); - VERIFY3U(rs->rs_end, >=, end); + while ((rs = avl_find(&rt->rt_root, &rsearch, &where)) != NULL || + !partial_overlap) { + uint64_t overlap_sz; + + if (partial_overlap) { + if (rs->rs_start <= start && rs->rs_end >= end) + overlap_sz = size; + else if (rs->rs_start > start && rs->rs_end < end) + overlap_sz = rs->rs_end - rs->rs_start; + else if (rs->rs_end < end) + overlap_sz = rs->rs_end - start; + else /* rs->rs_start > start */ + overlap_sz = end - rs->rs_start; + } else { + /* Make sure we completely overlapped with someone */ + if (rs == NULL) { + zfs_panic_recover("zfs: freeing free segment " + "(offset=%llu size=%llu)", + (longlong_t)start, (longlong_t)size); + return; + } + VERIFY3U(rs->rs_start, <=, start); + VERIFY3U(rs->rs_end, >=, end); + overlap_sz = size; + } - left_over = (rs->rs_start != start); - right_over = (rs->rs_end != end); + left_over = (rs->rs_start < start); + right_over = (rs->rs_end > end); - range_tree_stat_decr(rt, rs); + range_tree_stat_decr(rt, rs); - if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) - rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg); + if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) + rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg); + + if (left_over && right_over) { + newseg = kmem_cache_alloc(range_seg_cache, KM_SLEEP); + newseg->rs_start = end; + newseg->rs_end = rs->rs_end; + ASSERT3U(rs->rs_fill, >=, (fill + fill_left)); + newseg->rs_fill = rs->rs_fill - (fill + fill_left); + range_tree_stat_incr(rt, newseg); + + rs->rs_end = start; + rs->rs_fill = fill_left; + + avl_insert_here(&rt->rt_root, newseg, rs, AVL_AFTER); + if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) + rt->rt_ops->rtop_add(rt, newseg, rt->rt_arg); + } else if (left_over) { + rs->rs_end = start; + ASSERT3U(rs->rs_fill, >=, fill); + rs->rs_fill -= fill; + } else if (right_over) { + rs->rs_start = end; + ASSERT3U(rs->rs_fill, >=, fill); + rs->rs_fill -= fill; + } else { + ASSERT3U(rs->rs_fill, ==, fill); + ASSERT(fill == 0 || !partial_overlap); + avl_remove(&rt->rt_root, rs); + kmem_cache_free(range_seg_cache, rs); + rs = NULL; + } - if (left_over && right_over) { - newseg = kmem_cache_alloc(rt->rt_seg_cache, KM_SLEEP); - bzero(newseg, kmem_cache_bufsize(rt->rt_seg_cache)); - newseg->rs_start = end; - newseg->rs_end = rs->rs_end; - range_tree_stat_incr(rt, newseg); + if (rs != NULL) { + range_tree_stat_incr(rt, rs); - rs->rs_end = start; + if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) + rt->rt_ops->rtop_add(rt, rs, rt->rt_arg); + } - avl_insert_here(&rt->rt_root, newseg, rs, AVL_AFTER); - if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) - rt->rt_ops->rtop_add(rt, newseg, rt->rt_arg); - } else if (left_over) { - rs->rs_end = start; - } else if (right_over) { - rs->rs_start = end; - } else { - avl_remove(&rt->rt_root, rs); - kmem_cache_free(rt->rt_seg_cache, rs); - rs = NULL; + rt->rt_space -= overlap_sz; + if (!partial_overlap) { + /* + * There can't be any more segments overlapping with + * us, so no sense in performing an extra search. 
+ */ + break; + } } +} - if (rs != NULL) { - range_tree_stat_incr(rt, rs); +void +range_tree_remove(void *arg, uint64_t start, uint64_t size) +{ + range_tree_remove_impl(arg, start, size, 0, 0, B_FALSE); +} - if (rt->rt_ops != NULL) - rt->rt_ops->rtop_add(rt, rs, rt->rt_arg); - } +void +range_tree_remove_overlap(void *arg, uint64_t start, uint64_t size) +{ + range_tree_remove_impl(arg, start, size, 0, 0, B_TRUE); +} - rt->rt_space -= size; +void +range_tree_remove_fill(void *arg, uint64_t start, uint64_t size, + uint64_t fill, uint64_t remain_left) +{ + range_tree_remove_impl(arg, start, size, fill, remain_left, B_FALSE); } static range_seg_t * @@ -500,7 +552,7 @@ range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg) while ((rs = avl_destroy_nodes(&rt->rt_root, &cookie)) != NULL) { if (func != NULL) func(arg, rs->rs_start, rs->rs_end - rs->rs_start); - kmem_cache_free(rt->rt_seg_cache, rs); + kmem_cache_free(range_seg_cache, rs); } bzero(rt->rt_histogram, sizeof (rt->rt_histogram)); @@ -518,7 +570,7 @@ range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg) func(arg, rs->rs_start, rs->rs_end - rs->rs_start); } -void * +range_seg_t * range_tree_first(range_tree_t *rt) { ASSERT(MUTEX_HELD(rt->rt_lock)); diff --git a/usr/src/uts/common/fs/zfs/spa_misc.c b/usr/src/uts/common/fs/zfs/spa_misc.c index 9b7779a8b0d8..867a9ecb70cf 100644 --- a/usr/src/uts/common/fs/zfs/spa_misc.c +++ b/usr/src/uts/common/fs/zfs/spa_misc.c @@ -1887,7 +1887,6 @@ spa_init(int mode) zpool_feature_init(); spa_config_load(); l2arc_start(); - metaslab_global_init(); dsl_scan_global_init(); } @@ -1906,8 +1905,6 @@ spa_fini(void) range_tree_fini(); unique_fini(); refcount_fini(); - metaslab_global_fini(); - dsl_scan_global_fini(); avl_destroy(&spa_namespace_avl); avl_destroy(&spa_spare_avl); diff --git a/usr/src/uts/common/fs/zfs/sys/metaslab.h b/usr/src/uts/common/fs/zfs/sys/metaslab.h index 501a5096a9c5..82ed08c728bb 100644 --- a/usr/src/uts/common/fs/zfs/sys/metaslab.h +++ b/usr/src/uts/common/fs/zfs/sys/metaslab.h @@ -44,9 +44,6 @@ typedef struct metaslab_ops { extern metaslab_ops_t *zfs_metaslab_ops; -void metaslab_global_init(void); -void metaslab_global_fini(void); - int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t, metaslab_t **); void metaslab_fini(metaslab_t *); diff --git a/usr/src/uts/common/fs/zfs/sys/range_tree.h b/usr/src/uts/common/fs/zfs/sys/range_tree.h index 98062071187c..005ed932ab27 100644 --- a/usr/src/uts/common/fs/zfs/sys/range_tree.h +++ b/usr/src/uts/common/fs/zfs/sys/range_tree.h @@ -61,15 +61,17 @@ typedef struct range_tree { typedef struct range_seg { avl_node_t rs_node; /* AVL node */ + avl_node_t rs_pp_node; /* AVL picker-private node */ uint64_t rs_start; /* starting offset of this segment */ uint64_t rs_end; /* ending offset (non-inclusive) */ + uint64_t rs_fill; /* actual fill if gap mode is on */ } range_seg_t; struct range_tree_ops { void (*rtop_create)(range_tree_t *rt, void *arg); void (*rtop_destroy)(range_tree_t *rt, void *arg); - void (*rtop_add)(range_tree_t *rt, void *rs, void *arg); - void (*rtop_remove)(range_tree_t *rt, void *rs, void *arg); + void (*rtop_add)(range_tree_t *rt, range_seg_t *rs, void *arg); + void (*rtop_remove)(range_tree_t *rt, range_seg_t *rs, void *arg); void (*rtop_vacate)(range_tree_t *rt, void *arg); }; @@ -78,8 +80,6 @@ typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size); void range_tree_init(void); void range_tree_fini(void); range_tree_t *range_tree_create(range_tree_ops_t *ops, void 
*arg, kmutex_t *lp); -range_tree_t *range_tree_create_custom(range_tree_ops_t *ops, void *arg, - kmutex_t *lp, kmem_cache_t *segcache); void range_tree_destroy(range_tree_t *rt); boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size); void *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size); @@ -91,12 +91,17 @@ void range_tree_set_gap(range_tree_t *rt, uint64_t gap); void range_tree_set_lock(range_tree_t *rt, kmutex_t *lp); void range_tree_add(void *arg, uint64_t start, uint64_t size); +void range_tree_add_fill(void *arg, uint64_t start, uint64_t size, + uint64_t fill); void range_tree_remove(void *arg, uint64_t start, uint64_t size); +void range_tree_remove_overlap(void *arg, uint64_t start, uint64_t size); +void range_tree_remove_fill(void *arg, uint64_t start, uint64_t size, + uint64_t fill, uint64_t fill_left); void range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size); void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg); void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg); -void *range_tree_first(range_tree_t *rt); +range_seg_t *range_tree_first(range_tree_t *rt); #ifdef __cplusplus } diff --git a/usr/src/uts/common/os/kmem.c b/usr/src/uts/common/os/kmem.c index fb2957c277ed..ccdbc59f2174 100644 --- a/usr/src/uts/common/os/kmem.c +++ b/usr/src/uts/common/os/kmem.c @@ -3621,15 +3621,6 @@ kmem_cache_stat(kmem_cache_t *cp, char *name) return (value); } -/* - * Returns the buffer size being allocated by this kmem cache. - */ -size_t -kmem_cache_bufsize(const kmem_cache_t *cp) -{ - return (cp->cache_bufsize); -} - /* * Return an estimate of currently available kernel heap memory. * On 32-bit systems, physical memory may exceed virtual memory, diff --git a/usr/src/uts/common/sys/kmem.h b/usr/src/uts/common/sys/kmem.h index f1bf3b250b0a..e54d83e49933 100644 --- a/usr/src/uts/common/sys/kmem.h +++ b/usr/src/uts/common/sys/kmem.h @@ -128,7 +128,6 @@ extern void kmem_cache_destroy(kmem_cache_t *); extern void *kmem_cache_alloc(kmem_cache_t *, int); extern void kmem_cache_free(kmem_cache_t *, void *); extern uint64_t kmem_cache_stat(kmem_cache_t *, char *); -extern size_t kmem_cache_bufsize(const kmem_cache_t *); extern void kmem_cache_reap_now(kmem_cache_t *); extern void kmem_cache_move_notify(kmem_cache_t *, void *);
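
A note on the extent scoring introduced in the dsl_scan.c hunk above: ext_size_compar() now ranks extents by rs_fill plus a density bonus scaled by fill_weight, so a fully filled extent sorts ahead of a sparsely filled one holding the same amount of queued I/O. The standalone sketch below (not part of the patch) reproduces that arithmetic with toy inputs; the fill_weight value used here is only an example, since the default for zfs_scan_fill_weight is not shown in this diff.

/*
 * Standalone sketch of the extent scoring used by ext_size_compar():
 *
 *	score = fill + (((fill * 100) / size) * fill_weight * fill) / 100
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t
ext_score(uint64_t size, uint64_t fill, uint64_t fill_weight)
{
	return (fill + (((fill * 100) / size) * fill_weight * fill) / 100);
}

int
main(void)
{
	uint64_t fill_weight = 3;	/* example value only */

	/* 1 MiB extent completely filled with queued I/O. */
	printf("dense:  %llu\n",
	    (unsigned long long)ext_score(1 << 20, 1 << 20, fill_weight));

	/* 4 MiB extent holding the same 1 MiB of queued I/O. */
	printf("sparse: %llu\n",
	    (unsigned long long)ext_score(4 << 20, 1 << 20, fill_weight));
	return (0);
}

With these inputs the dense extent scores 4194304 against 1835008 for the sparse one, which is why the q_exts_by_size AVL tree prefers it when the queue picks the next extent to issue.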
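
The two fill arguments to range_tree_remove_fill() are easiest to see in the segment-split case of range_tree_remove_impl(): `fill' is charged to the removed middle, `fill_left' stays with the left remainder, and the balance goes to the new right-hand segment. The following minimal model (again a standalone sketch with toy types, not the real range_seg_t or range tree code) mirrors that bookkeeping.

/* Standalone model of the rs_fill split in range_tree_remove_impl(). */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

typedef struct toy_seg {
	uint64_t rs_start;
	uint64_t rs_end;
	uint64_t rs_fill;
} toy_seg_t;

/* Split `rs' around [start, end); returns the new right-hand segment. */
static toy_seg_t
toy_split(toy_seg_t *rs, uint64_t start, uint64_t end, uint64_t fill,
    uint64_t fill_left)
{
	toy_seg_t newseg;

	assert(rs->rs_start < start && rs->rs_end > end);
	assert(rs->rs_fill >= fill + fill_left);

	newseg.rs_start = end;
	newseg.rs_end = rs->rs_end;
	newseg.rs_fill = rs->rs_fill - (fill + fill_left);

	rs->rs_end = start;
	rs->rs_fill = fill_left;
	return (newseg);
}

int
main(void)
{
	/* Extent [0, 100) carrying 60 bytes of queued I/O. */
	toy_seg_t left = { 0, 100, 60 };
	/* Remove [40, 70), taking 20 bytes of fill, leaving 30 on the left. */
	toy_seg_t right = toy_split(&left, 40, 70, 20, 30);

	printf("left  [%llu, %llu) fill %llu\n",
	    (unsigned long long)left.rs_start,
	    (unsigned long long)left.rs_end,
	    (unsigned long long)left.rs_fill);
	printf("right [%llu, %llu) fill %llu\n",
	    (unsigned long long)right.rs_start,
	    (unsigned long long)right.rs_end,
	    (unsigned long long)right.rs_fill);
	return (0);
}

The result is [0, 40) with fill 30 and [70, 100) with fill 10, so the 60 bytes of fill are conserved across the split, matching how scan_io_queue_gather() hands the consumed byte count to range_tree_remove_fill() with a zero remain_left.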