For some reason the disk access on my dedup=on server is way too heavy, so time to dig in.
echo "::zfs_params" |mdb -k
arc_reduce_dnlc_percent = 0x3
zfs_arc_max = 0x0
zfs_arc_min = 0x0
arc_shrink_shift = 0x5
zfs_mdcomp_disable = 0x0
zfs_prefetch_disable = 0x0
zfetch_max_streams = 0x8
zfetch_min_sec_reap = 0x2
zfetch_block_cap = 0x100
zfetch_array_rd_sz = 0x100000
zfs_default_bs = 0x9
zfs_default_ibs = 0xe
metaslab_aliquot = 0x80000
mdb: variable reference_tracking_enable not found: unknown symbol name
mdb: variable reference_history not found: unknown symbol name
spa_max_replication_override = 0x3
spa_mode_global = 0x3
zfs_flags = 0x0
mdb: variable zfs_txg_synctime not found: unknown symbol name
zfs_txg_timeout = 0x1e
zfs_write_limit_min = 0x2000000
zfs_write_limit_max = 0x5fe47e00
zfs_write_limit_shift = 0x3
zfs_write_limit_override = 0x0
zfs_no_write_throttle = 0x0
zfs_vdev_cache_max = 0x4000
zfs_vdev_cache_size = 0x0
zfs_vdev_cache_bshift = 0x10
vdev_mirror_shift = 0x15
zfs_vdev_max_pending = 0x1
zfs_vdev_min_pending = 0x4
zfs_scrub_limit = 0x1
zfs_no_scrub_io = 0x0
zfs_no_scrub_prefetch = 0x0
zfs_vdev_time_shift = 0x6
zfs_vdev_ramp_rate = 0x2
zfs_vdev_aggregation_limit = 0x20000
fzap_default_block_shift = 0xe
zfs_immediate_write_sz = 0x8000
zfs_read_chunk_size = 0x100000
zfs_nocacheflush = 0x1
zil_replay_disable = 0x0
metaslab_gang_bang = 0x20001
metaslab_df_alloc_threshold = 0x20000
metaslab_df_free_pct = 0x4
zio_injection_enabled = 0x0
zvol_immediate_write_sz = 0x8000
echo ::arc | mdb -k
hits = 2,892M
misses = 85M
demand_data_hits = 716M
demand_data_misses = 12M
demand_metadata_hits = 1,968M
demand_metadata_misses = 19M
prefetch_data_hits = 51M
prefetch_data_misses = 22M
prefetch_metadata_hits = 155M
prefetch_metadata_misses = 29M
mru_hits = 408M
mru_ghost_hits = 1M
mfu_hits = 2,283M
mfu_ghost_hits = 52M
deleted = 46M
recycle_miss = 8M
mutex_miss = 119K
evict_skip = 27M
evict_l2_cached = 2.2T
evict_l2_eligible = 1.7T
evict_l2_ineligible = 1.9T
hash_elements = 1M
hash_elements_max = 1M
hash_collisions = 550M
hash_chains = 243K
hash_chain_max = 21
p = 6,127 MB
c = 8,934 MB
c_min = 1,406 MB
c_max = 11,250 MB
size = 8,934 MB
hdr_size = 170M
data_size = 9,131M
other_size = 33M
l2_hits = 19M
l2_misses = 65M
l2_feeds = 4M
l2_rw_clash = 827
l2_read_bytes = 474G
l2_write_bytes = 1.5T
l2_writes_sent = 1M
l2_writes_done = 1M
l2_writes_error = 0
l2_writes_hdr_miss = 1K
l2_evict_lock_retry = 162
l2_evict_reading = 107
l2_free_on_write = 3M
l2_abort_lowmem = 230
l2_cksum_bad = 0
l2_io_error = 0
l2_size = 26G
l2_hdr_size = 35M
memory_throttle_count = 0
arc_no_grow = 0
arc_tempreserve = 0 MB
arc_meta_used = 2,812 MB
arc_meta_limit = 2,812 MB
arc_meta_max = 2,921 MB
・・・・・?
So this arc_meta_limit thing is sitting at about 2.8 GB, which is 1/4 of c_max. In other words, the ARC metadata size is capped at 1/4 of the whole ARC. On a server that is mainly doing dedup, that's a problem.
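Quick sanity check on that 1/4, just arithmetic on the ::arc numbers above:
c_max / 4 = 11,250 MB / 4 = 2,812.5 MB ≈ arc_meta_limit (2,812 MB)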
The proper way would be to put something like
set zfs:zfs_arc_meta_limit = <value below c_max, in bytes>
in /etc/system and reboot, but for now let's just change every variable that looks related, live.
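For the record, the persistent version matching the value I'm about to poke in (0x20F580000 bytes ≈ 8,437 MB; the number is just what I picked for this box, not a general recommendation) would look something like this in /etc/system:
set zfs:zfs_arc_meta_limit = 0x20F580000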
echo "zfs_arc_meta_limit/Z 0x20F580000" | mdb -kw
echo "arc_meta_limit/Z 0x20F580000" | mdb -kw
echo "arc_meta_max/Z 0x20F580000" | mdb -kw
arc_meta_limit = 8,437 MB
arc_meta_max = 8,437 MB
After leaving it alone for a while, arc_meta_used seems to creep up little by little. arc_no_grow occasionally flips to 1, but that looks like some mechanism to keep things from growing or shrinking too abruptly.
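If you want to watch that without spamming mdb, the same counter should also be visible through the arcstats kstat (a sketch; I'm assuming this Solaris build exposes arc_meta_used there):
kstat -p zfs:0:arcstats:arc_meta_used 60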
Whether this actually calms down the excessive I/O on the dedup server, I honestly can't tell yet. And rebooting this thing would be a bit of a pain.