echo |format
0. c0t0d0 <DEFAULT cyl 3184 alt 2 hd 255 sec 63>
1. c0t1d0 <DEFAULT cyl 3184 alt 2 hd 255 sec 63>
でzpool status
syspool ONLINE 0 0 0
c0t0d0s0 ONLINE 0 0 0
みたいにゃ状況だとすると、prtvtoc /dev/dsk/c0t0d0s0 | fmthard -s - /dev/rdsk/c0t1d0s0
zpool attach -f syspool c0t0d0s0 c0t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c0t1d0s0
でいけるはず。fmthard: Partition 2 specifies the full disk and is not equal
full size of disk. The full disk capacity is
とか、Partition 0 of the disk has an incorrect offset
とか言われたら、formatのとこの作業が抜けてる気味。add_drv -v -i "pci100b,35" ce
みたいにすると/etc/path_to_instにceが4行追加されて、/etc/driver_aliasesにもce "pci100b,35"みたいにゃのが追加される。 ---- errors ---
device s/w h/w trn tot
sd0 0 64 0 64
sd1 0 0 0 0
sd2 0 0 0 0
sd3 0 0 0 0
sd4 0 0 0 0
sd5 0 0 0 0
sd6 0 0 0 0
このまま放置しても動作には影響にゃいし再起動すれば直るんだけどディスク入れ替えたのにカウンタがリセットされにゃいというのは困ったものにゃのでオンラインでこれをクリアしたい。error_reset.sh:
# error_reset.sh — clear the soft/hard/transport error counters of sd instance
# $1 online, by zeroing the kstat words directly with mdb kernel writes
# (the counters otherwise persist until the next reboot).
# Usage: ./error_reset.sh <sd-instance>   (e.g. 0 for sd0)
[ $# -eq 1 ] || { echo "usage: $0 <sd-instance>" >&2; exit 2; }
echo "target : sd$1"

# Walk the kernel structures: soft-state pointer for this instance, then
# sd_lun.un_errstats (kstat_t *), then kstat_t.ks_data (struct sd_errstats *).
sd=$(echo "*sd_state::softstate $1" | mdb -kw)
[ -n "$sd" ] || { echo "no soft state for sd$1" >&2; exit 1; }
es=$(echo "$sd::print struct sd_lun un_errstats" | mdb -k | cut -d" " -f3)
ks=$(echo "$es::print kstat_t ks_data" | mdb -k | cut -d" " -f3)

# Zero each 32-bit error counter in place (mdb's /W writes a 32-bit word).
# Same order as before: hard, soft, transport.
for stat in sd_harderrs sd_softerrs sd_transerrs; do
  addr=$(echo "$ks::print -a struct sd_errstats $stat.value.ui32" | mdb -k | cut -d" " -f1)
  echo "$addr/W 0" | mdb -kw
done
こういう.shでクリアできるようだ。./error_reset.sh 0
target : sd0
0xffffff03eec71d30: 0x40 = 0x0
0xffffff03eec71d00: 0 = 0x0
0xffffff03eec71d60: 0 = 0x0
とこんにゃ感じでクリアできる。echo ::arc | mdb -k
hits = 6,579,098,581
misses = 452,069,693
demand_data_hits = 1,568,821,337
demand_data_misses = 35,059,825
demand_metadata_hits = 3,224,194,606
demand_metadata_misses = 45,501,861
prefetch_data_hits = 125,750,375
prefetch_data_misses = 128,235,341
prefetch_metadata_hits = 1,660,332,263
prefetch_metadata_misses = 243,272,666
mru_hits = 1,426,213,514
mru_ghost_hits = 13,497,486
mfu_hits = 3,403,854,705
mfu_ghost_hits = 281,484,703
deleted = 250,271,016
recycle_miss = 65,465,441
mutex_miss = 415,133
evict_skip = 414,384,099
evict_l2_cached = 11,321,550,825,472
evict_l2_eligible = 6,392,283,271,680
evict_l2_ineligible = 9,111,346,605,568
hash_elements = 1,457,128
hash_elements_max = 2,131,419
hash_collisions = 2,674,957,304
hash_chains = 255,459
hash_chain_max = 26
p = 5,607 MB
c = 8,820 MB
c_min = 1,406 MB
c_max = 11,255 MB
size = 8,820 MB
hdr_size = 147,103,056
data_size = 8,951,655,424
other_size = 33,156,048
l2_hits = 52,579,407
l2_misses = 399,487,072
l2_feeds = 12,909,282
l2_rw_clash = 3,235
l2_read_bytes = 1,967,823,256,576
l2_write_bytes = 6,513,012,548,096
l2_writes_sent = 7,047,789
l2_writes_done = 7,047,789
l2_writes_error = 0
l2_writes_hdr_miss = 2,309
l2_evict_lock_retry = 1,405
l2_evict_reading = 184
l2_free_on_write = 5,408,775
l2_abort_lowmem = 418
l2_cksum_bad = 0
l2_io_error = 0
l2_size = 61,163,917,312
l2_hdr_size = 127,183,488
memory_throttle_count = 0
arc_no_grow = 0
arc_tempreserve = 0 MB
arc_meta_used = 2,813 MB
arc_meta_limit = 2,813 MB
arc_meta_max = 6,029 MB
c_maxが11Gほどあるが、これはマシンメモリが12Gにゃので-1gくらいした容量が最大ということににゃる。L2ARC 32GB : arc_meta_limit 1.5GB : 物理mem 7GB
L2ARC 64GB : arc_meta_limit 3GB : 物理mem 14GB
L2ARC 192GB : arc_meta_limit 9GB : 物理mem 39GB
L2ARC 256GB : arc_meta_limit 12GB : 物理mem 51GB
L2ARC 512GB : arc_meta_limit 25GB : 物理mem 101GB
と言った感じ。必要物理memはarc_meta_limitをデフォの1/4から3/4あたりまで増やせば1/3くらいで済むはずだが、L2ARCはあまり賢くにゃいし書き込み中は読み込みも遅くにゃるSSDが多いので、metaじゃにゃいARCが十分取れるメモリ量でにゃければ逆効果ににゃるかもしれにゃい。平均ブロックサイズが細かいと数倍食うわけで、まぁ場合によってはちょっとしんどいかにゃぁという。faulted: 1
aux_state: 'err_exceeded'
とか付いてるが表面上は正常。errors: Permanent errors have been detected in the following files:
<metadata>:<0x0>
/volumes/tank1/
/volumes/tank1/share1/
みたいにゃのが居残る。アクセスは普通に出来てるあたりがまたキモイ。iscsiadm list initiator-node
iscsiadm list target -v
iscsiadm list target-param -v
とかで。iscsiadm modify initiator-node -T recv-login-rsp-timeout=5
iscsiadm modify initiator-node -T polling-login-delay=2
とか設定して実験する予定。さっぱりHITしにゃいんだがデフォでいいということかしら??find / -name "vmxnet3*" -exec tar -rf vmxnet3.tar {} \;
したものをCE版に上書き展開。再起動すると一応動いた。./vmware-install.pl -d
で失敗する。dladm show-ether | awk 'NR>1 {print $1}' | sort |
while read a ; do
  # For every data link, turn off all advertised speed/duplex capabilities
  # below 1000FDX, then print the resulting property value for confirmation.
  for p in en_10hdx_cap en_10fdx_cap en_100hdx_cap en_100fdx_cap en_1000hdx_cap ; do
    dladm set-linkprop -p "$p=0" "$a"
    dladm show-linkprop -p "$p" "$a" | awk 'NR>1 {print}'
  done
done
が、bnxはdladmから設定出来にゃい気味。ndd -set /dev/bnx0 adv_1000hdx_cap 0
ndd -set /dev/bnx0 adv_100fdx_cap 0
ndd -set /dev/bnx0 adv_100hdx_cap 0
ndd -set /dev/bnx0 adv_10fdx_cap 0
ndd -set /dev/bnx0 adv_10hdx_cap 0
ndd -get /dev/bnx1 adv_1000hdx_cap
ndd -get /dev/bnx1 adv_100fdx_cap
ndd -get /dev/bnx1 adv_100hdx_cap
ndd -get /dev/bnx1 adv_10fdx_cap
ndd -get /dev/bnx1 adv_10hdx_cap
すれば通ってるようだが、これ再起動で元戻っちゃうからrcに書かにゃいとダメ。めどい。/etc/systemに書けるんだろうけど怪しいんでrcがベターだろう。dladm show-ether | grep "^bnx" | while read a b ; do
  # For each bnx interface, clear every advertised capability below 1000FDX
  # via ndd. These settings do not survive reboot, so run this from an rc
  # script.
  for p in adv_1000hdx_cap adv_100fdx_cap adv_100hdx_cap adv_10fdx_cap adv_10hdx_cap ; do
    ndd -set "/dev/$a" "$p" 0
  done
done
とかか。rcに書くにゃらbnxに限定せず全部これでも良いとも言える。./MegaCli -LDInfo -Lall -aALL
./MegaCli -AdpPr -start -aALL
./MegaCli -LDCC -Start -LALL -aALL
あたりが使える。./arcconf GETCONFIG 1
./arcconf datascrub 1 period 10 noprompt
./arcconf TASK start 1 LOGICALDRIVE 0 verify_fix noprompt
./arcconf TASK start 1 DEVICE 0 1 verify_fix noprompt
とかだろうか。dladm show-ether
dladm show-link
ifconfig nic0 plumb
ifconfig -a
echo 123.123.123.123>/etc/hostname.nic0
echo 123.123.123.123 255.255.0.0>>/etc/netmasks
echo 123.123.123.1>/etc/defaultrouter
echo hostname>/etc/nodename
echo domain local>/etc/resolv.conf
echo nameserver 123.123.123.1>>/etc/resolv.conf
svcadm disable svc:/network/physical:nwam
svcadm enable svc:/network/physical:default
cat /etc/nsswitch.dns > /etc/nsswitch.conf
ifconfig -a
init 6
ping 123.123.123.123
ping yahoo.com
ntpdate pool.ntp.org
pfexec pkg install SUNWipkg
pfexec pkg image-update
init 6
pfexec pkg image-update
init 6
pfexec pkg image-update
init 6
zpool upgrade rpool
zfs upgrade -r rpool
echo | format
0. c8t0d0 <DEFAULT cyl 290 alt 2 hd 255 sec 252>
1. c8t1d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
2. c9t0d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
3. c9t1d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
format c8t1d0 fdisk create SOLARIS2 10GB(シリンダで1stディスクと似たようにゃ感じに指定)
format c9t0d0 fdisk create SOLARIS2 10GB(シリンダで1stディスクと似たようにゃ感じに指定)
format c9t1d0 fdisk create SOLARIS2 10GB(シリンダで1stディスクと似たようにゃ感じに指定)
但しHDDによってセクタ/シリンダとか違ってくるので違うHDD間でミラーする時は注意
format -e c8t1d0 label SMI
format -e c9t0d0 label SMI
format -e c9t1d0 label SMI
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c8t1d0s2
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c9t0d0s2
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c9t1d0s2
zpool attach -f rpool c8t0d0s0 c8t1d0s0
zpool attach -f rpool c8t0d0s0 c9t0d0s0
zpool attach -f rpool c8t0d0s0 c9t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c8t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c9t0d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c9t1d0s0
zpool list
zpool status
format c8t0d0 fdisk create SOLARIS2 99%
format c8t1d0 fdisk create SOLARIS2 99%
format c9t0d0 fdisk create SOLARIS2 99%
format c9t1d0 fdisk create SOLARIS2 99%
zpool create pool1 raidz c8t0d0p2 c8t1d0p2 c9t0d0p2 c9t1d0p2 c10t0d0p2 c10t1d0p2
zfs create -o atime=off -o casesensitivity=mixed -o snapdir=visible -o aclinherit=passthrough pool1/share
zfs set compression=on pool1/share
zfs set recordsize=64k pool1/share
zfs set aclmode=passthrough pool1/share
zfs set sharenfs=on pool1/share
chmod 777 /pool1/share
vi /etc/hosts
share
echo set nfs:nfs_allow_preepoch_time = 1 >> /etc/system
echo set nfs:nfs3_max_threads = 1024 >> /etc/system
echo set nfs:nfs3_nra = 32 >> /etc/system
echo set zfs:zfs_nocacheflush = 1 >> /etc/system
echo set zfs:zfs_txg_timeout = 5 >> /etc/system
echo set zfs:zfs_scrub_delay = 100 >> /etc/system
echo set zfs:zfs_scrub_limit = 1 >> /etc/system
echo set zfs:zil_disable = 1 >> /etc/system
echo set zfs:zfs_vdev_max_pending = 10 >> /etc/system
crontab -e
0 4 * * * svcadm restart idmap
0 * * * * ntpdate pool.ntp.org
svcadm restart cron
pkg install SUNWsmbskr
pkg install SUNWsmbs
init 6
svcadm enable -r smb/server
vi resolv.conf
zfs set sharesmb=on pool1/share
zfs set sharesmb=name=share1 pool1/share
smbadm join -w WORKGROUP
echo other password required pam_smb_passwd.so.1 nowarn >> /etc/pam.conf
passwd 既存ユーザ(smbpasswd相当)
smbadm join -u domain_admin DOMAIN
svcadm disable smb/server
svcadm enable -r smb/server
chmod -R A=owner@:full_set:fd:allow /pool1/share
chmod -R A+group@:full_set:fd:allow /pool1/share
chmod -R A+everyone@:read_set:fd:allow /pool1/share
sharemgr show -vp
init 6
useradd -b /export/home/ -g staff -m -s /usr/bin/bash user1
vi /etc/auto_home
usermod -d /home/user1 user1
passwd user1
vi /etc/user_attr
vi /rpool/boot/grub/menu.lst
svcadm disable gdm
visudo
user1 ALL=(ALL) NOPASSWD: ALL
Defaults:user1 !env_reset
mkdir -p /vmfs/volumes
cd /vmfs/volumes/
host=`cat /etc/nodename`
ln -s /pool1/share/ ds_$host
ssh-keygen -t rsa
cd
cd .ssh
vi authorized_keys
dladm show-ether
dladm show-link
ifconfig -a
ifconfig nic1 plumb
ifconfig nic2 plumb
dladm create-aggr -l nic1 aggr1
dladm add-aggr -l nic2 aggr1
ifconfig aggr1 plumb
ifconfig aggr1 123.123.123.124 netmask 255.255.0.0 up
ifconfig nic0 down
dladm add-aggr -l nic0 aggr1
dladm show-aggr
cp /etc/hostname.nic1 /etc/hostname.aggr1
dladm modify-aggr -P L2,L3,L4 aggr1
dladm modify-aggr -l passive aggr1
dladm show-aggr -L
dladm show-aggr -x
init 6
定期snapshotをcronに登録とか、mail送信用スクリプトを適当にゃ所にコピーとか、定期ディスク表面検査とか、ssh関連とかvisudoとか適時設定のこと。 Total disk size is 60800 cylinders
Cylinder size is 48195 (512 byte) blocks
Partition Status Type Start End Length %
========= ====== ============ ===== === ====== ===
1 Active Solaris2 0 1305 1306 2
format [2nd] fdisk Total disk size is 60800 cylinders
Cylinder size is 32130 (512 byte) blocks
Partition Status Type Start End Length %
========= ====== ============ ===== === ====== ===
format [2nd] fdisk create SOLARIS2 c 1 X active Partition Status Type Start End Length %
========= ====== ============ ===== === ====== ===
1 Active Solaris2 1 1959 1959 3
format [1st] partition printPart Tag Flag Cylinders Size Blocks
0 root wm 1 - 1302 29.92GB (1302/0/0) 62749890
1 unassigned wm 0 0 (0/0/0) 0
2 backup wu 0 - 1302 29.94GB (1303/0/0) 62798085
format [2nd] partition printTotal disk cylinders available: 1957 + 2 (reserved cylinders)
Part Tag Flag Cylinders Size Blocks
0 unassigned wm 0 0 (0/0/0) 0
1 unassigned wm 0 0 (0/0/0) 0
2 backup wu 0 - 1956 29.98GB (1957/0/0) 62878410
format [2nd] partition 0 root wm 3 XcPart Tag Flag Cylinders Size Blocks
0 root wm 3 - 1955 29.92GB (1953/0/0) 62749890
1 unassigned wm 0 0 (0/0/0) 0
2 backup wu 0 - 1956 29.98GB (1957/0/0) 62878410
format [2nd] partition labeldd if=/dev/rdsk/c8t0d0p0 bs=1048576 count=9000000 | wc -c
とかで。値の9TBは大きい目の数字というだけ。iostat -En | grep -v "^Vendor:" |grep -v "^Media Error:" | grep -v "^Illegal Request:" |
sed -e 's/Soft Errors:.*//' -e 's/ bytes.*//' -e 's/ *$//' -e 's/.* //' -e 's/^<//' -e 's/\n//' |
while read a ; do read b ; echo $a $b ; done
とwcの出力を比較すると、一応全容量読んで止まってくれてる感じはするんだよね。まぁあんまし検証してにゃいけど。iostat -En | grep -v "^Vendor:" |grep -v "^Media Error:" | grep -v "^Illegal Request:" |
# Reduce iostat -En output to alternating lines of device name / capacity in
# bytes: strip the error summary and the "bytes" suffix, drop trailing blanks,
# keep only the last whitespace-separated token, and remove a leading '<'.
# (The final s/\n// is effectively a no-op: sed's pattern space holds no
# newline.)
sed -e 's/Soft Errors:.*//' -e 's/ bytes.*//' -e 's/ *$//' -e 's/.* //' -e 's/^<//' -e 's/\n//' |
# Pair the alternating lines: $a = device name, $b = capacity in bytes.
while read a ; do read b ; echo $a $b ; done |
while read a b ; do
# Skip devices whose p0 slice cannot even be opened (removed/offline disks).
dd if=/dev/rdsk/${a}p0 bs=512 count=1 >/dev/null 2>/dev/null || continue
echo $b $a start
# Read the entire disk surface (capacity rounded up to the next MiB) and
# report the byte count actually read, to compare against iostat's size.
dd if=/dev/rdsk/${a}p0 bs=1048576 count=`expr $b / 1024 / 1024 + 1` | wc -c
echo $b $a end
done
みたいにゃ感じ?printf "analyze\nsetup\nn\n\n\n\n1\n\n\n\n\n\n\n\n\nread\ny\n" | format /dev/rdsk/c0t0d0p0
といった形式で再度作り直し。うーん、これは酷い(笑)iostat -en | sed -e 's/.* //' | grep [0-9] |
while read a ; do test -e /dev/rdsk/$a && echo $a ; done | sort | uniq
でにゃんとか。iostat -en | sed -e 's/.* //' | grep [0-9] |
# Keep only tokens that actually exist as /dev/rdsk entries (filters out
# iostat header/summary lines), sorted and de-duplicated.
# NOTE(review): [0-9] is unquoted — it could glob-expand if a matching
# filename exists in the current directory; "[0-9]" would be safer.
while read a ; do test -e /dev/rdsk/$a && echo $a ; done | sort | uniq |
# Drive format's interactive analyze>setup>read prompts via printf to run a
# (non-destructive) surface read test on every detected disk in turn.
while read a ; do printf "analyze\nsetup\nn\n\n\n\n1\n\n\n\n\n\n\n\n\nread\ny\n" | format /dev/rdsk/$a ; done
これで一応全部舐めてくれるかにゃ?# splashimage /boot/solaris.xpm
# foreground d25f00
# background 115d93
# kernel$ /platform/i86pc/kernel/$ISADIR/unix -B $ZFS-BOOTFS,console=graphics
kernel$ /platform/i86pc/kernel/$ISADIR/unix -B $ZFS-BOOTFS
svcadm disable gdm
svcadm disable gdm
したのは良かったのだが、このマシンを再起動するといつまで経っても起動が終わらにゃい。ifconfig nic2 plumb
dladm create-aggr -l nic2 aggr1
ifconfig aggr1 plumb
ifconfig aggr1 xxx.xxx.xxx.xxx netmask 255.255.0.0 up
ifconfig nic1 down
dladm add-aggr -l nic1 aggr1
dladm show-aggr
dladm show-aggr -L
dladm show-aggr -x
dladm modify-aggr -P L2,L3,L4 aggr1
cp /etc/hostname.nic1 /etc/hostname.aggr1
init 6
みたいにゃ手順にゃんだが、途中でそのnic1をDownさせるのでリモートで実行する場合には一気にやる必要がある。zfs set volsize=4G rpool/swap
reboot
わ、おてがるぅzfs create -V 4G pool1/swap2
swap -a /dev/zvol/dsk/pool1/swap2
vi /etc/vfstab
とかする必要があるcat /etc/nsswitch.dns > /etc/nsswitch.conf
ntpdate pool.ntp.org
pfexec pkg install SUNWipkg
pfexec pkg image-update
init 6
echo | format
0. c8t0d0 <DEFAULT cyl 290 alt 2 hd 255 sec 252>
1. c8t1d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
2. c9t0d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
3. c9t1d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
4. c10t0d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
5. c10t1d0 <DEFAULT cyl 291 alt 2 hd 255 sec 252>
format c8t1d0 fdisk create SOLARIS2 10GB(シリンダで指定)
format c9t0d0 fdisk create SOLARIS2 10GB(シリンダで指定)
format c9t1d0 fdisk create SOLARIS2 10GB(シリンダで指定)
format c10t0d0 fdisk create SOLARIS2 10GB(シリンダで指定)
format c10t1d0 fdisk create SOLARIS2 10GB(シリンダで指定)
但しHDDによってセクタ/シリンダとか違ってくるので違うHDD間でミラーする時は注意
format -e c8t1d0 label SMI
format -e c9t0d0 label SMI
format -e c9t1d0 label SMI
format -e c10t0d0 label SMI
format -e c10t1d0 label SMI
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c8t1d0s2
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c9t0d0s2
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c9t1d0s2
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c10t0d0s2
prtvtoc /dev/rdsk/c8t0d0s2 | fmthard -s - /dev/rdsk/c10t1d0s2
zpool attach -f rpool c8t0d0s0 c8t1d0s0
zpool attach -f rpool c8t0d0s0 c9t0d0s0
zpool attach -f rpool c8t0d0s0 c9t1d0s0
zpool attach -f rpool c8t0d0s0 c10t0d0s0
zpool attach -f rpool c8t0d0s0 c10t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c8t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c9t0d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c9t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c10t0d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c10t1d0s0
zpool list
zpool status
format c8t0d0 fdisk create SOLARIS2 99%
format c8t1d0 fdisk create SOLARIS2 99%
format c9t0d0 fdisk create SOLARIS2 99%
format c9t1d0 fdisk create SOLARIS2 99%
format c10t0d0 fdisk create SOLARIS2 99%
format c10t1d0 fdisk create SOLARIS2 99%
zpool create pool1 raidz c8t0d0p2 c8t1d0p2 c9t0d0p2 c9t1d0p2 c10t0d0p2 c10t1d0p2
zfs create -o atime=off -o casesensitivity=mixed -o snapdir=visible -o aclinherit=passthrough pool1/share
zfs set compression=on pool1/share
zfs set recordsize=64k pool1/share
zfs set aclmode=passthrough pool1/share
zfs set sharenfs=on pool1/share
chmod 777 /pool1/share
vi hosts
share
echo set nfs:nfs_allow_preepoch_time = 1 >> /etc/system
echo set nfs:nfs3_max_threads = 32 >> /etc/system
echo set nfs:nfs3_nra = 32 >> /etc/system
echo set zfs:zfs_nocacheflush = 1 >> /etc/system
echo set zfs:zfs_txg_timeout = 5 >> /etc/system
echo set zfs:zil_disable = 1 >> /etc/system
crontab -e
0 4 * * * svcadm restart idmap
0 * * * * ntpdate pool.ntp.org
svcadm restart cron
pkg install SUNWsmbskr
pkg install SUNWsmbs
init 6
svcadm enable -r smb/server
vi resolv.conf
zfs set sharesmb=on pool1/share
zfs set sharesmb=name=share1 pool1/share
smbadm join -w WORKGROUP
echo other password required pam_smb_passwd.so.1 nowarn >> /etc/pam.conf
passwd 既存ユーザ(smbpasswd相当)
smbadm join -u domain_admin DOMAIN
svcadm disable smb/server
svcadm enable -r smb/server
chmod -R A=owner@:full_set:fd:allow /pool1/share
chmod -R A+group@:full_set:fd:allow /pool1/share
chmod -R A+everyone@:read_set:fd:allow /pool1/share
sharemgr show -vp
init 6
useradd -b /export/home/ -g staff -m -s /usr/bin/bash user1
vi /etc/auto_home
usermod -d /home/user1 user1
passwd user1
vi /etc/user_attr
vi /rpool/boot/grub/menu.lst
svcadm disable gdm
visudo
user1 ALL=(ALL) NOPASSWD: ALL
Defaults:user1 !env_reset
定期snapshotをcronに登録とか、mail送信用スクリプトを適当にゃ所にコピーとか、定期ディスク表面検査とか、ssh関連とかvisudoとか適時設定のこと。dladm show-link -s -i 1 e1000g0
とかで。dladm show-link -s -i 1 bge0 | perl -nple 's/(\d{1,3})(?=(?:\d\d\d)+(?!\d))/$1,/g;;pr'
で多少見やすくにゃるにゃset nfs:nfs3_max_threads = 32
set nfs:nfs3_nra = 32
set zfs:zfs_nocacheflush = 1
とかを/etc/systemに。nfs_host:/dir/dir /vmfs/volumes/ds_name nfs4 rw,rsize=65536,wsize=65536,hard,intr 0 0
とか。オプション部分は必要に応じて変える。 NAME STATE READ WRITE CKSUM
pool1 ONLINE 0 0 0
mirror ONLINE 0 0 0
c7t3d0 ONLINE 1 0 0
c7t4d0 ONLINE 0 0 0
みたいにゃのが出てたけど放置してたら、今度はハングした(笑) NAME STATE READ WRITE CKSUM
pool1 DEGRADED 0 0 0
mirror DEGRADED 0 0 0
c7t3d0 DEGRADED 0 0 97 too many errors
c7t4d0 ONLINE 0 0 0
みたいにゃことに。install
GUIで適度にIPとか変更。
cat /etc/nsswitch.dns > /etc/nsswitch.conf
ntpdate pool.ntp.org
pfexec pkg install SUNWipkg
pfexec pkg image-update
init 6
format fdisk c8t0d0 create partition 2
prtvtoc /dev/rdsk/c8t0d0s0 | fmthard -s - /dev/rdsk/c8t1d0s0
zpool attach -f rpool c8t0d0s0 c8t1d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c8t0d0s0
installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c8t1d0s0
zpool list
zpool status
format fdisk c8t0d0 create partition 2
format fdisk c8t1d0 create partition 2
zpool create pool1 c8t0d0p2 c8t1d0p2 c9t0d0 c9t1d0 c10t0d0 c10t1d0
zfs create -o atime=off -o casesensitivity=mixed -o snapdir=visible -o aclmode=passthrough -o aclinherit=passthrough pool1/share
zfs set sharenfs=on pool1/share
echo set nfs:nfs_allow_preepoch_time = 1 >> /etc/system
echo set nfs:nfs3_max_threads = 32 >> /etc/system
echo set nfs:nfs3_nra = 32 >> /etc/system
echo set zfs:zfs_nocacheflush = 1 >> /etc/system
echo set zfs:zil_disable = 1 >> /etc/system
crontab -e
0 4 * * * svcadm restart idmap
0 * * * * ntpdate pool.ntp.org
svcadm restart cron
pkg install SUNWsmbskr
pkg install SUNWsmbs
init 6
svcadm enable -r smb/server
zfs set sharesmb=on pool1/share
zfs set sharesmb=name=share1 pool1/share
smbadm join -w WORKGROUP
echo other password required pam_smb_passwd.so.1 nowarn >> /etc/pam.conf
passwd 既存ユーザ(smbpasswd相当)
vi resolv.conf
share
chmod 777 /pool1/share
sharemgr show -vp
vi resolv.conf
reboot
若しくは
smbadm join -u domain_admin DOMAIN
svcadm disable smb/server
svcadm enable -r smb/server
chmod -R A=owner@:full_set:fd:allow /pool1/share
chmod -R A+group@:full_set:fd:allow /pool1/share
chmod -R A+everyone@:read_set:fd:allow /pool1/share
ただしあちこちbuggyというか、やっぱダメだこのOSsvcadm disable smb/server
svcadm disable samba
/opt/csw/bin/pkgutil -U
/opt/csw/bin/pkgutil -a | grep samba
/opt/csw/bin/pkgutil -i samba
/opt/csw/bin/smbd -b
/etc/opt/csw/samba/smb.conf
/opt/csw/bin/smbpasswd -a hoge
a=(cswnmbd default cswwinbindd cswsmbd);for i in ${a[@]}; do svcadm restart cswsamba:$i; done
あたりでにゃんとか。これで良いのかどうか不安だが。unix charset = UTF-8
dos charset = CP932
display charset = UTF-8
ea support = yes
store dos attributes = yes
fake oplocks = yes
workgroup = WORKGROUP
smbpasswd -a hogeOpenSolaris 2009.06をISOからinst。
pfexec pkg image-update
pkg set-publisher -P -O http://pkg.opensolaris.org/dev/ opensolaris.org
pfexec pkg image-update
zpool upgrade
zfs upgrade
非RAIDにゃzpool作成
zfs create -o atime=off -o casesensitivity=mixed -o compression=on -o snapdir=visible -o aclmode=passthrough -o aclinherit=passthrough
zfs set dedup=on
zfs set sharesmb=on
zfs set sharenfs=on
echo set zfs:zfs_nocacheflush = 1 >> /etc/system
この状態で50gほどのvmイメージ類をsmbで順次書き込み。cat /etc/nsswitch.dns > /etc/nsswitch.conf
pfexec pkg install SUNWipkg
pfexec pkg image-update
init 6
format
HDDその1に2ndパーティション作る。
zpool create -f pool1 c8t0d0p2 c8t1d0 c8t2d0 c8t3d0 c8t4d0 c8t5d0
容量違うけど無視
zpool list
zpool status
zfs create -o atime=off -o casesensitivity=mixed -o compression=on -o snapdir=visible -o aclmode=passthrough -o aclinherit=passthrough pool1/share
zfs set sharenfs=on pool1/share
vi resolv.conf
share
chmod 777 /pool1/share
pkg install SUNWsmbskr
pkg install SUNWsmbs
svcadm enable -r smb/server
zfs set sharesmb=on pool1/share
zfs set sharesmb=name=share pool1/share
echo set nfs:nfs_allow_preepoch_time = 1 >> /etc/system
echo set nfs:nfs3_max_threads = 32 >> /etc/system
echo set nfs:nfs3_nra = 32 >> /etc/system
echo set zfs:zil_disable = 1 >> /etc/system
echo set zfs:zfs_nocacheflush = 1 >> /etc/system
sharemgr show -vp
ntpdate pool.ntp.org
vi resolv.conf
reboot
smbadm join -w WORKGROUP
echo other password required pam_smb_passwd.so.1 nowarn >> /etc/pam.conf
passwd 既存ユーザ(smbpasswd相当)
若しくは
smbadm join -u domain_admin DOMAIN
svcadm disable smb/server
svcadm enable -r smb/server
chmod -R A=owner@:full_set:fd:allow /pool1/share
chmod -R A+group@:full_set:fd:allow /pool1/share
chmod -R A+everyone@:read_set:fd:allow /pool1/share
とまぁこんにゃ感じでやってみたんだけど、あまりに勝手が違ってよく分からにゃい。