NAS Solaris

por | 18 marzo, 2009

devfsadm

zpool create nas c3t600A0B80004934D60000048D49C03492d0
echo $?

nas 398G 25G 373G 7% /nas

Para añadir más espacio
zpool add nas c4t600A0B80004934D600000ACB4C40287Ad0

nas 663G 25G 638G 4% /nas

#zpool status
pool: nas
state: ONLINE
scrub: none requested
config:

NAME STATE READ WRITE CKSUM
nas ONLINE 0 0 0
c4t600A0B80004934D6000004B949CF782Cd0 ONLINE 0 0 0
c4t600A0B80004934D600000ACB4C40287Ad0 ONLINE 0 0 0

errors: No known data errors

pool: rpool
state: ONLINE
scrub: none requested
config:

NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
c2t0d0s0 ONLINE 0 0 0

errors: No known data errors

pool: webpool
state: UNAVAIL
status: One or more devices could not be opened. There are insufficient
replicas for the pool to continue functioning.
action: Attach the missing device and online it using 'zpool online'.
see: http://www.sun.com/msg/ZFS-8000-3C
scrub: none requested
config:

NAME STATE READ WRITE CKSUM
webpool UNAVAIL 0 0 0 insufficient replicas
c0t1d0 UNAVAIL 0 0 0 cannot open

share nas

svcadm -v enable -r network/nfs/server

share -F nfs -o rw /nas

PARA QUE PERSISTA
#vi /etc/dfs/dfstab
"/etc/dfs/dfstab" 12 lines, 397 characters

# Place share(1M) commands here for automatic execution
# on entering init state 3.
#
# Issue the command 'svcadm enable network/nfs/server' to
# run the NFS daemon processes and the share commands, after adding
# the very first entry to this file.
#
# share [-F fstype] [ -o options] [-d "<text>"] <pathname> [resource]
# e.g.,
# share -F nfs -o rw=engineering -d "home dirs" /export/home2
share -F nfs -o rw /nas

To verify the share exports:

#exportfs

Run the following command to mount from the client side:

mount -F nfs server:/nas /mountpoint