Advertisements

Category Archives: Solaris

Solaris 9 Branded Zone was not starting ftp when running kill -HUP

I have a Solaris 9 Branded Zone

root@solaris9:/ # uname -a
SunOS solaris9 5.9 Generic_Virtual sun4v sparc sun4v

Configured to run FTP

root@solaris9:/ # grep ftp /etc/inetd.conf
# ftp telnet shell login exec tftp finger printer
# TFTPD – tftp server (primarily used for booting)
#tftp dgram udp6 wait root /usr/sbin/in.tftpd in.tftpd -s /tftpboot
ftp stream tcp6 nowait root /usr/sbin/in.ftpd in.ftpd -l

But it was not working

root@solaris9:/ # ps -ef | grep ftp
root 10137 13230 0 13:31:28 pts/4 0:00 grep ftp

root@solaris9:/ # ps -ef | grep inet
root 12579 13230 0 13:31:34 pts/4 0:00 grep inet
root 1325 12833 0 Mar 12 ? 0:00 /usr/sbin/inetd -s start

Tried to kill -HUP but still not working

root@solaris9:/ # kill -HUP 1325

root@solaris9:/ # netstat -an | grep 21 | grep LISTEN
142.40.236.158.1521 *.* 0 0 1048576 0 LISTEN
142.40.236.10.1521 *.* 0 0 1048576 0 LISTEN

Stopped and started inetsvc

root@solaris9:/ # /etc/init.d/inetsvc stop
root@solaris9:/ # /etc/init.d/inetsvc start

root@solaris9:/ # ps -ef | grep inet
root 12098 12833 0 13:49:02 ? 0:00 /usr/sbin/inetd -s
root 15358 3734 0 13:49:05 pts/4 0:00 grep inet

FTP working again

root@solaris9:/ # netstat -an | grep 21 | grep LISTEN
142.40.236.158.1521 *.* 0 0 1048576 0 LISTEN
142.40.236.10.1521 *.* 0 0 1048576 0 LISTEN
*.21 *.* 0 0 1048576 0 LISTEN
*.21 *.* 0 0 1048576 0 LISTEN

Advertisements

UXMON: SSHD Daemon is not running or not doing it properly, please check

Node : solaris.setaoffice.com
Node Type : Sun SPARC (HTTPS)
Severity : normal
OM Server Time: 2016-09-10 08:03:10
Message : UXMON: SSHD Daemon is not running or not doing it properly, please check
Msg Group : OS
Application : sshd_mon
Object : sshd
Event Type :
not_found

Instance Name :
not_found

Instruction : It has been detected an SSH installation but the SSHD is not running
Please check SSH status, because it might happen also there are still some ssh spawned processes running but the father has died.

Note that if the SSH is not available this might prevent users log in the server and even impact some applications.

HPOM is complaining that sshd is not running, but it obviously is running, because you’re connected to the server using ssh

root@solaris:~ # /var/opt/OV/bin/instrumentation/UXMONbroker -check sshdmon
Fri Sep 23 11:45:46 2016 : INFO : UXMONsshdmon is running now, pid=2250
Fri Sep 23 11:45:46 2016 : SSHDMON: SSHD – Not running
mv: /dev/null and /dev/null are identical
Fri Sep 23 11:45:46 2016 : INFO : UXMONsshdmon end, pid=2250

Check directory /var/run

root@solaris:/var/run # ls -la
total 16
drwxr-xr-x 4 root other 5 Sep 23 11:50 .
drwxr-xr-x 44 root sys 50 Aug 16 11:04 ..
-rw——- 1 root root 6 Jul 7 11:24 ds_agent.pid
drwxr-xr-x 13 root root 13 Aug 10 15:33 install_engine
drwx–x–x 2 root sys 2 Jul 6 14:27 sudo

There should be many files in /var/run. On a healthy server it looks like this:

root@solaris:/var/run # ls -l
total 272
-rw——- 1 root root 0 Sep 10 21:27 AdDrEm.lck
drwxr-xr-x 3 root sys 183 Sep 10 21:43 cacao
-rw-rw-rw- 1 root bin 14 Sep 23 09:20 cdrom_rcm.conf
drwxr-xr-x 2 daemon daemon 183 Sep 23 12:18 daemon
-rw-r—– 1 root root 6 Sep 23 10:41 did_reloader.lock
-rw——- 1 root root 5 Sep 10 21:27 ds_agent.pid
Drw-r—– 1 root root 0 Sep 10 21:28 event_listener_proxy_door
Drw-r–r– 1 root root 0 Sep 10 21:40 fed_doorglobal
Drw-r–r– 1 root root 0 Sep 10 21:27 hotplugd_door
Drw-r–r– 1 root root 0 Sep 10 21:28 ifconfig_proxy_doorglobal
-rw——- 1 root root 0 Sep 10 21:26 ipsecconf.lock
Dr–r–r– 1 daemon daemon 0 Sep 10 21:26 kcfd_door
-rw——- 1 root root 0 Sep 14 09:07 lockf_raidctl
Dr–r–r– 1 root root 0 Sep 10 21:26 name_service_door
-rw-r–r– 1 root root 8 Sep 10 21:40 nfs4_domain
drwxr-xr-x 2 root root 179 Sep 10 21:40 pcmcia
Dr–r–r– 1 root root 0 Sep 10 21:26 picld_door
Drw-r–r– 1 root root 0 Sep 10 21:30 pmfd_doorglobal
-rw-r–r– 1 root sys 58 Sep 10 21:30 psn
Dr——– 1 root root 0 Sep 10 21:26 rcm_daemon_door
-rw-r–r– 1 root root 0 Sep 10 21:26 rcm_daemon_lock
-rw——- 1 root root 1068 Sep 10 21:26 rcm_daemon_state
Drw-r–r– 1 root root 0 Sep 10 21:40 rgmd_receptionist_doorglobal
drwxrwxrwt 2 root root 186 Sep 10 21:27 rpc_door
drwx—— 2 root root 182 Sep 10 21:27 smc898
-rw-r–r– 1 root root 5 Sep 10 21:27 sshd.pid
drwx–x–x 3 root sys 176 Sep 10 21:31 sudo
drwxr-xr-x 3 root root 191 Sep 10 21:26 sysevent_channels
Drw-r–r– 1 root root 0 Sep 10 21:30 sysevent_proxy_doorglobal
-rw-r–r– 1 root root 5 Sep 10 21:27 syslog.pid
Drw-r–r– 1 root root 0 Sep 10 21:27 syslog_door
-rw-r–r– 1 root root 8192 Sep 10 21:26 tzsync
drwx—— 2 root root 2625 Sep 23 10:26 zones
Drw-r–r– 1 root root 0 Sep 10 21:30 zoneup_doorglobal

Fixing the issue reported in the ticket: first, check the ssh processes running as the root user

root@solaris:/var/run # ps -ef | grep ssh | grep root
root 8047 1 0 Sep 21 ? 0:00 /usr/lib/ssh/sshd
root 17380 13924 0 00:07:02 ? 0:00 /usr/lib/ssh/sshd
root 5570 13878 0 08:08:40 ? 0:00 /usr/lib/ssh/sshd
root 13877 1 0 Sep 21 ? 0:00 /usr/lib/ssh/sshd
root 1003 13878 0 09:17:01 ? 0:00 /usr/lib/ssh/sshd
root 13903 1 0 Sep 21 ? 0:00 /usr/lib/ssh/sshd
root 60966 13918 0 00:03:07 ? 0:00 /usr/lib/ssh/sshd
root 48654 13878 0 10:13:22 ? 0:00 /usr/lib/ssh/sshd
root 13918 1 0 Sep 21 ? 0:00 /usr/lib/ssh/sshd
root 17389 13924 0 00:07:02 ? 0:00 /usr/lib/ssh/sshd
root 39554 13878 0 09:21:51 ? 0:00 /usr/lib/ssh/sshd
root 64681 1 0 11:25:02 ? 0:00 /usr/lib/ssh/sshd
root 11912 13878 0 09:29:14 ? 0:00 /usr/lib/ssh/sshd
root 56172 13878 0 11:54:55 ? 0:00 /usr/lib/ssh/sshd
root 17386 13924 0 00:07:02 ? 0:00 /usr/lib/ssh/sshd
root 34708 13878 0 08:51:07 ? 0:00 /usr/lib/ssh/sshd
root 60201 13878 0 09:27:36 ? 0:00 /usr/lib/ssh/sshd
root 55272 1 0 11:54:33 ? 0:00 /usr/lib/ssh/sshd
root 5850 13878 0 08:08:47 ? 0:00 /usr/lib/ssh/sshd
root 9865 44290 0 11:56:17 pts/4 0:00 grep ssh
root 13924 1 0 Sep 21 ? 0:00 /usr/lib/ssh/sshd
root 13878 1 0 Sep 21 ? 0:01 /usr/lib/ssh/sshd

Creating file /var/run/sshd.pid with sshd PID

echo 8047 > /var/run/sshd.pid

root@solaris:/var/run # ls -l sshd.pid
-rw-r–r– 1 root root 5 Sep 10 21:27 sshd.pid

sshdmon does not complain anymore

root@solaris:~ # /var/opt/OV/bin/instrumentation/UXMONbroker -check sshdmon
Fri Sep 23 11:58:15 2016 : INFO : UXMONsshdmon is running now, pid=18095
mv: /dev/null and /dev/null are identical
Fri Sep 23 11:58:15 2016 : INFO : UXMONsshdmon end, pid=18095

Solaris Volume Manager – Delete replicas of the metadevice state database

In this Solaris server, one of the disks needs replacement

root@solaris # echo | format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t2d0 <drive not available>
/pci@1f,0/pci@1,1/scsi@2/sd@2,0
1. c0t3d0 <SUN72G cyl 14087 alt 2 hd 24 sec 424>
/pci@1f,0/pci@1,1/scsi@2/sd@3,0
Specify disk (enter its number): Specify disk (enter its number):

Checking replicas of the metadevice state database

root@solaris # metadb
flags first blk block count
M p 16 unknown /dev/dsk/c0t2d0s4
M p 8208 unknown /dev/dsk/c0t2d0s4
M p 16400 unknown /dev/dsk/c0t2d0s4
M p 16 unknown /dev/dsk/c0t2d0s5
M p 8208 unknown /dev/dsk/c0t2d0s5
M p 16400 unknown /dev/dsk/c0t2d0s5
a m p lu 16 8192 /dev/dsk/c0t3d0s4
a p l 8208 8192 /dev/dsk/c0t3d0s4
a p l 16400 8192 /dev/dsk/c0t3d0s4
a p l 16 8192 /dev/dsk/c0t3d0s5
a p l 8208 8192 /dev/dsk/c0t3d0s5
a p l 16400 8192 /dev/dsk/c0t3d0s5

Deleting metadevice state database on the slices of the bad disk. First slice 4

root@solaris # metadb -d /dev/dsk/c0t2d0s4
metadb: solaris: Bad address

root@solaris # metadb
flags first blk block count
M p 16 unknown /dev/dsk/c0t2d0s5
M p 8208 unknown /dev/dsk/c0t2d0s5
M p 16400 unknown /dev/dsk/c0t2d0s5
a m p lu 16 8192 /dev/dsk/c0t3d0s4
a p l 8208 8192 /dev/dsk/c0t3d0s4
a p l 16400 8192 /dev/dsk/c0t3d0s4
a p l 16 8192 /dev/dsk/c0t3d0s5
a p l 8208 8192 /dev/dsk/c0t3d0s5
a p l 16400 8192 /dev/dsk/c0t3d0s5

And then in slice 5

root@solaris # metadb -d /dev/dsk/c0t2d0s5
metadb: solaris: Bad address

root@solaris # metadb
flags first blk block count
a m p lu 16 8192 /dev/dsk/c0t3d0s4
a p l 8208 8192 /dev/dsk/c0t3d0s4
a p l 16400 8192 /dev/dsk/c0t3d0s4
a p l 16 8192 /dev/dsk/c0t3d0s5
a p l 8208 8192 /dev/dsk/c0t3d0s5
a p l 16400 8192 /dev/dsk/c0t3d0s5

Suppressing EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full! error in HPOM

Node : linux.setaoffice.com
Node Type : Intel/AMD x64(HTTPS)
Severity : minor
OM Server Time: 2016-06-18 04:38:03
Message : EXT4-fs: warning (device dm-134): ext4_dx_add_entry: Directory index full!
Msg Group : OS
Application : dmsg_mon
Object : EXT4
Event Type :
not_found

Instance Name :
not_found

Instruction : No

Check if the file /var/opt/OV/conf/OpC/dmsg_mon.cfg exists.
This is the error message showing on dmesg:

root@linux:~ # dmesg
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!

I’m going to suppress it in HPOM. First check if the file exists and, if it doesn’t, make a copy from the instrumentation directory

root@linux:~ # ls -l /var/opt/OV/conf/OpC/dmsg_mon.cfg
ls: cannot access /var/opt/OV/conf/OpC/dmsg_mon.cfg: No such file or directory

root@linux:~ # cp /var/opt/OV/bin/instrumentation/dmsg_mon.cfg /var/opt/OV/conf/OpC/

Edit the file and add the string complained by dmsg_mon

root@linux:~ # vi /var/opt/OV/conf/OpC/dmsg_mon.cfg
###############################################################################
#@(#) $Id: dmsg_mon.cfg 2132 2014-08-22 06:47:32Z zhaofeif $
#@(#) $Rev: 2132 $
#@(#) $Author: zhaofeif $
#@(#) $Date: 2014-08-22 14:47:32 +0800 (Fri, 22 Aug 2014) $
#@(#) $LastChangedBy: zhaofeif $
###############################################################################

###############################################################################
#
# File: dmsg_mon.
# [disable = yes|no]
# [interval = ]
# disable
#===============
# If set disable to YES (or yes), this module won’t run anytime
#
# interval
#===============
# If the module will allow to run after the interval minutes

# Description: strings listed here don’t generate an ITO message for dmesg
# Syntax: just list the strings, one line for each
# !!! all dmesg lines matching one of the listed strings
# are taken out of monitoring !!!
#
# Example:
#
# hardware path
#
# If the string “hardware path” is listed, all dmesg lines matching (containing)
# the string “hardware path” are ignored for monitoring purposes.
# Still, the dmesg history contains these lines, but no message is generated.
#
###############################################################################

###############################################################################
# End of dmesg_mon.cfg
###############################################################################

EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!

Run UXMONbroker. The module related to the dmsg_mon.cfg template is dmesg

root@linux:~ # /var/opt/OV/bin/instrumentation/UXMONbroker -h

GD UXMON monitoring package
Broker utility

usage: /var/opt/OV/bin/instrumentation/UXMONbroker [-h | –help] [-x ] [ -d ] [-l ] [-c ] [-f]
[ –col -p param ]
[ -t ] [ -p ] [-b ]
[ –os ]
[ -v ]

-h : this (help) message
–help : this (help) message

-x : triggers the execution of the module passed as parameter
-d : Allows execution activating debug
-check : Same as -x but output is redirected standard output, no logfile used
-l : output the logfile used by the module passed as parameter
-c : output the preferred config file used by the module

-t : output the TEMPORAL folder to be used if needed
-b : output the folder where the commands or instrumentation are located
-perl : output the perl runtime to be used

–col : Execute the collecting information of module
-p : Parameter passed to the recollection
–os : Show the OS name

-v : Version of UXMON package
-f : force the execution of the module bypass interval setting
supported modules are:
actmon, sshdmon, uxmon, nfsmon, selfcheck, swapmon, evm, mpmon, mdmon, cronmon, bondmon, rcmon, volmon, scmon, loopmon, dmesg, advfsmon, ntpmon, hwmon, bootmon, nicmon, perfmon, psmon, lpmon, vcmon, ktsmon, sgmon, dfmon.
This is the interface to the OVO templates. Templates will call this command
to get executed the different modules available, or retrieve configuration
information about the UXMON and the platform

Run UXMONbroker with dmesg

root@linux:~ # /var/opt/OV/bin/instrumentation/UXMONbroker -d dmesg
>>Debug mode activated
>>Opened the logfile: /var/opt/OV/log/OpC/dmsg_mon.log
>>logfile: /var/opt/OV/log/OpC/dmsg_mon.log
>>history file: /var/opt/OV/log/OpC/dmsg_mon.hist[1-5]
>>config line: EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
>>Module interval setting: 0
>>process info: 108389 /opt/OV/nonOV/perl/a/bin/perl -I/var/opt/OV/bin/instrumentation /var/opt/OV/bin/instrumentation/UXMONdmsg -d -c /var/opt/OV/conf/OpC/dmsg_mon.cfg -l /var/opt/OV/log/OpC/dmsg_mon.log
>>configuration file: /var/opt/OV/conf/OpC/dmsg_mon.cfg
>>lock file: /var/opt/OV/tmp/OpC/UXMONdmsg_dmsg_mon.lock
>>get lock to read /var/opt/OV/tmp/OpC/UXMONdmsg_dmsg_mon.lock
>>run the process now
>>Logged this info…:Wed Jun 22 14:50:56 2016 : INFO : UXMONdmsg is running now, pid=108389
>>There are the following lines in actual dmesg buffer program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!

>>This is the old dmesg buffer program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
program bma is using a deprecated SCSI ioctl, please convert it to SG_IO
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!
EXT4-fs warning (device dm-134): ext4_dx_add_entry: Directory index full!

>>Difference between old and new dmesg buffers
>>The following is assigned to dmesg for processing
>>ReleaseLock: 108389 1466617856 0 stopped 108389 /opt/OV/nonOV/perl/a/bin/perl -I/var/opt/OV/bin/instrumentation /var/opt/OV/bin/instrumentation/UXMONdmsg -d -c /var/opt/OV/conf/OpC/dmsg_mon.cfg -l /var/opt/OV/log/OpC/dmsg_mon.log
>>Logged this info…:Wed Jun 22 14:50:56 2016 : INFO : UXMONdmsg end, pid=108389

The ‘directory index full’ error will be seen if there are lots of files/directories in the filesystem so that the tree reaches its indexing limits and cannot keep track further.

The directory index is an additional tree structure which helps directory lookups, improving performance for huge directories.
Source: https://access.redhat.com/solutions/29894

HPE Fibreutils

Fibreutils is a set of utility scripts for Linux that make certain operations, such as information gathering, easier. The following sections describe commands contained in the Fibreutils
package.

adapter_info

The adapter_info command lists information about the Fibre Channel adapters.

–I Lists all FC adapters in this system.
–L Lists all LUNs for each adapter.
–t Lists all targets seen by each adapter.
-m Lists each adapter’s model.
–v Verbose
–h Prints the help menu.
–d Prints LUN and verbose information for a specific device. The specific device should be a SCSI host number such as 0 or 6.

lssd

The lssd command lists all bound /dev/sd* devices.

–c Prints cached data instead of scanning.
–h Prints help message.
–l Prints inquiry page 0 x 83 UUID of devices.
–w Prints WWNN of devices.

lssg

The lssg command lists all bound /dev/sg* devices.

–c Prints cached data instead of scanning.
–h Prints help message.
–l Prints inquiry page 0 x 83 UUID of devices.
–w Print WWNN of devices.

hp_rescan

The hp_rescan command rescans LUNs on Hewlett Packard Enterprise supported Fibre Channel
adapters.

–a Rescans all adapters.
–h Prints help message
–i Rescans a specific adapter instance. The specific device should be a SCSI host number such as 0 or 6.
–l Lists all FC adapters.
–n Do not perform scsi remove-single-device when executing probe-luns.

Locating adapter_info

root@linux:~ # which adapter_info
/usr/bin/adapter_info

It is provided by the package fibreutils

root@linux:~ # rpm -qf /usr/bin/adapter_info
fibreutils-3.2-6

root@linux:~ # rpm -qi fibreutils-3.2-6
Name : fibreutils Relocations: (not relocatable)
Version : 3.2 Vendor: Hewlett-Packard Company
Release : 6 Build Date: Tue 18 Dec 2012 02:38:23 AM BRST
Install Date: Wed 26 Mar 2014 10:42:41 AM BRT Build Host: localhost.localdomain
Group : Applications/System Source RPM: fibreutils-3.2-6.src.rpm
Size : 77657 License: Proprietary
Signature : RSA/SHA1, Tue 18 Feb 2014 04:44:32 PM BRT, Key ID b070680a5ce2d476
Packager : Hewlett-Packard Company
URL : http://www.hp.com
Summary : Provides complimentary programs and scripts for HP supported fibre channel HBAs
Description :
This RPM has the following components:

* Miscellaneous scripts and programs to compliment HP supported FC drivers:

lssd
lssg
adapter_info
hp_rescan
scsi_info

Help message for adapter_info

root@linux:~ # /usr/bin/adapter_info -h
NAME

adapter_info

DESCRIPTION

Prints information about Fibre Channel HBAs/CNAs.

OPTIONS

-d, –device – Prints all information for a specific SCSI host adapter
-h, –help – Prints this help message
-i, –versioninfo – Prints driver version information for all HBAs
-l, –luns – Prints the device information for all HBAs
-m, –model – Prints the HBA model for all HBAs
-p, –pciids – Prints the PCI IDs for all HBAs
-r, –remoteports – Prints the attached remote ports for all HBAs
-v, –verbose – Prints all information except device and LUN information

Checking HBA card WWNN WWPN and state

root@linux:~ # /usr/bin/adapter_info
/sys/class/scsi_host/host3: wwnn=0x2000001b32001275 wwpn=0x2100001b32001275 state=Link Up – F_Port
/sys/class/scsi_host/host4: wwnn=0x2000001b3200c270 wwpn=0x2100001b3200c270 state=Link Up – F_Port
/sys/class/scsi_host/host1: wwnn=0x20000000c97e3572 wwpn=0x10000000c97e3572 state=Online
/sys/class/scsi_host/host2: wwnn=0x20000000c97e3573 wwpn=0x10000000c97e3573 state=Linkdown

Source: https://h20565.www2.hpe.com/hpsc/doc/public/display?sp4ts.oid=461311&docId=emr_na-c01487184&docLocale=en_US

cmrunpkg: Unable to start some package or package instances.

Checking Serviceguard cluster packages

root@linux01:~ # cmviewcl

CLUSTER STATUS
cluster_virtual_scc_004 up

NODE STATUS STATE
linux01 up running
linux02 up running

PACKAGE STATUS STATE AUTO_RUN NODE
infraWP0 up running enabled linux02
ascsWP0 up running enabled linux02
wdpWP0 up running enabled linux02
infraSCP up running enabled linux02
scsSCP up running enabled linux02

UNOWNED_PACKAGES

PACKAGE STATUS STATE AUTO_RUN NODE
wdpSCP down failed enabled unowned

When trying to start the cluster package wdpSCP on server linux02 it fails because it says the node is not eligible

root@linux01:~ # cmrunpkg -n linux02 wdpSCP
Checking for license………
Found Valid Advanced License
Number of Advanced licenses:1
Unable to run package wdpSCP on node linux02. Node is not eligible.
cmrunpkg: Unable to start some package or package instances.

Checking the detailed status of the cluster package wdpSCP shows that node switching is disabled for node linux02

root@linux01:~ # cmviewcl -v -p wdpSCP

UNOWNED_PACKAGES

PACKAGE STATUS STATE AUTO_RUN NODE
wdpSCP down failed enabled unowned

Policy_Parameters:
POLICY_NAME CONFIGURED_VALUE
Failover configured_node
Failback manual

Script_Parameters:
ITEM STATUS NODE_NAME NAME

Node_Switching_Parameters:
NODE_TYPE STATUS SWITCHING NAME
Primary up enabled linux01
Alternate up disabled linux02

Dependency_Parameters:
DEPENDENCY_NAME NODE_NAME SATISFIED
infraSCP_dep linux01 no
infraSCP_dep linux02 yes

Other_Attributes:
ATTRIBUTE_NAME ATTRIBUTE_VALUE
Style modular
Priority no_priority

Enable the cluster package to run on node linux02

root@linux01:~ # cmmodpkg -e -v -n linux02 wdpSCP
Checking for license………
Found Valid Advanced License
Number of Advanced licenses:1
Enabling node linux02 for switching of package wdpSCP
Successfully enabled package wdpSCP to run on node linux02
cmmodpkg: Completed successfully on all packages specified

Using smsconnectsc to connect to another System Controller

Use smsconnectsc to connect to the other System Controller when it is inaccessible by its IP address

sms-svc@sc:~ $ smsconnectsc
Attempting to determine remote SC power state. This may take some time.
Attempting to get TTY connection state. This may take some time.
TTY connection is OFF.
About to connect to other SC. Do you want to continue (yes/no)? yes
Attempting to set TTY connection state. This may take some time.
connected

HP DDMI exit: rc = 6

root@linux:~ # /opt/hps/inventory/bin/HPS_SCANNER_linux-x86 -log:debug

HP Discovery and Dependency Mapping Inventory v9.32.003 Build 1130 linux-x86
(C) Copyright 1993-2015 Hewlett-Packard Development Company, L.P.
Includes GNU ISO C++ Library, GNU GCC Shared Support Library and GNU C Library, Copyright (C) 1987-2008 Free Software Foundation, Inc. released under LGPL, see the file COPYING.LIB for license details.

+ reading scanner parameters
Debug: Scanner PID: 16496
Debug: Scanner Stage: Initialization
Debug: wxString OSCreateTempFileName(const wxString&): Creating temp file in /tmp.
Debug: CSingleInstanceChecker::CSingleInstanceChecker(const wxString&): successfully created temp file: sclEnLody
end of scan
Debug: Scanner Status: end of scan
exit: rc = 6
Debug: Scanner Status: exit: rc = 6
Debug: Scanner Exitcode: 6
Debug: Scanner Stage: Exit
Debug: void CScannerApp::RemoveFileNameFromDeleteList(const wxString&, bool): path: /tmp/edscan.lck, delete? no
Debug: void CScannerApp::RemoveFileNameFromDeleteList(const wxString&, bool): path: , delete? yes
Debug: Stop to update scanner status!

Stopping DDMI

root@linux:~ # /etc/init.d/lw_agt stop
Checking status of Light Weight Agent:
LW Agent Is Running 11245
Stopping LW AGT…pid 11245
LW AGT Stopped

Check any processes called HPS_Execute_DDMI and HPS_SCANNER. Terminate them

root@linux:~ # ps -ef | grep HPS_Execute_DDMI
root 29866 28462 0 10:32 pts/0 00:00:00 grep HPS_Execute_DDMI

root@linux:~ # ps -ef | grep HPS_SCANNER
root 15424 1 0 Jan10 ? 00:00:09 /opt/hps/inventory/bin/HPS_SCANNER_linux-x86 -p:/var/log/hps/inventory -cfg:/opt/hps/inventory/bin/ddmi-unix-sw.cxz -l:/opt/hps/inventory/temp/local.xsf
root 16608 1 0 2015 ? 00:00:09 /opt/hps/inventory/bin/HPS_SCANNER_linux-x86 -p:/var/log/hps/inventory -cfg:/opt/hps/inventory/bin/ddmi-unix-sw.cxz -l:/opt/hps/inventory/temp/local.xsf
root 29872 28462 0 10:32 pts/0 00:00:00 grep HPS_SCANNER
root@linux:~ # kill -9 16608
root@linux:~ # kill -9 15424

Remove the lock file and start the agent

root@linux:~ # rm /tmp/edscan.lck

root@linux:~ # /etc/init.d/lw_agt start
Starting LW AGT….
Checking status of Light Weight Agent:
LW Agent Is Running 29884

Renaming a disk in Veritas Volume Manager

Listing disks

root@solaris:~ # vxdisk -o alldgs -e list
DEVICE TYPE DISK GROUP STATUS OS_NATIVE_NAME ATTR
disk_43 auto:SVM – – SVM c1t10d0s2 –
disk_44 auto:sliced softmirr softwaredg online c1t11d0s2 –
disk_45 auto:sliced softdisk softwaredg online c0t9d0s2 –
disk_46 auto:SVM – – SVM c0t8d0s2 –
ibm_ds8x000_5169 auto:cdsdisk usersdgd01 users175sbcdg online c8t6005076308FFC2A70000000000005169d0s2 std
ibm_ds8x000_5170 auto:cdsdisk bkpcvrddgd01 bkpcvrd175sbcdg online c8t6005076308FFC2A70000000000005170d0s2 std
ibm_ds8x000_5171 auto – – nolabel c8t6005076308FFC2A70000000000005171d0s2 std
ibm_ds8x000_5172 auto – – nolabel c8t6005076308FFC2A70000000000005172d0s2 std
ibm_ds8x000_5173 auto – – nolabel c8t6005076308FFC2A70000000000005173d0s2 std
ibm_ds8x000_5174 auto – – nolabel c8t6005076308FFC2A70000000000005174d0s2 std

Renaming disk in disk group users175sbcdg named usersdgd01 to users175sbcd01
Renaming disk in disk group bkpcvrd175sbcdg named bkpcvrddgd01 to bkpcvrd175sbcd01

root@solaris:~ # vxedit -g users175sbcdg rename usersdgd01 users175sbcd01
root@solaris:~ # vxedit -g bkpcvrd175sbcdg rename bkpcvrddgd01 bkpcvrd175sbcd01

Listing disks

root@solaris:~ # vxdisk -o alldgs -e list
DEVICE TYPE DISK GROUP STATUS OS_NATIVE_NAME ATTR
disk_43 auto:SVM – – SVM c1t10d0s2 –
disk_44 auto:sliced softmirr softwaredg online c1t11d0s2 –
disk_45 auto:sliced softdisk softwaredg online c0t9d0s2 –
disk_46 auto:SVM – – SVM c0t8d0s2 –
ibm_ds8x000_5169 auto:cdsdisk users175sbcd01 users175sbcdg online c8t6005076308FFC2A70000000000005169d0s2 std
ibm_ds8x000_5170 auto:cdsdisk bkpcvrd175sbcd01 bkpcvrd175sbcdg online c8t6005076308FFC2A70000000000005170d0s2 std
ibm_ds8x000_5171 auto – – nolabel c8t6005076308FFC2A70000000000005171d0s2 std
ibm_ds8x000_5172 auto – – nolabel c8t6005076308FFC2A70000000000005172d0s2 std
ibm_ds8x000_5173 auto – – nolabel c8t6005076308FFC2A70000000000005173d0s2 std
ibm_ds8x000_5174 auto – – nolabel c8t6005076308FFC2A70000000000005174d0s2 std

Replacing root disk / root mirror for Solaris server with Solaris Volume Manager

Reviewing metadevice state database

root@solaris10:/ # metadb
flags first blk block count
a m p luo 16 8192 /dev/dsk/c0t1d0s7
a p luo 8208 8192 /dev/dsk/c0t1d0s7
a p luo 16400 8192 /dev/dsk/c0t1d0s7
a p luo 16 8192 /dev/dsk/c0t0d0s7
a p luo 8208 8192 /dev/dsk/c0t0d0s7
a p luo 16400 8192 /dev/dsk/c0t0d0s7

The disk to replace is the c0t0d0

Deleting metadevice state database from the disk that is being replaced

root@solaris10:/ # metadb -d /dev/dsk/c0t0d0s7

Reviewing metadevices and disk slices

root@solaris10:/ # metastat -c
d40 m 516MB d41 d42
d41 s 516MB c0t0d0s6
d42 s 516MB c0t1d0s6
d20 m 20GB d21 d22
d21 s 20GB c0t0d0s1
d22 s 20GB c0t1d0s1
d30 m 23GB d31 d32
d31 s 23GB c0t0d0s5
d32 s 23GB c0t1d0s5
d10 m 24GB d11 d12
d11 s 24GB c0t0d0s0
d12 s 24GB c0t1d0s0

Splitting mirror

root@solaris10:/ # metadetach d10 d11
d10: submirror d11 is detached
root@solaris10:/ # metadetach d20 d21
d20: submirror d21 is detached
root@solaris10:/ # metadetach d30 d31
d30: submirror d31 is detached
root@solaris10:/ # metadetach d40 d41
d40: submirror d41 is detached

Removing the metadevice

root@solaris10:/ # metaclear d11
d11: Concat/Stripe is cleared
root@solaris10:/ # metaclear d21
d21: Concat/Stripe is cleared
root@solaris10:/ # metaclear d31
d31: Concat/Stripe is cleared
root@solaris10:/ # metaclear d41
d41: Concat/Stripe is cleared

Identifying the disk

root@solaris10:/ # format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number):

After selecting the disk, select analyze

Specify disk (enter its number): 0
selecting c0t0d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! – execute , then return
quit
format>

Perform a read-only test

format> analyze

ANALYZE MENU:
read – read only test (doesn’t harm SunOS)
refresh – read then write (doesn’t harm data)
test – pattern testing (doesn’t harm data)
write – write then read (corrupts data)
compare – write, read, compare (corrupts data)
purge – write, read, write (corrupts data)
verify – write entire disk, then verify (corrupts data)
print – display data buffer
setup – set analysis parameters
config – show analysis parameters
! – execute , then return
quit
analyze>

Type read, then go check which disk has its LED blinking. Before unplugging the disk, interrupt the test with CTRL+C

analyze> read
Ready to analyze (won’t harm SunOS). This takes a long time,
but is interruptable with CTRL-C. Continue? y

pass 0
^C 818/4/164 CTRL+C
Total of 0 defective blocks repaired.
analyze>

Quit format

analyze> quit

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! – execute , then return
quit
format> quit

dmesg shows that the disk has been replaced

root@solaris10:/ # dmesg
Nov 24 05:00:01 solaris10 xntpd[10967]: [ID 266339 daemon.notice] using kernel phase-lock loop 0041, drift correction -239.18301
Nov 24 10:52:18 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:18 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:18 solaris10 mpt_handle_event_sync : SAS target 0 not responding.
Nov 24 10:52:18 solaris10 mpt_handle_event_sync : SAS target 0 not responding.
Nov 24 10:52:23 solaris10 SC Alert: [ID 209909 daemon.error] DISK at HDD0 has been removed.
Nov 24 10:52:23 solaris10 SC Alert: [ID 209909 daemon.error] DISK at HDD0 has been removed.
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 mpt_handle_event_sync : SAS target 0 added.
Nov 24 10:52:39 solaris10 mpt_handle_event_sync : SAS target 0 added.
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 wwn for target has changed
Nov 24 10:52:39 solaris10 wwn for target has changed
Nov 24 10:52:41 solaris10 SC Alert: [ID 735619 daemon.error] DISK at HDD0 has been inserted.
Nov 24 10:52:41 solaris10 SC Alert: [ID 735619 daemon.error] DISK at HDD0 has been inserted.

Force Solaris to rescan the disk. This step is optional and can be skipped

root@solaris10:/ # format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number):

Select the disk 0 – c0t0d0

Specify disk (enter its number): 0
selecting c0t0d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! – execute , then return
quit
format>

Select type and Auto configure

format> t

AVAILABLE DRIVE TYPES:
0. Auto configure
1. Quantum ProDrive 80S
2. Quantum ProDrive 105S
3. CDC Wren IV 94171-344
4. SUN0104
5. SUN0207
6. SUN0327
7. SUN0340
8. SUN0424
9. SUN0535
10. SUN0669
11. SUN1.0G
12. SUN1.05
13. SUN1.3G
14. SUN2.1G
15. SUN2.9G
16. Zip 100
17. Zip 250
18. Peerless 10GB
19. SUN72G
20. FUJITSU-MAY2073RCSUN72G-0501
21. HITACHI-OPEN-V-SUN-5009
22. HITACHI-OPEN-V*2-SUN-5009
23. HITACHI-OPEN-V*4-SUN-5009
24. other
Specify disk type (enter its number)[19]: 0
c0t0d0: configured with capacity of 68.35GB

selecting c0t0d0
[disk formatted]
format> label
Ready to label disk, continue? y

format> quit

Verifying disk partition

root@solaris10:/ # format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number):

Selecting disk c0t0d0

Specify disk (enter its number): 0
selecting c0t0d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! – execute , then return
quit
format> p

Printing disk partitions

PARTITION MENU:
0 – change `0′ partition
1 – change `1′ partition
2 – change `2′ partition
3 – change `3′ partition
4 – change `4′ partition
5 – change `5′ partition
6 – change `6′ partition
7 – change `7′ partition
select – select a predefined table
modify – modify a predefined partition table
name – name the current table
print – display the current table
label – write partition map and label to the disk
! – execute , then return
quit
partition> p

The most important thing to check is disk slice 2 – backup

Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 25 129.19MB (26/0/0) 264576
1 swap wu 26 – 51 129.19MB (26/0/0) 264576
2 backup wu 0 – 14086 68.35GB (14087/0/0) 143349312
3 unassigned wm 0 0 (0/0/0) 0
4 unassigned wm 0 0 (0/0/0) 0
5 unassigned wm 0 0 (0/0/0) 0
6 usr wm 52 – 14086 68.10GB (14035/0/0) 142820160
7 unassigned wm 0 0 (0/0/0) 0

Check the other mirror side – c0t1d0

format> disk

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number)[0]: 1

Check disk partition

selecting c0t1d0
[disk formatted]
format> p

PARTITION MENU:
0 – change `0′ partition
1 – change `1′ partition
2 – change `2′ partition
3 – change `3′ partition
4 – change `4′ partition
5 – change `5′ partition
6 – change `6′ partition
7 – change `7′ partition
select – select a predefined table
modify – modify a predefined partition table
name – name the current table
print – display the current table
label – write partition map and label to the disk
! – execute , then return
quit
partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 5031 24.42GB (5032/0/0) 51205632
1 swap wu 5032 – 9153 20.00GB (4122/0/0) 41945472
2 backup wm 0 – 14086 68.35GB (14087/0/0) 143349312
3 unassigned wu 0 0 (0/0/0) 0
4 unassigned wu 0 0 (0/0/0) 0
5 var wm 9154 – 13951 23.28GB (4798/0/0) 48824448
6 unassigned wm 13952 – 14055 516.75MB (104/0/0) 1058304
7 unassigned wm 14056 – 14086 154.03MB (31/0/0) 315456

partition>

Compare the partition tables of the two disks. Both report the same total number of blocks
====================================================================================================

partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 25 129.19MB (26/0/0) 264576
1 swap wu 26 – 51 129.19MB (26/0/0) 264576
2 backup wu 0 – 14086 68.35GB ( 14087/0/0) 143349312
3 unassigned wm 0 0 (0/0/0) 0
4 unassigned wm 0 0 (0/0/0) 0
5 unassigned wm 0 0 (0/0/0) 0
6 usr wm 52 – 14086 68.10GB (14035/0/0) 142820160
7 unassigned wm 0 0 (0/0/0) 0

partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 5031 24.42GB (5032/0/0) 51205632
1 swap wu 5032 – 9153 20.00GB (4122/0/0) 41945472
2 backup wm 0 – 14086 68.35GB ( 14087/0/0) 143349312
3 unassigned wu 0 0 (0/0/0) 0
4 unassigned wu 0 0 (0/0/0) 0
5 var wm 9154 – 13951 23.28GB (4798/0/0) 48824448
6 unassigned wm 13952 – 14055 516.75MB (104/0/0) 1058304
7 unassigned wm 14056 – 14086 154.03MB (31/0/0) 315456

====================================================================================================

Even though the disk brands differ, both disks have the same geometry and total number of blocks, so it is safe to proceed

0. c0t0d0 <SUN72G cyl 14087 alt 2 hd 24 sec 424>
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0 <FUJITSU-MAY2073RCSUN72G-0501 cyl 14087 alt 2 hd 24 sec 424>
/pci@780/pci@0/pci@9/scsi@0/sd@1,0

Copying disk partitioning to the newly inserted disk

root@solaris10:/ # prtvtoc /dev/rdsk/c0t1d0s2 | fmthard -s – /dev/rdsk/c0t0d0s2
fmthard: New volume table of contents now in place.

Now c0t0d0 has the same partition table as c0t1d0

partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 5031 24.42GB (5032/0/0) 51205632
1 swap wu 5032 – 9153 20.00GB (4122/0/0) 41945472
2 backup wm 0 – 14086 68.35GB (14087/0/0) 143349312
3 unassigned wu 0 0 (0/0/0) 0
4 unassigned wu 0 0 (0/0/0) 0
5 var wm 9154 – 13951 23.28GB (4798/0/0) 48824448
6 unassigned wm 13952 – 14055 516.75MB (104/0/0) 1058304
7 unassigned wm 14056 – 14086 154.03MB (31/0/0) 315456

Recreating the metadevice state database replicas on the new disk

root@solaris10:/ # metadb -a -f -c3 c0t0d0s7
root@solaris10:/ # metadb
flags first blk block count
a m p luo 16 8192 /dev/dsk/c0t1d0s7
a p luo 8208 8192 /dev/dsk/c0t1d0s7
a p luo 16400 8192 /dev/dsk/c0t1d0s7
a u 16 8192 /dev/dsk/c0t0d0s7
a u 8208 8192 /dev/dsk/c0t0d0s7
a u 16400 8192 /dev/dsk/c0t0d0s7

Recreating the metadevices (submirrors) on the new disk

root@solaris10:/ # metainit d11 1 1 c0t0d0s0
d11: Concat/Stripe is setup
root@solaris10:/ # metainit d21 1 1 c0t0d0s1
d21: Concat/Stripe is setup
root@solaris10:/ # metainit d31 1 1 c0t0d0s5
d31: Concat/Stripe is setup
root@solaris10:/ # metainit d41 1 1 c0t0d0s6
d41: Concat/Stripe is setup

Reattaching the submirrors to their mirrors (this starts the resync)

root@solaris10:/ # metattach d10 d11
d10: submirror d11 is attached
root@solaris10:/ # metattach d20 d21
d20: submirror d21 is attached
root@solaris10:/ # metattach d30 d31
d30: submirror d31 is attached
root@solaris10:/ # metattach d40 d41
d40: submirror d41 is attached

Checking the mirror resync status

root@solaris10:/ # metastat -c
d40 m 516MB d42 d41 (resync-1%)
d42 s 516MB c0t1d0s6
d41 s 516MB c0t0d0s6
d20 m 20GB d22 d21 (resync-0%)
d22 s 20GB c0t1d0s1
d21 s 20GB c0t0d0s1
d30 m 23GB d32 d31 (resync-0%)
d32 s 23GB c0t1d0s5
d31 s 23GB c0t0d0s5
d10 m 24GB d12 d11 (resync-0%)
d12 s 24GB c0t1d0s0
d11 s 24GB c0t0d0s0

%d bloggers like this: