Category: Solaris

HP DDMI exit: rc = 6

root@linux:~ # /opt/hps/inventory/bin/HPS_SCANNER_linux-x86 -log:debug

HP Discovery and Dependency Mapping Inventory v9.32.003 Build 1130 linux-x86
(C) Copyright 1993-2015 Hewlett-Packard Development Company, L.P.
Includes GNU ISO C++ Library, GNU GCC Shared Support Library and GNU C Library, Copyright (C) 1987-2008 Free Software Foundation, Inc. released under LGPL, see the file COPYING.LIB for license details.

+ reading scanner parameters
Debug: Scanner PID: 16496
Debug: Scanner Stage: Initialization
Debug: wxString OSCreateTempFileName(const wxString&): Creating temp file in /tmp.
Debug: CSingleInstanceChecker::CSingleInstanceChecker(const wxString&): successfully created temp file: sclEnLody
end of scan
Debug: Scanner Status: end of scan
exit: rc = 6
Debug: Scanner Status: exit: rc = 6
Debug: Scanner Exitcode: 6
Debug: Scanner Stage: Exit
Debug: void CScannerApp::RemoveFileNameFromDeleteList(const wxString&, bool): path: /tmp/edscan.lck, delete? no
Debug: void CScannerApp::RemoveFileNameFromDeleteList(const wxString&, bool): path: , delete? yes
Debug: Stop to update scanner status!

Stopping DDMI

root@linux:~ # /etc/init.d/lw_agt stop
Checking status of Light Weight Agent:
LW Agent Is Running 11245
Stopping LW AGT…pid 11245
LW AGT Stopped

Check for any processes called HPS_Execute_DDMI and HPS_SCANNER, and terminate them

root@linux:~ # ps -ef | grep HPS_Execute_DDMI
root 29866 28462 0 10:32 pts/0 00:00:00 grep HPS_Execute_DDMI

root@linux:~ # ps -ef | grep HPS_SCANNER
root 15424 1 0 Jan10 ? 00:00:09 /opt/hps/inventory/bin/HPS_SCANNER_linux-x86 -p:/var/log/hps/inventory -cfg:/opt/hps/inventory/bin/ddmi-unix-sw.cxz -l:/opt/hps/inventory/temp/local.xsf
root 16608 1 0 2015 ? 00:00:09 /opt/hps/inventory/bin/HPS_SCANNER_linux-x86 -p:/var/log/hps/inventory -cfg:/opt/hps/inventory/bin/ddmi-unix-sw.cxz -l:/opt/hps/inventory/temp/local.xsf
root 29872 28462 0 10:32 pts/0 00:00:00 grep HPS_SCANNER
root@linux:~ # kill -9 16608
root@linux:~ # kill -9 15424

Remove the lock file and start the agent

root@linux:~ # rm /tmp/edscan.lck

root@linux:~ # /etc/init.d/lw_agt start
Starting LW AGT….
Checking status of Light Weight Agent:
LW Agent Is Running 29884

Renaming a disk in Veritas Volume Manager

Listing disks

root@solaris:~ # vxdisk -o alldgs -e list
DEVICE TYPE DISK GROUP STATUS OS_NATIVE_NAME ATTR
disk_43 auto:SVM – – SVM c1t10d0s2 –
disk_44 auto:sliced softmirr softwaredg online c1t11d0s2 –
disk_45 auto:sliced softdisk softwaredg online c0t9d0s2 –
disk_46 auto:SVM – – SVM c0t8d0s2 –
ibm_ds8x000_5169 auto:cdsdisk usersdgd01 users175sbcdg online c8t6005076308FFC2A70000000000005169d0s2 std
ibm_ds8x000_5170 auto:cdsdisk bkpcvrddgd01 bkpcvrd175sbcdg online c8t6005076308FFC2A70000000000005170d0s2 std
ibm_ds8x000_5171 auto – – nolabel c8t6005076308FFC2A70000000000005171d0s2 std
ibm_ds8x000_5172 auto – – nolabel c8t6005076308FFC2A70000000000005172d0s2 std
ibm_ds8x000_5173 auto – – nolabel c8t6005076308FFC2A70000000000005173d0s2 std
ibm_ds8x000_5174 auto – – nolabel c8t6005076308FFC2A70000000000005174d0s2 std

Renaming disk in disk group users175sbcdg named usersdgd01 to users175sbcd01
Renaming disk in disk group bkpcvrd175sbcdg named bkpcvrddgd01 to bkpcvrd175sbcd01

root@solaris:~ # vxedit -g users175sbcdg rename usersdgd01 users175sbcd01
root@solaris:~ # vxedit -g bkpcvrd175sbcdg rename bkpcvrddgd01 bkpcvrd175sbcd01

Listing disks

root@solaris:~ # vxdisk -o alldgs -e list
DEVICE TYPE DISK GROUP STATUS OS_NATIVE_NAME ATTR
disk_43 auto:SVM – – SVM c1t10d0s2 –
disk_44 auto:sliced softmirr softwaredg online c1t11d0s2 –
disk_45 auto:sliced softdisk softwaredg online c0t9d0s2 –
disk_46 auto:SVM – – SVM c0t8d0s2 –
ibm_ds8x000_5169 auto:cdsdisk users175sbcd01 users175sbcdg online c8t6005076308FFC2A70000000000005169d0s2 std
ibm_ds8x000_5170 auto:cdsdisk bkpcvrd175sbcd01 bkpcvrd175sbcdg online c8t6005076308FFC2A70000000000005170d0s2 std
ibm_ds8x000_5171 auto – – nolabel c8t6005076308FFC2A70000000000005171d0s2 std
ibm_ds8x000_5172 auto – – nolabel c8t6005076308FFC2A70000000000005172d0s2 std
ibm_ds8x000_5173 auto – – nolabel c8t6005076308FFC2A70000000000005173d0s2 std
ibm_ds8x000_5174 auto – – nolabel c8t6005076308FFC2A70000000000005174d0s2 std

Replacing root disk / root mirror for Solaris server with Solaris Volume Manager

Reviewing metadevice state database

root@solaris10:/ # metadb
flags first blk block count
a m p luo 16 8192 /dev/dsk/c0t1d0s7
a p luo 8208 8192 /dev/dsk/c0t1d0s7
a p luo 16400 8192 /dev/dsk/c0t1d0s7
a p luo 16 8192 /dev/dsk/c0t0d0s7
a p luo 8208 8192 /dev/dsk/c0t0d0s7
a p luo 16400 8192 /dev/dsk/c0t0d0s7

The disk to replace is c0t0d0

Deleting metadevice state database from the disk that is being replaced

root@solaris10:/ # metadb -d /dev/dsk/c0t0d0s7

Reviewing metadevices and disk slices

root@solaris10:/ # metastat -c
d40 m 516MB d41 d42
d41 s 516MB c0t0d0s6
d42 s 516MB c0t1d0s6
d20 m 20GB d21 d22
d21 s 20GB c0t0d0s1
d22 s 20GB c0t1d0s1
d30 m 23GB d31 d32
d31 s 23GB c0t0d0s5
d32 s 23GB c0t1d0s5
d10 m 24GB d11 d12
d11 s 24GB c0t0d0s0
d12 s 24GB c0t1d0s0

Splitting the mirrors: detaching the submirrors that reside on the disk being replaced

root@solaris10:/ # metadetach d10 d11
d10: submirror d11 is detached
root@solaris10:/ # metadetach d20 d21
d20: submirror d21 is detached
root@solaris10:/ # metadetach d30 d31
d30: submirror d31 is detached
root@solaris10:/ # metadetach d40 d41
d40: submirror d41 is detached

Removing the metadevice

root@solaris10:/ # metaclear d11
d11: Concat/Stripe is cleared
root@solaris10:/ # metaclear d21
d21: Concat/Stripe is cleared
root@solaris10:/ # metaclear d31
d31: Concat/Stripe is cleared
root@solaris10:/ # metaclear d41
d41: Concat/Stripe is cleared

Identifying the disk

root@solaris10:/ # format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number):

After selecting the disk, select analyze

Specify disk (enter its number): 0
selecting c0t0d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! <cmd> – execute <cmd>, then return
quit
format>

Perform a read-only test

format> analyze

ANALYZE MENU:
read – read only test (doesn't harm SunOS)
refresh – read then write (doesn't harm data)
test – pattern testing (doesn't harm data)
write – write then read (corrupts data)
compare – write, read, compare (corrupts data)
purge – write, read, write (corrupts data)
verify – write entire disk, then verify (corrupts data)
print – display data buffer
setup – set analysis parameters
config – show analysis parameters
! <cmd> – execute <cmd>, then return
quit
analyze>

Type read, then go and check which disk has the LED blinking. Before unplugging the disk, interrupt the test with CTRL+C

analyze> read
Ready to analyze (won’t harm SunOS). This takes a long time,
but is interruptable with CTRL-C. Continue? y

pass 0
^C 818/4/164 CTRL+C
Total of 0 defective blocks repaired.
analyze>

Quit format

analyze> quit

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! <cmd> – execute <cmd>, then return
quit
format> quit

dmesg shows that the disk has been replaced

root@solaris10:/ # dmesg
Nov 24 05:00:01 solaris10 xntpd[10967]: [ID 266339 daemon.notice] using kernel phase-lock loop 0041, drift correction -239.18301
Nov 24 10:52:18 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:18 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:18 solaris10 mpt_handle_event_sync : SAS target 0 not responding.
Nov 24 10:52:18 solaris10 mpt_handle_event_sync : SAS target 0 not responding.
Nov 24 10:52:23 solaris10 SC Alert: [ID 209909 daemon.error] DISK at HDD0 has been removed.
Nov 24 10:52:23 solaris10 SC Alert: [ID 209909 daemon.error] DISK at HDD0 has been removed.
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 mpt_handle_event_sync : SAS target 0 added.
Nov 24 10:52:39 solaris10 mpt_handle_event_sync : SAS target 0 added.
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 scsi: [ID 107833 kern.warning] WARNING: /pci@780/pci@0/pci@9/scsi@0 (mpt0):
Nov 24 10:52:39 solaris10 wwn for target has changed
Nov 24 10:52:39 solaris10 wwn for target has changed
Nov 24 10:52:41 solaris10 SC Alert: [ID 735619 daemon.error] DISK at HDD0 has been inserted.
Nov 24 10:52:41 solaris10 SC Alert: [ID 735619 daemon.error] DISK at HDD0 has been inserted.

Force Solaris to rescan the disk. These steps are optional and can be skipped if the new disk is already detected

root@solaris10:/ # format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number):

Select the disk 0 – c0t0d0

Specify disk (enter its number): 0
selecting c0t0d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! <cmd> – execute <cmd>, then return
quit
format>

Select type and Auto configure

format> t

AVAILABLE DRIVE TYPES:
0. Auto configure
1. Quantum ProDrive 80S
2. Quantum ProDrive 105S
3. CDC Wren IV 94171-344
4. SUN0104
5. SUN0207
6. SUN0327
7. SUN0340
8. SUN0424
9. SUN0535
10. SUN0669
11. SUN1.0G
12. SUN1.05
13. SUN1.3G
14. SUN2.1G
15. SUN2.9G
16. Zip 100
17. Zip 250
18. Peerless 10GB
19. SUN72G
20. FUJITSU-MAY2073RCSUN72G-0501
21. HITACHI-OPEN-V-SUN-5009
22. HITACHI-OPEN-V*2-SUN-5009
23. HITACHI-OPEN-V*4-SUN-5009
24. other
Specify disk type (enter its number)[19]: 0
c0t0d0: configured with capacity of 68.35GB

selecting c0t0d0
[disk formatted]
format> label
Ready to label disk, continue? y

format> quit

Verifying disk partition

root@solaris10:/ # format
Searching for disks…done

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number):

Selecting disk c0t0d0

Specify disk (enter its number): 0
selecting c0t0d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
! <cmd> – execute <cmd>, then return
quit
format> p

Printing disk partitions

PARTITION MENU:
0 – change `0' partition
1 – change `1' partition
2 – change `2' partition
3 – change `3' partition
4 – change `4' partition
5 – change `5' partition
6 – change `6' partition
7 – change `7' partition
select – select a predefined table
modify – modify a predefined partition table
name – name the current table
print – display the current table
label – write partition map and label to the disk
! <cmd> – execute <cmd>, then return
quit
partition> p

The most important thing to check is disk slice 2 – backup

Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 25 129.19MB (26/0/0) 264576
1 swap wu 26 – 51 129.19MB (26/0/0) 264576
2 backup wu 0 – 14086 68.35GB (14087/0/0) 143349312
3 unassigned wm 0 0 (0/0/0) 0
4 unassigned wm 0 0 (0/0/0) 0
5 unassigned wm 0 0 (0/0/0) 0
6 usr wm 52 – 14086 68.10GB (14035/0/0) 142820160
7 unassigned wm 0 0 (0/0/0) 0

Check the other mirror side – c0t1d0

format> disk

AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c6t60060E8004587800000058780000012Cd0
/scsi_vhci/ssd@g60060e8004587800000058780000012c
3. c6t60060E8004587800000058780000010Ed0 <HITACHI-OPEN-V*4-SUN-5009 cyl 48744 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000010e
4. c6t60060E8004587800000058780000012Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000012e
5. c6t60060E8004587800000058780000011Ed0
/scsi_vhci/ssd@g60060e8004587800000058780000011e
6. c6t60060E8004587800000058780000011Fd0 <HITACHI-OPEN-V*2-SUN-5009 cyl 24371 alt 2 hd 15 sec 512>
/scsi_vhci/ssd@g60060e8004587800000058780000011f
7. c6t60060E80045878000000587800000088d0
/scsi_vhci/ssd@g60060e80045878000000587800000088
8. c6t60060E80045878000000587800000087d0
/scsi_vhci/ssd@g60060e80045878000000587800000087
9. c6t60060E80045878000000587800000084d0
/scsi_vhci/ssd@g60060e80045878000000587800000084
10. c6t60060E80045878000000587800000082d0
/scsi_vhci/ssd@g60060e80045878000000587800000082
11. c6t60060E80045878000000587800000080d0
/scsi_vhci/ssd@g60060e80045878000000587800000080
12. c6t60060E80045878000000587800000013d0
/scsi_vhci/ssd@g60060e80045878000000587800000013
13. c6t60060E80045878000000587800000012d0
/scsi_vhci/ssd@g60060e80045878000000587800000012
14. c6t60060E80045878000000587800000089d0
/scsi_vhci/ssd@g60060e80045878000000587800000089
Specify disk (enter its number)[0]: 1

Check disk partition

selecting c0t1d0
[disk formatted]
format> p

PARTITION MENU:
0 – change `0' partition
1 – change `1' partition
2 – change `2' partition
3 – change `3' partition
4 – change `4' partition
5 – change `5' partition
6 – change `6' partition
7 – change `7' partition
select – select a predefined table
modify – modify a predefined partition table
name – name the current table
print – display the current table
label – write partition map and label to the disk
! <cmd> – execute <cmd>, then return
quit
partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 5031 24.42GB (5032/0/0) 51205632
1 swap wu 5032 – 9153 20.00GB (4122/0/0) 41945472
2 backup wm 0 – 14086 68.35GB (14087/0/0) 143349312
3 unassigned wu 0 0 (0/0/0) 0
4 unassigned wu 0 0 (0/0/0) 0
5 var wm 9154 – 13951 23.28GB (4798/0/0) 48824448
6 unassigned wm 13952 – 14055 516.75MB (104/0/0) 1058304
7 unassigned wm 14056 – 14086 154.03MB (31/0/0) 315456

partition>

Compare the partition tables of the two disks. Both have the same number of blocks
====================================================================================================

partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 25 129.19MB (26/0/0) 264576
1 swap wu 26 – 51 129.19MB (26/0/0) 264576
2 backup wu 0 – 14086 68.35GB ( 14087/0/0) 143349312
3 unassigned wm 0 0 (0/0/0) 0
4 unassigned wm 0 0 (0/0/0) 0
5 unassigned wm 0 0 (0/0/0) 0
6 usr wm 52 – 14086 68.10GB (14035/0/0) 142820160
7 unassigned wm 0 0 (0/0/0) 0

partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 5031 24.42GB (5032/0/0) 51205632
1 swap wu 5032 – 9153 20.00GB (4122/0/0) 41945472
2 backup wm 0 – 14086 68.35GB ( 14087/0/0) 143349312
3 unassigned wu 0 0 (0/0/0) 0
4 unassigned wu 0 0 (0/0/0) 0
5 var wm 9154 – 13951 23.28GB (4798/0/0) 48824448
6 unassigned wm 13952 – 14055 516.75MB (104/0/0) 1058304
7 unassigned wm 14056 – 14086 154.03MB (31/0/0) 315456

====================================================================================================

Even though the disk brands differ, they have the same number of blocks, so we can proceed

0. c0t0d0 <SUN72G cyl 14087 alt 2 hd 24 sec 424>
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0 <FUJITSU-MAY2073RCSUN72G-0501 cyl 14087 alt 2 hd 24 sec 424>
/pci@780/pci@0/pci@9/scsi@0/sd@1,0

Copying disk partitioning to the newly inserted disk

root@solaris10:/ # prtvtoc /dev/rdsk/c0t1d0s2 | fmthard -s - /dev/rdsk/c0t0d0s2
fmthard: New volume table of contents now in place.

Now c0t0d0 has the same partitions as c0t1d0

partition> p
Current partition table (original):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part Tag Flag Cylinders Size Blocks
0 root wm 0 – 5031 24.42GB (5032/0/0) 51205632
1 swap wu 5032 – 9153 20.00GB (4122/0/0) 41945472
2 backup wm 0 – 14086 68.35GB (14087/0/0) 143349312
3 unassigned wu 0 0 (0/0/0) 0
4 unassigned wu 0 0 (0/0/0) 0
5 var wm 9154 – 13951 23.28GB (4798/0/0) 48824448
6 unassigned wm 13952 – 14055 516.75MB (104/0/0) 1058304
7 unassigned wm 14056 – 14086 154.03MB (31/0/0) 315456

Recreating metadevice state database

root@solaris10:/ # metadb -a -f -c3 c0t0d0s7
root@solaris10:/ # metadb
flags first blk block count
a m p luo 16 8192 /dev/dsk/c0t1d0s7
a p luo 8208 8192 /dev/dsk/c0t1d0s7
a p luo 16400 8192 /dev/dsk/c0t1d0s7
a u 16 8192 /dev/dsk/c0t0d0s7
a u 8208 8192 /dev/dsk/c0t0d0s7
a u 16400 8192 /dev/dsk/c0t0d0s7

Recreating metadevices

root@solaris10:/ # metainit d11 1 1 c0t0d0s0
d11: Concat/Stripe is setup
root@solaris10:/ # metainit d21 1 1 c0t0d0s1
d21: Concat/Stripe is setup
root@solaris10:/ # metainit d31 1 1 c0t0d0s5
d31: Concat/Stripe is setup
root@solaris10:/ # metainit d41 1 1 c0t0d0s6
d41: Concat/Stripe is setup

Reattaching metadevices

root@solaris10:/ # metattach d10 d11
d10: submirror d11 is attached
root@solaris10:/ # metattach d20 d21
d20: submirror d21 is attached
root@solaris10:/ # metattach d30 d31
d30: submirror d31 is attached
root@solaris10:/ # metattach d40 d41
d40: submirror d41 is attached

Checking status

root@solaris10:/ # metastat -c
d40 m 516MB d42 d41 (resync-1%)
d42 s 516MB c0t1d0s6
d41 s 516MB c0t0d0s6
d20 m 20GB d22 d21 (resync-0%)
d22 s 20GB c0t1d0s1
d21 s 20GB c0t0d0s1
d30 m 23GB d32 d31 (resync-0%)
d32 s 23GB c0t1d0s5
d31 s 23GB c0t0d0s5
d10 m 24GB d12 d11 (resync-0%)
d12 s 24GB c0t1d0s0
d11 s 24GB c0t0d0s0

Connecting to XSCF and accessing console

The Sun SPARC Enterprise M-class servers use a new service processor, the XSCF.

XSCF> man intro

Intro Intro(1)

NAME
Intro – list the commands provided by the XSCF firmware

DESCRIPTION
This Intro page lists the user commands (exit(1), man(1),
and who(1)) and system administration commands (all the oth-
ers, beginning with addboard(8)) provided by the XSCF
firmware of the SPARC Enterprise
M3000/M4000/M5000/M8000/M9000 servers. Some XSCF commands
have the same name as their Oracle Solaris OS counterpart,
but function slightly differently. For details, refer to the
man page for each command.

The following commands are supported:

exit exit the XSCF shell

man display manual pages of specified
XSCF shell command

who display a list of the user accounts
who are logged in to the XSCF

addboard configure an eXtended System
Board(XSB) into the domain confi-
guration or assigns it to the domain
configuration

addcodactivation add a Capacity on Demand (COD)
hardware activation key (COD key) to
the COD database

addcodlicense add a Capacity on Demand (COD)
right-to-use (RTU) license key to
the COD license database

addfru add a Field Replaceable Unit (FRU)

XCP Last change: April 2013 1

Intro Intro(1)

adduser create an XSCF user account

applynetwork apply XSCF network information to
the XSCF

cfgdevice connect a CD-RW/DVD-RW and TAPE
drive unit to the port, disconnect
it from the port, or display the
status of the drive

clockboard set or display the clock control
unit used at system startup

console connect to a domain console

deleteboard disconnect an eXtended System Board
(XSB) from the domain configuration

deletecodactivation remove a Capacity on Demand (COD)
hardware activation key (COD key)
from the COD database

deletecodlicense remove a Capacity on Demand (COD)
right-to-use (RTU) license key from
the COD license database

deletefru delete a Field Replaceable Unit
(FRU)

deleteuser delete an XSCF user account

XCP Last change: April 2013 2

Intro Intro(1)

disableuser disable an XSCF user account

dumpconfig save system configuration informa-
tion to a file

enableuser enable an XSCF user account

flashupdate update the firmware

fmadm fault management configuration tool

fmdump view fault management logs

fmstat report fault management module
statistics

forcerebootxscf reset the XSCF forcibly

getflashimage download a firmware image file

ioxadm manage External I/O Expansion Units
and add-in cards that contain Energy
Storage Modules and are attached to
the host system

moveboard move an eXtended System Board (XSB)
from the current domain to another

nslookup refer to the DNS server for the host

XCP Last change: April 2013 3

Intro Intro(1)

password manage user passwords and expiration
settings

ping send the ICMP ECHO_REQUEST packets
to the network host or the network
device

poweroff turn off the power to the specified
domain

poweron turn on the power to the specified
domain

prtfru display FRUID data on the system and
External I/O Expansion Unit

rebootxscf reset the XSCF

replacefru replace a Field Replaceable Unit
(FRU)

reset reset the specified domain

resetdateoffset reset time of domains to match sys-
tem time

restoreconfig restore the system configuration
information previously saved by
dumpconfig

restoredefaults restore factory settings of the
server or XSCF unit

XCP Last change: April 2013 4

Intro Intro(1)

sendbreak send a break signal to the specified
domain

setad configure Active Directory

setaltitude set the altitude of the system or
whether or not the air filter
installed

setarchiving configure the log archiving func-
tionality

setaudit manage the system auditing func-
tionality

setautologout set the session timeout time of the
XSCF shell

setcod set up the Capacity on Demand (COD)
resources used for domains

setdate set the date and time of XSCF

setdcl set a domain component list (DCL)

setdomainmode set a domain mode

setdomparam forcibly rewrite OpenBoot PROM
environment variables

XCP Last change: April 2013 5

Intro Intro(1)

setdscp set the IP address assignments for
the Domain to Service Processor Com-
munications Protocol (DSCP)

setdualpowerfeed set dual power feed mode

setemailreport set up the email report configura-
tion data

sethostname set a host name and domain name for
an XSCF unit

sethttps start or stop the HTTPS service,
which is used in the XSCF network.
This command also performs
authentication-related settings

setldap configure the Service Processor as a
Lightweight Directory Access Proto-
col (LDAP) client

setldapssl configure LDAP/SSL

setlocale set the default locale of the XSCF

setlocator control the blinking of the CHECK
LED on the operator panel

setloginlockout enable or disable login lockout
feature

XCP Last change: April 2013 6

Intro Intro(1)

setlookup enable or disable the use of the
Lightweight Directory Access Proto-
col (LDAP) server for authentication
and privilege lookup

setnameserver set the domain name system (DNS)
servers and the DNS search paths
used in the XSCF network

setnetwork set or remove the network interface
that used in XSCF

setntp set the NTP servers used on the XSCF
network, the stratum value, the pre-
ferred server and the clock address
of the local clock of XSCF

setpacketfilters set the IP packet filtering rules to
be used in the XSCF network

setpasswordpolicy manage the system password policy

setpowerupdelay set the warm-up time of the system
and wait time before system startup

setprivileges assign user privileges

setroute set routing information for an XSCF
network interface

setshutdowndelay set the shutdown wait time at power
interruption of the uninterruptible
power supply (UPS)

XCP Last change: April 2013 7

Intro Intro(1)

setsmtp set up the Simple Mail Transfer Pro-
tocol (SMTP) settings

setsnmp manage the SNMP agent

setsnmpusm specify the SNMPv3 agent’s User-
based Security Model (USM) confi-
guration

setsnmpvacm modify the SNMPv3 agent’s View-based
Access Control Model (VACM) confi-
guration

setssh configure the settings for the
Secure Shell (SSH) service used in
the XSCF network

setsunmc start or stop the Sun Management
Center agent and make changes to its
configuration

settelnet start or stop the telnet service
used in the XSCF network

settimezone set the time zone and Daylight Sav-
ing Time of XSCF

setupfru set up device hardware

setupplatform set up platform specific settings

XCP Last change: April 2013 8

Intro Intro(1)

showad show Active Directory configuration
and messages

showaltitude display the altitude of the system
and whether the air filter installed

showarchiving display log archiving configuration
and status

showaudit display the current auditing system
state

showautologout display the session timeout time of
the XSCF shell

showboards display information on an eXtended
System Board (XSB)

showcod display Capacity on Demand (COD)
information

showcodactivation display the current Capacity on
Demand (COD) hardware activation
permits (COD permits) stored in the
COD database

showcodactivationhistory show Capacity on Demand (COD)
activation log

showcodlicense display the current Capacity on
Demand (COD) right-to-use (RTU)
licenses stored in the COD license
database

XCP Last change: April 2013 9

Intro Intro(1)

showcodusage display the current usage statistics
for Capacity on Demand (COD)
resources

showconsolepath display information on the domain
console that is currently connected

showdate show the date and time of XSCF

showdateoffset display differences between system
time and domain times

showdcl display the current domain component
list (DCL)

showdevices display current information on an
eXtended System Board (XSB)

showdomainmode display the domain mode

showdomainstatus display the current status of the
specified domain

showdscp display the IP addresses assigned to
the Domain to Service Processor Com-
munications Protocol (DSCP)

showdualpowerfeed display the current setting of dual
power feed mode

showemailreport display the email report configura-
tion data

XCP Last change: April 2013 10

Intro Intro(1)

showenvironment display the air flow volume, intake
air temperature and humidity, tem-
perature sensor, voltage sensor, fan
speed, and power consumption infor-
mation in the server

showfru display the hardware settings of
specified device

showhardconf display information about Field
Replaceable Units (FRUs) installed
in the system

showhostname display the current host name for
the XSCF unit

showhttps display the status of the HTTPS ser-
vice set for the XSCF network

showldap display the Lightweight Directory
Access Protocol (LDAP) configuration
for the Service Processor

showldapssl show LDAP/SSL configuration and mes-
sages

showlocale display the current setting for the
XSCF locale

showlocator display the state of the CHECK LED
on the operator panel

showloginlockout display the account lockout setting

XCP Last change: April 2013 11

Intro Intro(1)

showlogs display the specified log

showlookup display the configuration for
authentication and privileges lookup

showmonitorlog display the contents of monitoring
messages in real time

shownameserver display the registered domain name
system (DNS) servers and the DNS
search paths specified on the XSCF
network

shownetwork display information of network
interfaces for XSCF

shownotice display copyright and license infor-
mation for the copyright information
for eXtended System Control Facility
(XSCF) Control Package (XCP)

showntp display the NTP servers currently
set for the XSCF network

showpacketfilters show the IP packet filtering rules
that are set in the XSCF network

showpasswordpolicy display the current password set-
tings

showpowerupdelay display the current settings for the
warm-up time of the system and wait
time before system startup

XCP Last change: April 2013 12

Intro Intro(1)

showresult display the exit status of the most
recently executed command

showroute display routing information for an
XSCF network interface

showshutdowndelay show the shutdown wait time at power
interruption of the uninterruptible
power supply (UPS)

showsmtp display the Simple Mail Transfer
Protocol (SMTP) configuration infor-
mation

showsnmp display the configuration informa-
tion and current status of the SNMP
agent

showsnmpusm display the current User-based Secu-
rity Model (USM) information for the
SNMP agent

showsnmpvacm display the current View-based
Access Control Access (VACM) infor-
mation for the SNMP agent

showssh display the settings of the Secure
Shell (SSH) service that configured
for the XSCF network

showstatus display the degraded Field Replace-
able Units (FRUs)

XCP Last change: April 2013 13

Intro Intro(1)

showsunmc show setup information and status of
Sun Management Center agent

showtelnet display the current status of the
telnet service for the XSCF network

showtimezone display the XSCF time zone and Day-
light Saving Time information of
current settings

showuser display user account information

snapshot collect and transfer environment,
log, error, and FRUID data

switchscf switch the XSCF unit between the
active and standby states

testsb perform an initial diagnosis of the
specified physical system board
(PSB)

traceroute display the route packets take to
the specified network host or the
network device

unlockmaintenance forcibly release the locked status
of XSCF

version display firmware version

XCP Last change: April 2013 14

Intro Intro(1)

viewaudit display audit records

XCP Last change: April 2013 15

Displays the domain status for all domains

XSCF> showdomainstatus -a
DID Domain Status
00 Running
01 Running
02 Running
03 –
04 –
05 –
06 –
07 –
08 –
09 –
10 –
11 –
12 –
13 –
14 –
15 –
16 –
17 –
18 –
19 –
20 –
21 –
22 –
23 –

Connecting to domain 00. To exit, press #.

XSCF> console -d 00

Console contents may be logged.
Connect to DomainID 0?[y|n] :y

*******************************************************************************
* *
* Use of this system is restricted to authorized users. User activity is *
* monitored and recorded by system personnel. Anyone using this system *
* expressly consents to such monitoring and recording. BE ADVISED: if *
* possible criminal activity is detected, system records, along with certain *
* personal information, may be provided to law enforcement officials. *
* *
*******************************************************************************
solaris10 console login: exit from console.

Connecting to domain 01. To exit, press #.

XSCF> console -d 01

Console contents may be logged.
Connect to DomainID 1?[y|n] :y

*******************************************************************************
* *
* Use of this system is restricted to authorized users. User activity is *
* monitored and recorded by system personnel. Anyone using this system *
* expressly consents to such monitoring and recording. BE ADVISED: if *
* possible criminal activity is detected, system records, along with certain *
* personal information, may be provided to law enforcement officials. *
* *
*******************************************************************************
solaris11 console login: exit from console.

Type exit to exit XSCF

XSCF> exit
logout
Connection to 172.22.250.60 closed.

Resizing a VXFS filesystem on Solaris

I will resize a filesystem with 4 LUNs with 500GB

First, checking disk group, volume and mount point

root@solaris:/ # df -h /usr/software/documentum/engineering
Filesystem size used avail capacity Mounted on
/dev/vx/dsk/documentumdg/ged_engineeringvol
9.3T 8.8T 473G 96% /usr/software/documentum/engineering

Storage team provided these four LUN IDs
6029 / 6030 / 6031 / 6032

Verifying which HBA card is online – c3 and c5

root@solaris:/ # fcinfo hba-port | egrep 'OS Device Name|State'
OS Device Name: /dev/cfg/c11
State: offline
OS Device Name: /dev/cfg/c12
State: offline
OS Device Name: /dev/cfg/c9
State: offline
OS Device Name: /dev/cfg/c10
State: offline
OS Device Name: /dev/cfg/c4
State: offline
OS Device Name: /dev/cfg/c5
State: online
OS Device Name: /dev/cfg/c2
State: offline
OS Device Name: /dev/cfg/c3
State: online

Making the LUNs available to Solaris and Veritas Volume Manager

root@solaris:/ # cfgadm -c configure c5
root@solaris:/ # cfgadm -c configure c3
root@solaris:/ # devfsadm -C
root@solaris:/ # vxdctl enable

Searching for all 4 LUNs in Solaris

root@solaris:/ # echo | format | grep 6029
c6t6005076308FFC2A70000000000006029d0: configured with capacity of 499.98GB
4. c6t6005076308FFC2A70000000000006029d0
/scsi_vhci/ssd@g6005076308ffc2a70000000000006029

root@solaris:/ # echo | format | grep 6030
c6t6005076308FFC2A70000000000006030d0: configured with capacity of 499.98GB
9. c6t6005076308FFC2A70000000000006030d0
/scsi_vhci/ssd@g6005076308ffc2a70000000000006030

root@solaris:/ # echo | format | grep 6031
c6t6005076308FFC2A70000000000006031d0: configured with capacity of 499.98GB
11. c6t6005076308FFC2A70000000000006031d0
/scsi_vhci/ssd@g6005076308ffc2a70000000000006031

root@solaris:/ # echo | format | grep 6032
c6t6005076308FFC2A70000000000006032d0: configured with capacity of 499.98GB
10. c6t6005076308FFC2A70000000000006032d0
/scsi_vhci/ssd@g6005076308ffc2a70000000000006032

Searching for all 4 LUNs in Veritas Volume Manager

root@solaris:/ # vxdisk -o alldgs -e list | egrep '6029|6030|6031|6032'
ibm_ds8x000_6029 auto – – nolabel c6t6005076308FFC2A70000000000006029d0s2 std
ibm_ds8x000_6030 auto – – nolabel c6t6005076308FFC2A70000000000006030d0s2 std
ibm_ds8x000_6031 auto – – nolabel c6t6005076308FFC2A70000000000006031d0s2 std
ibm_ds8x000_6032 auto – – nolabel c6t6005076308FFC2A70000000000006032d0s2 std

Apply label on all 4 disks

root@solaris:/ # format c6t6005076308FFC2A70000000000006029d0s2

c6t6005076308FFC2A70000000000006029d0s2: configured with capacity of 499.98GB
selecting c6t6005076308FFC2A70000000000006029d0s2
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
!<cmd> – execute <cmd>, then return
quit
format> label
Ready to label disk, continue? y

format> q

The disks will show the status online invalid instead of nolabel

root@solaris:/ # vxdisk -o alldgs -e list | egrep '6029|6030|6031|6032'
ibm_ds8x000_6029 auto:none – – online invalid c6t6005076308FFC2A70000000000006029d0s2 std
ibm_ds8x000_6030 auto:none – – online invalid c6t6005076308FFC2A70000000000006030d0s2 std
ibm_ds8x000_6031 auto:none – – online invalid c6t6005076308FFC2A70000000000006031d0s2 std
ibm_ds8x000_6032 auto:none – – online invalid c6t6005076308FFC2A70000000000006032d0s2 std

I’ll be adding the LUNs to documentumdg. I follow a numerical order so I’m verifying which will be the next ones

root@solaris:/ # vxdisk -o alldgs -e list | grep documentumdg | sort -k3
ibm_ds8x000_3134 auto:cdsdisk documentum01 documentumdg online c6t6005076308FFC2A70000000000003134d0s2 std
ibm_ds8x000_6006 auto:cdsdisk documentum02 documentumdg online c6t6005076308FFC2A70000000000006006d0s2 std
ibm_ds8x000_3136 auto:cdsdisk documentum03 documentumdg online c6t6005076308FFC2A70000000000003136d0s2 std
ibm_ds8x000_6005 auto:cdsdisk documentum04 documentumdg online c6t6005076308FFC2A70000000000006005d0s2 std
ibm_ds8x000_3133 auto:cdsdisk documentum05 documentumdg online c6t6005076308FFC2A70000000000003133d0s2 std
ibm_ds8x000_6011 auto:cdsdisk documentum06 documentumdg online c6t6005076308FFC2A70000000000006011d0s2 std
ibm_ds8x000_6012 auto:cdsdisk documentum07 documentumdg online c6t6005076308FFC2A70000000000006012d0s2 std
ibm_ds8x000_6007 auto:cdsdisk documentum08 documentumdg online c6t6005076308FFC2A70000000000006007d0s2 std
ibm_ds8x000_6008 auto:cdsdisk documentum09 documentumdg online c6t6005076308FFC2A70000000000006008d0s2 std
ibm_ds8x000_6009 auto:cdsdisk documentum10 documentumdg online c6t6005076308FFC2A70000000000006009d0s2 std
ibm_ds8x000_6010 auto:cdsdisk documentum11 documentumdg online c6t6005076308FFC2A70000000000006010d0s2 std
ibm_ds8x000_6013 auto:cdsdisk documentum12 documentumdg online c6t6005076308FFC2A70000000000006013d0s2 std
ibm_ds8x000_6014 auto:cdsdisk documentum13 documentumdg online c6t6005076308FFC2A70000000000006014d0s2 std
ibm_ds8x000_6015 auto:cdsdisk documentum14 documentumdg online c6t6005076308FFC2A70000000000006015d0s2 std
ibm_ds8x000_6016 auto:cdsdisk documentum15 documentumdg online c6t6005076308FFC2A70000000000006016d0s2 std
ibm_ds8x000_6017 auto:cdsdisk documentum16 documentumdg online c6t6005076308FFC2A70000000000006017d0s2 std
ibm_ds8x000_6018 auto:cdsdisk documentum17 documentumdg online c6t6005076308FFC2A70000000000006018d0s2 std
ibm_ds8x000_6019 auto:cdsdisk documentum18 documentumdg online c6t6005076308FFC2A70000000000006019d0s2 std
ibm_ds8x000_6020 auto:cdsdisk documentum19 documentumdg online c6t6005076308FFC2A70000000000006020d0s2 std
ibm_ds8x000_6021 auto:cdsdisk documentum20 documentumdg online c6t6005076308FFC2A70000000000006021d0s2 std
ibm_ds8x000_6022 auto:cdsdisk documentum21 documentumdg online c6t6005076308FFC2A70000000000006022d0s2 std
ibm_ds8x000_6002 auto:cdsdisk documentum22 documentumdg online c6t6005076308FFC2A70000000000006002d0s2 std
ibm_ds8x000_6004 auto:cdsdisk documentum23 documentumdg online c6t6005076308FFC2A70000000000006004d0s2 std
ibm_ds8x000_6023 auto:cdsdisk documentum24 documentumdg online c6t6005076308FFC2A70000000000006023d0s2 std
ibm_ds8x000_6024 auto:cdsdisk documentum25 documentumdg online c6t6005076308FFC2A70000000000006024d0s2 std
ibm_ds8x000_6000 auto:cdsdisk documentum26 documentumdg online c6t6005076308FFC2A70000000000006000d0s2 std
ibm_ds8x000_6001 auto:cdsdisk documentum27 documentumdg online c6t6005076308FFC2A70000000000006001d0s2 std
ibm_ds8x000_6003 auto:cdsdisk documentum28 documentumdg online c6t6005076308FFC2A70000000000006003d0s2 std
ibm_ds8x000_6027 auto:cdsdisk documentum29 documentumdg online c6t6005076308FFC2A70000000000006027d0s2 std
ibm_ds8x000_6028 auto:cdsdisk documentum30 documentumdg online c6t6005076308FFC2A70000000000006028d0s2 std

All the disks in the disk group are formatted as cdsdisk. Configuring the disks to be used by Veritas Volume Manager

root@solaris:/ # vxdisksetup -i ibm_ds8x000_6029
root@solaris:/ # vxdisksetup -i ibm_ds8x000_6030
root@solaris:/ # vxdisksetup -i ibm_ds8x000_6031
root@solaris:/ # vxdisksetup -i ibm_ds8x000_6032

Checking the disks

root@solaris:/ # vxdisk -o alldgs -e list | egrep '6029|6030|6031|6032'
ibm_ds8x000_6029 auto:cdsdisk – – online c6t6005076308FFC2A70000000000006029d0s2 std
ibm_ds8x000_6030 auto:cdsdisk – – online c6t6005076308FFC2A70000000000006030d0s2 std
ibm_ds8x000_6031 auto:cdsdisk – – online c6t6005076308FFC2A70000000000006031d0s2 std
ibm_ds8x000_6032 auto:cdsdisk – – online c6t6005076308FFC2A70000000000006032d0s2 std

Adding them to documentumdg disk group

root@solaris:/ # vxdg -g documentumdg adddisk documentum31=ibm_ds8x000_6029
root@solaris:/ # vxdg -g documentumdg adddisk documentum32=ibm_ds8x000_6030
root@solaris:/ # vxdg -g documentumdg adddisk documentum33=ibm_ds8x000_6031
root@solaris:/ # vxdg -g documentumdg adddisk documentum34=ibm_ds8x000_6032

root@solaris:/ # vxdisk -o alldgs -e list | grep documentumdg | sort -k3
ibm_ds8x000_3134 auto:cdsdisk documentum01 documentumdg online c6t6005076308FFC2A70000000000003134d0s2 std
ibm_ds8x000_6006 auto:cdsdisk documentum02 documentumdg online c6t6005076308FFC2A70000000000006006d0s2 std
ibm_ds8x000_3136 auto:cdsdisk documentum03 documentumdg online c6t6005076308FFC2A70000000000003136d0s2 std
ibm_ds8x000_6005 auto:cdsdisk documentum04 documentumdg online c6t6005076308FFC2A70000000000006005d0s2 std
ibm_ds8x000_3133 auto:cdsdisk documentum05 documentumdg online c6t6005076308FFC2A70000000000003133d0s2 std
ibm_ds8x000_6011 auto:cdsdisk documentum06 documentumdg online c6t6005076308FFC2A70000000000006011d0s2 std
ibm_ds8x000_6012 auto:cdsdisk documentum07 documentumdg online c6t6005076308FFC2A70000000000006012d0s2 std
ibm_ds8x000_6007 auto:cdsdisk documentum08 documentumdg online c6t6005076308FFC2A70000000000006007d0s2 std
ibm_ds8x000_6008 auto:cdsdisk documentum09 documentumdg online c6t6005076308FFC2A70000000000006008d0s2 std
ibm_ds8x000_6009 auto:cdsdisk documentum10 documentumdg online c6t6005076308FFC2A70000000000006009d0s2 std
ibm_ds8x000_6010 auto:cdsdisk documentum11 documentumdg online c6t6005076308FFC2A70000000000006010d0s2 std
ibm_ds8x000_6013 auto:cdsdisk documentum12 documentumdg online c6t6005076308FFC2A70000000000006013d0s2 std
ibm_ds8x000_6014 auto:cdsdisk documentum13 documentumdg online c6t6005076308FFC2A70000000000006014d0s2 std
ibm_ds8x000_6015 auto:cdsdisk documentum14 documentumdg online c6t6005076308FFC2A70000000000006015d0s2 std
ibm_ds8x000_6016 auto:cdsdisk documentum15 documentumdg online c6t6005076308FFC2A70000000000006016d0s2 std
ibm_ds8x000_6017 auto:cdsdisk documentum16 documentumdg online c6t6005076308FFC2A70000000000006017d0s2 std
ibm_ds8x000_6018 auto:cdsdisk documentum17 documentumdg online c6t6005076308FFC2A70000000000006018d0s2 std
ibm_ds8x000_6019 auto:cdsdisk documentum18 documentumdg online c6t6005076308FFC2A70000000000006019d0s2 std
ibm_ds8x000_6020 auto:cdsdisk documentum19 documentumdg online c6t6005076308FFC2A70000000000006020d0s2 std
ibm_ds8x000_6021 auto:cdsdisk documentum20 documentumdg online c6t6005076308FFC2A70000000000006021d0s2 std
ibm_ds8x000_6022 auto:cdsdisk documentum21 documentumdg online c6t6005076308FFC2A70000000000006022d0s2 std
ibm_ds8x000_6002 auto:cdsdisk documentum22 documentumdg online c6t6005076308FFC2A70000000000006002d0s2 std
ibm_ds8x000_6004 auto:cdsdisk documentum23 documentumdg online c6t6005076308FFC2A70000000000006004d0s2 std
ibm_ds8x000_6023 auto:cdsdisk documentum24 documentumdg online c6t6005076308FFC2A70000000000006023d0s2 std
ibm_ds8x000_6024 auto:cdsdisk documentum25 documentumdg online c6t6005076308FFC2A70000000000006024d0s2 std
ibm_ds8x000_6000 auto:cdsdisk documentum26 documentumdg online c6t6005076308FFC2A70000000000006000d0s2 std
ibm_ds8x000_6001 auto:cdsdisk documentum27 documentumdg online c6t6005076308FFC2A70000000000006001d0s2 std
ibm_ds8x000_6003 auto:cdsdisk documentum28 documentumdg online c6t6005076308FFC2A70000000000006003d0s2 std
ibm_ds8x000_6027 auto:cdsdisk documentum29 documentumdg online c6t6005076308FFC2A70000000000006027d0s2 std
ibm_ds8x000_6028 auto:cdsdisk documentum30 documentumdg online c6t6005076308FFC2A70000000000006028d0s2 std
ibm_ds8x000_6029 auto:cdsdisk documentum31 documentumdg online c6t6005076308FFC2A70000000000006029d0s2 std
ibm_ds8x000_6030 auto:cdsdisk documentum32 documentumdg online c6t6005076308FFC2A70000000000006030d0s2 std
ibm_ds8x000_6031 auto:cdsdisk documentum33 documentumdg online c6t6005076308FFC2A70000000000006031d0s2 std
ibm_ds8x000_6032 auto:cdsdisk documentum34 documentumdg online c6t6005076308FFC2A70000000000006032d0s2 std

Checking available disk group space

root@solaris:/ # vxassist -g documentumdg maxsize
Maximum volume size: 4193908736 (2047807Mb)

Resizing volume and filesystem space

root@solaris:/ # vxresize -g documentumdg ged_engineeringvol +2047807M

root@solaris:/ # df -h /usr/software/documentum/engineering
Filesystem size used avail capacity Mounted on
/dev/vx/dsk/documentumdg/ged_engineeringvol
11T 8.8T 2.4T 79% /usr/software/documentum/engineering

Solaris 10 ERROR: Failed to determine zone configuration for target boot environment.

Applying the recommended patch cluster I received the message below

root@solaris10node1:/10_Recommended # ./installpatchset -B NewBE --s10patchset
ERROR: Failed to determine zone configuration for target boot environment.
Please verify configuration with zoneadm(1M).

Searching the internet, I found that the file /etc/zones/index should list all the zones as installed.

root@solaris10node1:~ # cat /etc/zones/index
# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# ident "@(#)zones-index 1.2 04/04/01 SMI"
#
# DO NOT EDIT: this file is automatically generated by zoneadm(1M)
# and zonecfg(1M). Any manual changes will be lost.
#
global:configured:/:

Since I’m using Live Upgrade, checking the name of the available boot environments

root@solaris10node1:~ # lustatus
Boot Environment Is Active Active Can Copy
Name Complete Now On Reboot Delete Status
————————– ——– —— ——— —— ———-
CurBE yes yes yes no –
NewBE yes no no yes –

Mounting the boot environment where I will apply the patch

root@solaris10node1:~ # lumount -n NewBE
/.alt.NewBE

And editing the index file in the new boot environment

root@solaris10node1:~ # cd /.alt.NewBE/etc/zones
root@solaris10node1:/.alt.NewBE/etc/zones # vi index
# Use is subject to license terms.
#
# ident "@(#)zones-index 1.2 04/04/01 SMI"
#
# DO NOT EDIT: this file is automatically generated by zoneadm(1M)
# and zonecfg(1M). Any manual changes will be lost.
#
#global:configured:/:
global:installed:/:

Unmounting the boot environment and trying again to install the patch

root@solaris10node1:~ # luumount -n NewBE

Solaris ps: show processes and sort by CPU consumption

Here are the options that you can use with ps

root@solaris:/ # ps -eo
ps: option requires an argument -- o
usage: ps [ -aAdeflcjLPyZ ] [ -o format ] [ -t termlist ]
[ -u userlist ] [ -U userlist ] [ -G grouplist ]
[ -p proclist ] [ -g pgrplist ] [ -s sidlist ] [ -z zonelist ]
‘format’ is one or more of:
user ruser group rgroup uid ruid gid rgid pid ppid pgid sid taskid ctid
pri opri pcpu pmem vsz rss osz nice class time etime stime zone zoneid
f s c lwp nlwp psr tty addr wchan fname comm args projid project pset

If I use user,pid,ppid,stime,tty,pcpu,pmem,comm the header is shown below

root@solaris:/ # ps -eo user,pid,ppid,stime,tty,pcpu,pmem,comm | head -1
USER PID PPID STIME TT %CPU %MEM COMMAND

I have sorted the output by the %CPU column

root@solaris:/ # ps -eo user,pid,ppid,stime,tty,pcpu,pmem,comm | sort -n -k6 | tail
oracle 93 9842 nov_09 ? 0.1 1.1 /usr/software/oracle/product/j2ee_10131/jdk/bin/java
oracle 1973 9842 nov_10 ? 0.1 1.7 /usr/software/oracle/product/j2ee_10131/jdk/bin/java
oracle 6417 9842 nov_02 ? 0.1 0.6 /usr/software/oracle/product/j2ee_10131/jdk/bin/java
oracle 9842 4351 ago_13 ? 0.1 0.1 /usr/software/oracle/product/j2ee_10131/opmn/bin/opmn
oracle 21623 9842 ago_13 ? 0.1 1.4 /usr/software/oracle/product/j2ee_10131/jdk/bin/java
oracle 21640 9842 ago_13 ? 0.1 0.4 /usr/software/oracle/product/j2ee_10131/jdk/bin/java
oracle 24709 9842 nov_11 ? 0.1 1.5 /usr/software/oracle/product/j2ee_10131/jdk/bin/java
root 2379 1599 ago_13 ? 0.7 0.0 /usr/sbin/nscd
producao 5300 4425 ago_13 ? 2.0 0.0 xcomtp
oracle 8840 9842 ago_14 ? 77.3 3.3 /usr/software/oracle/product/j2ee_10131/jdk/bin/java

Sun Cluster Resource Start failed Faulted

Checking status I see the resource lsn-orapvtl-01-res running on solaris10node2 had a failure

root@solaris10node1:~ # scstat -g | grep pvtl
Resources: dbs-oracle-pvtl-rg ddg-arcpvtl-01-res ddg-orapvtl-01-res ddg-cmpvtl-01-res lhn-orapvtl-01-res lsn-orapvtl-01-res dbs-orapvtl-01-res
Group: dbs-oracle-pvtl-rg solaris10node2 Online faulted No
Group: dbs-oracle-pvtl-rg solaris10node1 Offline No
Resource: ddg-arcpvtl-01-res solaris10node2 Online Online
Resource: ddg-arcpvtl-01-res solaris10node1 Offline Offline
Resource: ddg-orapvtl-01-res solaris10node2 Online Online
Resource: ddg-orapvtl-01-res solaris10node1 Offline Offline
Resource: ddg-cmpvtl-01-res solaris10node2 Online Online
Resource: ddg-cmpvtl-01-res solaris10node1 Offline Offline
Resource: lhn-orapvtl-01-res solaris10node2 Online Online – LogicalHostname online.
Resource: lhn-orapvtl-01-res solaris10node1 Offline Offline
Resource: lsn-orapvtl-01-res solaris10node2 Start failed Faulted
Resource: lsn-orapvtl-01-res solaris10node1 Offline Offline
Resource: dbs-orapvtl-01-res solaris10node2 Online Online
Resource: dbs-orapvtl-01-res solaris10node1 Offline Offline

I stopped the resource

root@solaris10:~ # scswitch -n -j lsn-orapvtl-01-res

root@solaris10node1:~ # scstat -g | grep pvtl
Resources: dbs-oracle-pvtl-rg ddg-arcpvtl-01-res ddg-orapvtl-01-res ddg-cmpvtl-01-res lhn-orapvtl-01-res lsn-orapvtl-01-res dbs-orapvtl-01-res
Group: dbs-oracle-pvtl-rg solaris10node2 Online No
Group: dbs-oracle-pvtl-rg solaris10node1 Offline No
Resource: ddg-arcpvtl-01-res solaris10node2 Online Online
Resource: ddg-arcpvtl-01-res solaris10node1 Offline Offline
Resource: ddg-orapvtl-01-res solaris10node2 Online Online
Resource: ddg-orapvtl-01-res solaris10node1 Offline Offline
Resource: ddg-cmpvtl-01-res solaris10node2 Online Online
Resource: ddg-cmpvtl-01-res solaris10node1 Offline Offline
Resource: lhn-orapvtl-01-res solaris10node2 Online Online – LogicalHostname online.
Resource: lhn-orapvtl-01-res solaris10node1 Offline Offline
Resource: lsn-orapvtl-01-res solaris10node2 Offline Offline
Resource: lsn-orapvtl-01-res solaris10node1 Offline Offline
Resource: dbs-orapvtl-01-res solaris10node2 Online Online
Resource: dbs-orapvtl-01-res solaris10node1 Offline Offline

Then started it

root@solaris10:~ # scswitch -e -j lsn-orapvtl-01-res

root@solaris10~ # scstat -g | grep pvtl
Resources: dbs-oracle-pvtl-rg ddg-arcpvtl-01-res ddg-orapvtl-01-res ddg-cmpvtl-01-res lhn-orapvtl-01-res lsn-orapvtl-01-res dbs-orapvtl-01-res
Group: dbs-oracle-pvtl-rg solaris10node2 Online No
Group: dbs-oracle-pvtl-rg solaris10node1 Offline No
Resource: ddg-arcpvtl-01-res solaris10node2 Online Online
Resource: ddg-arcpvtl-01-res solaris10node1 Offline Offline
Resource: ddg-orapvtl-01-res solaris10node2 Online Online
Resource: ddg-orapvtl-01-res solaris10node1 Offline Offline
Resource: ddg-cmpvtl-01-res solaris10node2 Online Online
Resource: ddg-cmpvtl-01-res solaris10node1 Offline Offline
Resource: lhn-orapvtl-01-res solaris10node2 Online Online – LogicalHostname online.
Resource: lhn-orapvtl-01-res solaris10node1 Offline Offline
Resource: lsn-orapvtl-01-res solaris10node2 Online Online
Resource: lsn-orapvtl-01-res solaris10node1 Offline Offline
Resource: dbs-orapvtl-01-res solaris10node2 Online Online
Resource: dbs-orapvtl-01-res solaris10node1 Offline Offline

ERROR: Target boot environment not identified as being Solaris 10.

I was installing the recommended patch bundle for Solaris 10 and I received an error that the server is not a Solaris 10

root@solaris10:/tmp/patch/10_Recommended # ./installpatchset --apply-prereq --s10patchset
ERROR: Target boot environment not identified as being Solaris 10.

root@solaris10:/tmp/patch/10_Recommended # uname -a
SunOS solaris10 5.10 Generic_147440-12 sun4v sparc SUNW,Sun-Blade-T6320

Check two packages SUNWcsr and SUNWcsu

root@solaris10:~ # pkginfo -l SUNWcsr
ERROR: information for “SUNWcsr” was not found

root@solaris10:~ # pkginfo -l SUNWcsu
PKGINST: SUNWcsu
NAME: Core Solaris, (Usr)
CATEGORY: system
ARCH: sparc
VERSION: 11.10.0,REV=2005.01.21.15.53
BASEDIR: /
VENDOR: Oracle Corporation
DESC: core software for a specific instruction-set architecture
PSTAMP: on10-patch20120109151043
INSTDATE: Nov 18 2012 01:12
HOTLINE: Please contact your local service provider
STATUS: completely installed
FILES: 1666 installed pathnames
79 shared pathnames
295 linked files
144 directories
480 executables
30 setuid/setgid executables
30370 blocks used (approx)

In this case, the file /var/sadm/pkg/SUNWcsr/pkginfo was missing

root@solaris10:/var/sadm/pkg/SUNWcsr # ls -l
total 4
drwxr-xr-x 2 root root 512 Nov 18 2012 install
drwxr-x— 27 root root 1024 Nov 18 2012 save

root@solaris10:/var/sadm/pkg/SUNWcsr # ls -l
total 56
drwxr-xr-x 2 root root 1024 Aug 18 2012 install
-rw-r--r-- 1 root root 26479 Aug 18 2012 pkginfo
drwxr-x— 49 root root 1024 Aug 18 2012 save

There are two recommendations to solve this problem:
– reinstall the server
– restore from backup

Source: installpatch reports ERROR: Target boot environment not identified as being solaris 10 (Doc ID 1511328.1)

Solaris 10 – LUN expansion not showing the new size

I did the following: entered the format utility and chose type

root@solaris10:/ # format c8t6005076308FFC2A70000000000001143d0
selecting c8t6005076308FFC2A70000000000001143d0
[disk formatted]

FORMAT MENU:
disk – select a disk
type – select (define) a disk type
partition – select (define) a partition table
current – describe the current disk
format – format and analyze the disk
repair – repair a defective sector
label – write label to the disk
analyze – surface analysis
defect – defect list management
backup – search for backup labels
verify – read and display labels
save – save new disk/partition definitions
inquiry – show vendor, product and revision
volname – set 8-character volume name
!<cmd> – execute <cmd>, then return
quit
format> t

Selected 0 to Auto configure

AVAILABLE DRIVE TYPES:
0. Auto configure
1. Quantum ProDrive 80S
2. Quantum ProDrive 105S
3. CDC Wren IV 94171-344
4. SUN0104
5. SUN0207
6. SUN0327
7. SUN0340
8. SUN0424
9. SUN0535
10. SUN0669
11. SUN1.0G
12. SUN1.05
13. SUN1.3G
14. SUN2.1G
15. SUN2.9G
16. Zip 100
17. Zip 250
18. Peerless 10GB
19. IBM-2107900-.600
20. other
Specify disk type (enter its number)[19]: 0

After selecting to autoconfigure it was showing the new size and then it was labeled

c8t6005076308FFC2A70000000000001143d0: configured with capacity of 200.98GB
<IBM-2107900-.600 cyl 25726 alt 2 hd 64 sec 256>
selecting c8t6005076308FFC2A70000000000001143d0
[disk formatted]
format> l
Ready to label disk, continue? y

You can also take a look at this procedure to perform – Getting the Solaris format utility to work with an expanded LUN