How to recover a RAID array on a Raspberry Pi

  Uncategorized
# scan the array to see it
mdadm --misc --scan --detail /dev/md127
# it's inactive
INACTIVE-ARRAY /dev/md127 metadata=1.2 spares=1 name=nas7:5 UUID=762dfe27:e976534f:0d66ec1b:271b9185


# try to assemble it, but drives are busy
mdadm --assemble /dev/md127 /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1
mdadm: /dev/sda1 is busy - skipping
mdadm: /dev/sdb1 is busy - skipping
mdadm: /dev/sdc1 is busy - skipping
mdadm: /dev/sdd1 is busy - skipping
mdadm: /dev/sde1 is busy - skipping

# are the drives visible?
lsblk
NAME        MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINTS
sda           8:0    0 18.2T  0 disk  
└─sda1        8:1    0 18.2T  0 part  
  └─md127     9:127  0    0B  0 raid5 
sdb           8:16   0 18.2T  0 disk  
└─sdb1        8:17   0 18.2T  0 part  
  └─md127     9:127  0    0B  0 raid5 
sdc           8:32   0 18.2T  0 disk  
└─sdc1        8:33   0 18.2T  0 part  
  └─md127     9:127  0    0B  0 raid5 
sdd           8:48   0 18.2T  0 disk  
└─sdd1        8:49   0 18.2T  0 part  
  └─md127     9:127  0    0B  0 raid5 
sde           8:64   0 18.2T  0 disk  
└─sde1        8:65   0 18.2T  0 part  
  └─md127     9:127  0    0B  0 raid5 
mmcblk0     179:0    0  7.4G  0 disk  
├─mmcblk0p1 179:1    0  512M  0 part  /boot/firmware
└─mmcblk0p2 179:2    0  6.9G  0 part  /

# drives are busy because the inactive md127 array still claims them,
# so they must be released before they can be reassembled

# stop the array
mdadm --stop /dev/md127

# force-reassemble it (the kernel will then resync the rebuilding member on its own)
mdadm --verbose --assemble --force /dev/md127 /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1

mdadm: looking for devices for /dev/md127
mdadm: /dev/sda1 is identified as a member of /dev/md127, slot 0.
mdadm: /dev/sdb1 is identified as a member of /dev/md127, slot 1.
mdadm: /dev/sdc1 is identified as a member of /dev/md127, slot 2.
mdadm: /dev/sdd1 is identified as a member of /dev/md127, slot 3.
mdadm: /dev/sde1 is identified as a member of /dev/md127, slot 4.
mdadm: Marking array /dev/md127 as 'clean'
mdadm: added /dev/sdb1 to /dev/md127 as 1
mdadm: added /dev/sdc1 to /dev/md127 as 2
mdadm: added /dev/sdd1 to /dev/md127 as 3
mdadm: added /dev/sde1 to /dev/md127 as 4
mdadm: added /dev/sda1 to /dev/md127 as 0
mdadm: /dev/md127 has been started with 4 drives (out of 5) and 1 rebuilding.


# restarted array, that looks good

mdadm --detail --scan
ARRAY /dev/md127 metadata=1.2 spares=1 name=nas7:5 UUID=762dfe27:e976534f:0d66ec1b:271b9185

# show me the array details
mdadm --detail /dev/md127
/dev/md127:
           Version : 1.2
     Creation Time : Sat Jun 15 17:52:00 2024
        Raid Level : raid5
        Array Size : 78126764032 (72.76 TiB 80.00 TB)
     Used Dev Size : 19531691008 (18.19 TiB 20.00 TB)
      Raid Devices : 5
     Total Devices : 5
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Mon Jun 17 00:38:15 2024
             State : clean, degraded 
    Active Devices : 4
   Working Devices : 5
    Failed Devices : 0
     Spare Devices : 1

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : bitmap

              Name : nas7:5  (local to host nas7)
              UUID : 762dfe27:e976534f:0d66ec1b:271b9185
            Events : 25246

    Number   Major   Minor   RaidDevice State
       0       8        1        0      active sync   /dev/sda1
       1       8       17        1      active sync   /dev/sdb1
       2       8       33        2      active sync   /dev/sdc1
       3       8       49        3      active sync   /dev/sdd1
       5       8       65        4      spare rebuilding   /dev/sde1


# mount it
mount /dev/md127 /mnt/raid5-80tb/

# check the space
df -h /mnt/raid5-80tb/
Filesystem      Size  Used Avail Use% Mounted on
/dev/md127       73T  105G   69T   1% /mnt/raid5-80tb

# looks good