# Incremental assembly of Linux RAID (mdadm) arrays: whenever udev sees a
# block device carrying an mdadm signature, it is handed to "mdadm -I".
# See udev(8) for the rule syntax.

# Skip everything while the anaconda installer is running; anaconda
# assembles raid devices by hand.
ENV{ANACONDA}=="?*", GOTO="md_end"

# Likewise skip any disk that is destined to become a multipath device.
ENV{DM_MULTIPATH_DEVICE_PATH}=="?*", GOTO="md_end"

# Add events are handled for plain block devices (they are usable the
# moment they appear). Change events additionally have to be handled for
# dm devices (LUKS partitions, LVM logical volumes) and for md devices,
# since both are first added and only later brought live, which fires a
# change event. Change events on bare hard disks are deliberately left
# alone: stopping all arrays on a disk and then repartitioning it with
# fdisk fires a change event when fdisk exits, and assembly must wait
# until fdisk has finished on every member disk. There is no way to
# detect that point, so such arrays have to be brought up manually after
# fdisk has been run on all of the disks.

# First, process add (and change) events carrying a linux_raid_member
# signature (md and dm devices do nothing useful here, only ordinary
# disks; imsm array members are not picked up here either).
SUBSYSTEM=="block", ACTION=="add|change", ENV{ID_FS_TYPE}=="linux_raid_member", \
	RUN+="/sbin/mdadm -I --export $env{DEVNAME}"

# Honour kernel command line switches that turn BIOS raid handling off.
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
	RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="md_imsm_inc_end"

# On removal, fail/remove the device out of any array it belongs to,
# using the stable ID_PATH when one is available.
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
	RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
	RUN+="/sbin/mdadm -If $name"

# Make sure this is not a dm device that must be skipped for some reason
# (no dm udev rules ran, other rules are disabled, or it is suspended).
ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="dm_change_end"
ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
	ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="dm_change_end"

# Finally pick up nested md raid arrays: an md array that is itself a
# member of another md array only becomes usable on the change event
# fired when it goes live.
KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
	ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"

# If the initramfs only started some of the arrays in our container, run
# incremental assembly on the container itself. Note: mdadm already ran
# on the container in 64-md-raid.rules, which is how the MD_LEVEL
# environment variable got set. Should that disappear from the other
# file, the following line will need to be inserted into the middle of
# the next rule:
# IMPORT{program}="/sbin/mdadm -D --export $tempnode", \

SUBSYSTEM=="block", ACTION=="add|change", KERNEL=="md*", \
	ENV{MD_LEVEL}=="container", RUN+="/sbin/mdadm -I $env{DEVNAME}"


LABEL="md_end"