Increase default chunk size to 512K

This seems more appropriate for current (and recent) model drives than
64K.
64K is still the default for '--build' as changing that could corrupt
data.
64K is also the default rounding for 'linear' on kernels older than
2.6.16.

Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
NeilBrown 2009-11-17 13:08:55 +11:00
parent df0d4ea04e
commit 5f175898de
20 changed files with 51 additions and 41 deletions

View File

@ -234,8 +234,15 @@ int Create(struct supertype *st, char *mddev,
case 10:
case 6:
case 0:
case LEVEL_LINEAR: /* linear */
if (chunk == 0) {
chunk = 512;
if (verbose > 0)
fprintf(stderr, Name ": chunk size defaults to 512K\n");
}
break;
case LEVEL_LINEAR:
/* a chunksize of zero is perfectly valid (and preferred) since 2.6.16 */
if (get_linux_version() < 2006016 && chunk == 0) {
chunk = 64;
if (verbose > 0)
fprintf(stderr, Name ": chunk size defaults to 64K\n");

View File

@ -440,7 +440,9 @@ before the number of devices in the array is reduced.
.TP
.BR \-c ", " \-\-chunk=
Specify chunk size of kibibytes. The default is 64.
Specify chunk size in kibibytes. The default when creating an
array is 512KB. To ensure compatibility with earlier versions, the
default when Building an array with no persistent metadata is 64KB.
This is only meaningful for RAID0, RAID4, RAID5, RAID6, and RAID10.
.TP
@ -450,7 +452,8 @@ component will be rounded down to a multiple of this size.
This is a synonym for
.B \-\-chunk
but highlights the different meaning for Linear as compared to other
RAID levels.
RAID levels. The default is 64K if a kernel earlier than 2.6.16 is in
use, and is 0K (i.e. no rounding) in later kernels.
.TP
.BR \-l ", " \-\-level=

View File

@ -3,13 +3,13 @@
mdadm -CR $md0 -l linear -n3 $dev0 $dev1 $dev2
check linear
testdev $md0 3 $mdsize0 64
testdev $md0 3 $mdsize0 1
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=linear -n4 $dev0 $dev1 $dev2 $dev3
check linear
testdev $md0 4 $mdsize1 64
testdev $md0 4 $mdsize1 1
mdadm -S $md0
# now with no superblock

View File

@ -3,19 +3,19 @@
mdadm -CR $md0 -l raid0 -n3 $dev0 $dev1 $dev2
check raid0
testdev $md0 3 $mdsize0 64
testdev $md0 3 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 -l0 -n4 $dev0 $dev1 $dev2 $dev3
check raid0
testdev $md0 4 $mdsize1 64
testdev $md0 4 $mdsize1 512
mdadm -S $md0
# now with no superblock
mdadm -B $md0 -l0 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check raid0
testdev $md0 5 $size 64
testdev $md0 5 $size 512
mdadm -S $md0

View File

@ -13,6 +13,6 @@ do
esac
mdadm --create --run --level=raid10 --layout $lo --raid-disks 6 -x 1 $md0 $devs
check resync ; check raid10
testdev $md0 $m $mdsize0 $[64*cm]
testdev $md0 $m $mdsize0 $[512*cm]
mdadm -S $md0
done

View File

@ -3,13 +3,13 @@
mdadm -CfR $md0 -l 4 -n3 $dev0 $dev1 $dev2
check resync ; check raid[45]
testdev $md0 2 $mdsize0 64
testdev $md0 2 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid4 -n4 $dev0 $dev1 $dev2 $dev3
check recovery; check raid[45]
testdev $md0 3 $mdsize1 64
testdev $md0 3 $mdsize1 512
mdadm -S $md0

View File

@ -3,13 +3,13 @@
mdadm -CfR $md0 -l 5 -n3 $dev0 $dev1 $dev2
check resync
testdev $md0 2 $mdsize0 64
testdev $md0 2 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid5 -n4 $dev0 $dev1 $dev2 $dev3
check recovery
testdev $md0 3 $mdsize1 64
testdev $md0 3 $mdsize1 512
mdadm -S $md0
# now same again with explicit layout
@ -19,13 +19,13 @@ do
mdadm -CfR $md0 -l 5 -p $lo -n3 $dev0 $dev1 $dev2
check resync ; check raid5
testdev $md0 2 $mdsize0 64
testdev $md0 2 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid5 --layout $lo -n4 $dev0 $dev1 $dev2 $dev3
check recovery ; check raid5
testdev $md0 3 $mdsize1 64
testdev $md0 3 $mdsize1 512
mdadm -S $md0
done

View File

@ -3,13 +3,13 @@
mdadm -CfR $md0 -l 6 -n4 $dev0 $dev1 $dev2 $dev3
check resync ; check raid6
testdev $md0 2 $mdsize0 64
testdev $md0 2 $mdsize0 512
mdadm -S $md0
# now with version-1 superblock
mdadm -CR $md0 -e1 --level=raid6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
check resync ; check raid6
testdev $md0 3 $mdsize1 64
testdev $md0 3 $mdsize1 512
mdadm -S $md0

View File

@ -10,13 +10,13 @@ do
1.2 ) sz=$mdsize12 ;;
esac
mdadm -CRf $md0 --level linear -e $e --raid-disks=1 $dev1
testdev $md0 1 $sz 64
testdev $md0 1 $sz 1
mdadm --grow $md0 --add $dev2
testdev $md0 2 $sz 64
testdev $md0 2 $sz 1
mdadm --grow $md0 --add $dev3
testdev $md0 3 $sz 64
testdev $md0 3 $sz 1
mdadm -S $md0
done

View File

@ -10,11 +10,11 @@ testdev $md0 2 $[size/2] 32
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 2 $mdsize0 64
testdev $md0 2 $mdsize0 32
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 2 $[size/2] 64
testdev $md0 2 $[size/2] 32
mdadm -S $md0

View File

@ -10,11 +10,11 @@ testdev $md0 2 $[size/2] 32
mdadm --grow $md0 --size max
check resync
check wait
testdev $md0 2 $mdsize0 64
testdev $md0 2 $mdsize0 32
mdadm --grow $md0 --size $[size/2]
check nosync
testdev $md0 2 $[size/2] 64
testdev $md0 2 $[size/2] 32
mdadm -S $md0

View File

@ -6,7 +6,7 @@
mdadm -CR $md2 -l0 -n3 $dev0 $dev1 $dev2
check raid0
tst="testdev $md2 3 $mdsize0 64"
tst="testdev $md2 3 $mdsize0 512"
$tst
uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`
mdadm -S $md2
@ -70,7 +70,7 @@ mdadm -S $md2
mdadm --zero-superblock $dev0 $dev1 $dev2
mdadm -CR $md2 -l0 --metadata=1.0 -n3 $dev0 $dev1 $dev2
check raid0
tst="testdev $md2 3 $mdsize1 64"
tst="testdev $md2 3 $mdsize1 512"
$tst
uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`

View File

@ -3,7 +3,7 @@
# including with missing devices.
mdadm -CR $md1 -l5 -n3 $dev0 $dev1 $dev2
tst="check raid5 ;testdev $md1 2 $mdsize0 64 ; mdadm -S $md1"
tst="check raid5 ;testdev $md1 2 $mdsize0 512 ; mdadm -S $md1"
uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'`
check wait
eval $tst

View File

@ -2,7 +2,7 @@
# create a v-1 raid5 array and assemble in various ways
mdadm -CR -e1 --name one $md1 -l5 -n3 -x2 $dev0 $dev1 $dev2 $dev3 $dev4
tst="check raid5 ;testdev $md1 2 $mdsize1 64 ; mdadm -S $md1"
tst="check raid5 ;testdev $md1 2 $mdsize1 512 ; mdadm -S $md1"
uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'`
check wait

View File

@ -1,7 +1,7 @@
# create a raid0, re-assemble with a different super-minor
mdadm -CR $md0 -l0 -n3 $dev0 $dev1 $dev2
testdev $md0 3 $mdsize0 64
testdev $md0 3 $mdsize0 512
minor1=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
mdadm -S /dev/md0

View File

@ -10,7 +10,7 @@ mdadm -Ss
mdadm -As -c /dev/null --homehost=testing -vvv
testdev $md1 1 $mdsize0 64
testdev $md2 1 $mdsize0 64
testdev $md0 2 $mdsize00 64
testdev $md0 2 $mdsize00 512
mdadm -Ss
mdadm --zero-superblock $dev0 $dev1 $dev2 $dev3
@ -20,5 +20,5 @@ mdadm -CR $md0 -l0 -n2 $md1 $dev2 --homehost=testing
mdadm -Ss
mdadm -As -c /dev/null --homehost=testing -vvv
testdev $md1 1 $mdsize0 64
testdev $md0 1 $[mdsize0+mdsize00] 64
testdev $md0 1 $[mdsize0+mdsize00] 512
mdadm -Ss

View File

@ -89,10 +89,10 @@ mdadm $md0 --fail $dev3
# switch layout to a DDF layout and back to make sure that works.
mdadm -G /dev/md0 --layout=ddf-N-continue --backup-file $bu
checkgeo md0 raid6 4 $[64*1024] 10
checkgeo md0 raid6 4 $[512*1024] 10
dotest 2
mdadm -G /dev/md0 --layout=ra --backup-file $bu
checkgeo md0 raid6 4 $[64*1024] 1
checkgeo md0 raid6 4 $[512*1024] 1
dotest 2
mdadm -G $md0 -l5 --backup-file $bu

View File

@ -12,7 +12,7 @@ export MDADM_GROW_VERITY=1
dotest() {
sleep 0.5
check wait
testdev $md0 $1 $mdsize0 64 nd
testdev $md0 $1 $mdsize0 512 nd
blockdev --flushbufs $md0
cmp -s -n $[textK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; }
# write something new - shift chars 4 space
@ -59,7 +59,7 @@ l5[5]=parity-last
for layout in 0 1 2 3 4 5 0
do
mdadm -G $md0 --layout=${l5[$layout]} --backup-file $bu
checkgeo md0 raid5 5 $[64*1024] $layout
checkgeo md0 raid5 5 $[512*1024] $layout
dotest 4
done
@ -86,6 +86,6 @@ l6[20]=parity-first-6
for layout in 0 1 2 3 4 5 8 9 10 16 17 18 19 20 0
do
mdadm -G $md0 --layout=${l6[$layout]} --backup-file $bu
checkgeo md0 raid6 5 $[64*1024] $layout
checkgeo md0 raid6 5 $[512*1024] $layout
dotest 3
done

View File

@ -12,7 +12,7 @@ do
do dd if=/dev/urandom of=$d bs=1024 || true
done
mdadm -CR $md0 -amd -l5 -n$disks --assume-clean $devs
mdadm -CR $md0 -amd -l5 -c 256 -n$disks --assume-clean $devs
mdadm $md0 --add $dev6
echo 20 > /proc/sys/dev/raid/speed_limit_max
mdadm --grow $md0 -n $[disks+1]

View File

@ -12,10 +12,10 @@ mdadm -CR /dev/md/ddf0 -e ddf -n 5 $dev8 $dev9 $dev10 $dev11 $dev12
mdadm -CR r0 -l0 -n5 /dev/md/ddf0 -z 5000
mdadm -CR r1 -l1 -n2 /dev/md/ddf0
mdadm -CR r5 -l5 -n3 /dev/md/ddf0
testdev /dev/md/r0 5 5000 64
# r0 will use 4992 due to chunk size, so that leave 27776 for the rest
testdev /dev/md/r1 1 27776 1
testdev /dev/md/r5 2 27776 64
testdev /dev/md/r0 5 5000 512
# r0 will use 4608 due to chunk size, so that leaves 28160 for the rest
testdev /dev/md/r1 1 28160 1
testdev /dev/md/r5 2 28160 512
dd if=/dev/sda of=/dev/md/r0 || true
dd if=/dev/sda of=/dev/md/r1 || true
dd if=/dev/sda of=/dev/md/r5 || true