Commit 8f79cfcd authored by Andre Noll, committed by NeilBrown

md: raid0: Remove hash spacing and sector shift.

The "sector_shift" and "spacing" fields of struct raid0_private_data
were only used for the hash table lookups. So the removal of the
hash table allows us to get rid of these fields as well, which simplifies
create_strip_zones() and raid0_run() quite a bit.
Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
parent 09770e0b
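
For context: the hash table itself was already removed in the parent commit (09770e0b), which left sector-to-zone mapping as a simple walk over the strip zones. The sketch below illustrates that kind of lookup; it is a simplified example (it assumes strip_zone carries zone_start and sectors fields) rather than the exact kernel code:

static struct strip_zone *find_zone(raid0_conf_t *conf, sector_t sector)
{
	int i;

	/* Walk the zones in ascending order; the first zone whose end
	 * lies beyond the requested sector is the one containing it.
	 * (Illustrative only -- field names are assumed.)
	 */
	for (i = 0; i < conf->nr_strip_zones; i++) {
		struct strip_zone *z = conf->strip_zone + i;

		if (sector < z->zone_start + z->sectors)
			return z;
	}
	return NULL; /* sector lies beyond the end of the array */
}

With no hash table left to size, neither "spacing" nor "sector_shift" (which existed only to shift the spacing down so that sector_div() could work with a 32-bit divisor) has any remaining user, which is what the deletions below clean up.
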
@@ -56,7 +56,6 @@ static int create_strip_zones (mddev_t *mddev)
{
int i, c, j;
sector_t curr_zone_end;
sector_t min_spacing;
raid0_conf_t *conf = mddev_to_conf(mddev);
mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
struct strip_zone *zone;
@@ -202,28 +201,7 @@ static int create_strip_zones (mddev_t *mddev)
printk(KERN_INFO "raid0: current zone start: %llu\n",
(unsigned long long)smallest->sectors);
}
/* Now find appropriate hash spacing.
* We want a number which causes most hash entries to cover
* at most two strips, but the hash table must be at most
* 1 PAGE. We choose the smallest strip, or contiguous collection
* of strips, that has big enough size. We never consider the last
* strip though as it's size has no bearing on the efficacy of the hash
* table.
*/
conf->spacing = curr_zone_end;
min_spacing = curr_zone_end;
sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
for (i=0; i < conf->nr_strip_zones-1; i++) {
sector_t s = 0;
for (j = i; j < conf->nr_strip_zones - 1 &&
s < min_spacing; j++)
s += conf->strip_zone[j].sectors;
if (s >= min_spacing && s < conf->spacing)
conf->spacing = s;
}
mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
@@ -273,10 +251,8 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return array_sectors;
}
static int raid0_run (mddev_t *mddev)
static int raid0_run(mddev_t *mddev)
{
unsigned cur=0, i=0, nb_zone;
s64 sectors;
raid0_conf_t *conf;
if (mddev->chunk_size == 0) {
@@ -306,43 +282,6 @@ static int raid0_run (mddev_t *mddev)
printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
(unsigned long long)mddev->array_sectors);
printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
(unsigned long long)conf->spacing);
{
sector_t s = raid0_size(mddev, 0, 0);
sector_t space = conf->spacing;
int round;
conf->sector_shift = 0;
if (sizeof(sector_t) > sizeof(u32)) {
/*shift down space and s so that sector_div will work */
while (space > (sector_t) (~(u32)0)) {
s >>= 1;
space >>= 1;
s += 1; /* force round-up */
conf->sector_shift++;
}
}
round = sector_div(s, (u32)space) ? 1 : 0;
nb_zone = s + round;
}
printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
sectors = conf->strip_zone[cur].sectors;
for (i=1; i< nb_zone; i++) {
while (sectors <= conf->spacing) {
cur++;
sectors += conf->strip_zone[cur].sectors;
}
sectors -= conf->spacing;
}
if (conf->sector_shift) {
conf->spacing >>= conf->sector_shift;
/* round spacing up so when we divide by it, we
* err on the side of too-low, which is safest
*/
conf->spacing++;
}
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
......
@@ -15,9 +15,6 @@ struct raid0_private_data
struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
sector_t spacing;
int sector_shift; /* shift this before divide by spacing */
};
typedef struct raid0_private_data raid0_conf_t;