Commit ff940fd4 authored by Florian Haas, committed by Philipp Reisner

Fix spelling errors.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 83f2029c
@@ -155,7 +155,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
iop = mdev->md_io_tmpp;
if (rw & WRITE) {
- /* these are GFP_KERNEL pages, preallocated
+ /* these are GFP_KERNEL pages, pre-allocated
* on device initialization */
void *p = page_address(mdev->md_io_page);
void *hp = page_address(mdev->md_io_tmpp);
@@ -243,7 +243,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
if (al_ext->lc_number != enr) {
/* drbd_al_write_transaction(mdev,al_ext,enr);
* recurses into generic_make_request(), which
- * disalows recursion, bios being serialized on the
+ * disallows recursion, bios being serialized on the
* current->bio_tail list now.
* we have to delegate updates to the activity log
* to the worker thread. */
@@ -390,7 +390,7 @@ static int drbd_al_read_tr(struct drbd_conf *mdev,
sector = bdev->md.md_offset + bdev->md.al_offset + index;
/* Dont process error normally,
- * as this is done before disk is atached! */
+ * as this is done before disk is attached! */
if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
return -1;
@@ -536,7 +536,7 @@ static void atodb_endio(struct bio *bio, int error)
struct page *page;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
- /* strange behaviour of some lower level drivers...
+ /* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?! */
if (!error && !uptodate)
@@ -662,7 +662,7 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
nr_elements = mdev->act_log->nr_elements;
- /* GFP_KERNEL, we are not in anyones write-out path */
+ /* GFP_KERNEL, we are not in anyone's write-out path */
bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
if (!bios)
goto submit_one_by_one;
@@ -683,7 +683,7 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
goto free_bios_submit_one_by_one;
}
- /* unneccessary optimization? */
+ /* unnecessary optimization? */
lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
@@ -789,7 +789,7 @@ static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
* @mdev: DRBD device.
*
* Removes all active extents form the activity log, waiting until
- * the reference count of each etry dropped to 0 first, of course.
+ * the reference count of each entry dropped to 0 first, of course.
*
* You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
*/
@@ -962,7 +962,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
/* we clear it (in sync).
* round up start sector, round down end sector. we make sure we only
- * clear full, alligned, BM_BLOCK_SIZE (4K) blocks */
+ * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
return;
if (unlikely(esector == (nr_sectors-1)))
@@ -987,7 +987,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
if (count) {
/* we need the lock for drbd_try_clear_on_disk_bm */
if (jiffies - mdev->rs_mark_time > HZ*10) {
- /* should be roling marks,
+ /* should be rolling marks,
* but we estimate only anyways. */
if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
mdev->state.conn != C_PAUSED_SYNC_T &&
@@ -1202,21 +1202,21 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
spin_lock_irq(&mdev->al_lock);
if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
/* in case you have very heavy scattered io, it may
- * stall the syncer undefined if we giveup the ref count
+ * stall the syncer undefined if we give up the ref count
* when we try again and requeue.
*
* if we don't give up the refcount, but the next time
* we are scheduled this extent has been "synced" by new
* application writes, we'd miss the lc_put on the
- * extent we keept the refcount on.
- * so we remembered which extent we had to try agin, and
+ * extent we keep the refcount on.
+ * so we remembered which extent we had to try again, and
* if the next requested one is something else, we do
* the lc_put here...
* we also have to wake_up
*/
trace_drbd_resync(mdev, TRACE_LVL_ALL,
- "dropping %u, aparently got 'synced' by application io\n",
+ "dropping %u, apparently got 'synced' by application io\n",
mdev->resync_wenr);
e = lc_find(mdev->resync, mdev->resync_wenr);
@@ -1261,7 +1261,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
goto try_again;
}
- /* Do or do not. There is no try. -- Joda */
+ /* Do or do not. There is no try. -- Yoda */
e = lc_get(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
@@ -1451,7 +1451,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
/*
* round up start sector, round down end sector. we make sure we only
- * handle full, alligned, BM_BLOCK_SIZE (4K) blocks */
+ * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
return;
if (unlikely(esector == (nr_sectors-1)))
......
@@ -32,7 +32,7 @@
/* OPAQUE outside this file!
* interface defined in drbd_int.h
- * convetion:
+ * convention:
* function name drbd_bm_... => used elsewhere, "public".
* function name bm_... => internal to implementation, "private".
@@ -323,7 +323,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev)
/*
* since (b->bm_bits % BITS_PER_LONG) != 0,
* this masks out the remaining bits.
- * Rerturns the number of bits cleared.
+ * Returns the number of bits cleared.
*/
static int bm_clear_surplus(struct drbd_bitmap *b)
{
@@ -434,11 +434,11 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
/*
* make sure the bitmap has enough room for the attached storage,
- * if neccessary, resize.
+ * if necessary, resize.
* called whenever we may have changed the device size.
* returns -ENOMEM if we could not allocate enough memory, 0 on success.
* In case this is actually a resize, we copy the old bitmap into the new one.
- * Otherwise, the bitmap is initiallized to all bits set.
+ * Otherwise, the bitmap is initialized to all bits set.
*/
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
{
@@ -722,7 +722,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
int uptodate = bio_flagged(bio, BIO_UPTODATE);
- /* strange behaviour of some lower level drivers...
+ /* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?!
* do we want to WARN() on this? */
@@ -849,7 +849,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
/* flush bitmap to stable storage */
drbd_md_flush(mdev);
} else /* rw == READ */ {
- /* just read, if neccessary adjust endianness */
+ /* just read, if necessary adjust endianness */
b->bm_set = bm_count_bits_swap_endian(b);
dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
jiffies - now);
@@ -885,7 +885,7 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
* @mdev: DRBD device.
* @enr: Extent number in the resync lru (happens to be sector offset)
*
- * The BM_EXT_SIZE is on purpose exactle the amount of the bitmap covered
+ * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
* by a single sector write. Therefore enr == sector offset from the
* start of the bitmap.
*/
@@ -1011,7 +1011,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
}
/* returns number of bits actually changed.
- * for val != 0, we change 0 -> 1, return code positiv
+ * for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
@@ -1051,7 +1051,7 @@ int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
}
/* returns number of bits actually changed.
- * for val != 0, we change 0 -> 1, return code positiv
+ * for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
......
@@ -150,7 +150,7 @@ mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
/* I do not use a standard mempool, because:
- 1) I want to hand out the preallocated objects first.
+ 1) I want to hand out the pre-allocated objects first.
2) I want to be able to interrupt sleeping allocation with a signal.
Note: This is a single linked list, the next pointer is the private
member of struct page.
@@ -328,7 +328,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
if (nob)
mdev->oldest_tle = nob;
/* if nob == NULL b was the only barrier, and becomes the new
- barrer. Threfore mdev->oldest_tle points already to b */
+ barrier. Therefore mdev->oldest_tle points already to b */
} else {
D_ASSERT(nob != NULL);
mdev->oldest_tle = nob;
@@ -644,7 +644,7 @@ abort:
}
/**
- * _drbd_request_state() - Reqest a state change (with flags)
+ * _drbd_request_state() - Request a state change (with flags)
* @mdev: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
@@ -838,7 +838,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
put_ldev(mdev);
}
- /* Dissalow Network errors to configure a device's network part */
+ /* Disallow Network errors to configure a device's network part */
if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
os.conn <= C_DISCONNECTING)
ns.conn = os.conn;
@@ -959,7 +959,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
if (cs == C_VERIFY_T) {
/* starting online verify from an arbitrary position
- * does not fit well into the existion protocol.
+ * does not fit well into the existing protocol.
* on C_VERIFY_T, we initialize ov_left and friends
* implicitly in receive_DataRequest once the
* first P_OV_REQUEST is received */
@@ -1249,7 +1249,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
state change. This function might sleep */
if (fp == FP_STONITH && ns.susp) {
- /* case1: The outdate peer handler is successfull:
+ /* case1: The outdate peer handler is successful:
* case2: The connection was established again: */
if ((os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) ||
(os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) {
@@ -1405,7 +1405,7 @@ restart:
* if now a re-connect request comes in, conn state goes C_UNCONNECTED,
* and receiver thread will be "started".
* drbd_thread_start needs to set "Restarting" in that case.
- * t_state check and assignement needs to be within the same spinlock,
+ * t_state check and assignment needs to be within the same spinlock,
* so either thread_start sees Exiting, and can remap to Restarting,
* or thread_start see None, and can proceed as normal.
*/
@@ -1545,7 +1545,7 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
* @mdev: DRBD device.
*
- * Forces all threads of a device onto the same CPU. This is benificial for
+ * Forces all threads of a device onto the same CPU. This is beneficial for
* DRBD's performance. May be overwritten by user's configuration.
*/
void drbd_calc_cpu_mask(struct drbd_conf *mdev)
@@ -2427,7 +2427,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
/* p.seq_num = 0; No sequence numbers here.. */
/* Only called by our kernel thread.
- * This one may be interupted by DRBD_SIG and/or DRBD_SIGKILL
+ * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
* in response to admin command or module unload.
*/
if (!drbd_get_data_sock(mdev))
@@ -2839,7 +2839,7 @@ Enomem:
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
- /* just so we have it. you never know what interessting things we
+ /* just so we have it. you never know what interesting things we
* might want to do here some day...
*/
@@ -3168,7 +3168,7 @@ int __init drbd_init(void)
rwlock_init(&global_state_lock);
- printk(KERN_INFO "drbd: initialised. "
+ printk(KERN_INFO "drbd: initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
@@ -3397,7 +3397,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
* drbd_md_mark_dirty() - Mark meta data super block as dirty
* @mdev: DRBD device.
*
- * Call this function if you change enything that should be written to
+ * Call this function if you change anything that should be written to
* the meta-data super block. This function sets MD_DIRTY, and starts a
* timer that ensures that within five seconds you have to call drbd_md_sync().
*/
@@ -3566,7 +3566,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
* @done: callback to be called after the bitmap IO was performed
* @why: Descriptive text of the reason for doing the IO
*
- * While IO on the bitmap happens we freeze appliation IO thus we ensure
+ * While IO on the bitmap happens we freeze application IO thus we ensure
* that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
* called from worker context. It MUST NOT be used while a previous such
* work is still pending!
......
@@ -228,7 +228,7 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
break;
case 6: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
- * become R_PRIMARY, but findes the other peer being active. */
+ * become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
dev_warn(DEV, "Peer is primary, outdating myself.\n");
nps = D_UNKNOWN;
@@ -498,7 +498,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
}
/**
- * drbd_determin_dev_size() - Sets the right device size obeying all constraints
+ * drbd_determine_dev_size() - Sets the right device size obeying all constraints
* @mdev: DRBD device.
*
* Returns 0 on success, negative return values indicate errors.
@@ -977,7 +977,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
logical_block_size, MD_SECTOR_SIZE);
- dev_warn(DEV, "Workaround engaged (has performace impact).\n");
+ dev_warn(DEV, "Workaround engaged (has performance impact).\n");
mdev->md_io_tmpp = page;
}
@@ -1070,7 +1070,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
os = mdev->state;
ns.i = os.i;
/* If MDF_CONSISTENT is not set go into inconsistent state,
- otherwise investige MDF_WasUpToDate...
+ otherwise investigate MDF_WasUpToDate...
If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
otherwise into D_CONSISTENT state.
*/
@@ -1095,7 +1095,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
this point, because drbd_request_state() modifies these
flags. */
- /* In case we are C_CONNECTED postpone any desicion on the new disk
+ /* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
if (mdev->state.conn == C_CONNECTED) {
mdev->new_state_tmp.i = ns.i;
@@ -1412,7 +1412,7 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
pdsk, D_OUTDATED));
} else if (retcode == SS_CW_FAILED_BY_PEER) {
- /* The peer probabely wants to see us outdated. */
+ /* The peer probably wants to see us outdated. */
retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
disk, D_OUTDATED),
CS_ORDERED);
@@ -2044,7 +2044,7 @@ static void drbd_connector_callback(struct cn_msg *req)
cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
reply->minor = nlp->drbd_minor;
reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
- /* reply->tag_list; might be modified by cm->fucntion. */
+ /* reply->tag_list; might be modified by cm->function. */
rr = cm->function(mdev, nlp, reply);
@@ -2201,7 +2201,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
if (!reason || !reason[0])
return;
- /* aparently we have to memcpy twice, first to prepare the data for the
+ /* apparently we have to memcpy twice, first to prepare the data for the
* struct cn_msg, then within cn_netlink_send from the cn_msg to the
* netlink skb. */
/* receiver thread context, which is not in the writeout path (of this node),
......
@@ -153,7 +153,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
/**
* drbd_pp_alloc() - Returns a page, fails only if a signal comes in
* @mdev: DRBD device.
- * @retry: wether or not to retry allocation forever (or until signalled)
+ * @retry: whether or not to retry allocation forever (or until signalled)
*
* Tries to allocate a page, first from our own page pool, then from the
* kernel, unless this allocation would exceed the max_buffers setting.
@@ -624,7 +624,7 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* for the outgoing connections.
* This is needed for multihomed hosts and to be
* able to use lo: interfaces for drbd.
- * Make sure to use 0 as portnumber, so linux selects
+ * Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
memcpy(&src_in6, mdev->net_conf->my_addr,
@@ -768,7 +768,7 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
/*
* return values:
- * 1 yess, we have a valid connection
+ * 1 yes, we have a valid connection
* 0 oops, did not work out, please try again
* -1 peer talks different language,
* no point in trying again, please go standalone.
@@ -1180,7 +1180,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
/* As long as the -ENOTSUPP on the barrier is reported immediately
that will never trigger. It it is reported late, we will just
- print that warning an continue corretly for all future requests
+ print that warning an continue correctly for all future requests
with WO_bdev_flush */
if (previous_epoch(mdev, e->epoch))
dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
@@ -2020,7 +2020,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
case P_RS_DATA_REQUEST:
e->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
- /* Eventually this should become asynchrously. Currently it
+ /* Eventually this should become asynchronously. Currently it
* blocks the whole receiver just to delay the reading of a
* resync data block.
* the drbd_work_queue mechanism is made for this...
@@ -2079,7 +2079,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
}
e->w.cb = w_e_end_ov_req;
fault_type = DRBD_FAULT_RS_RD;
- /* Eventually this should become asynchrously. Currently it
+ /* Eventually this should become asynchronous. Currently it
* blocks the whole receiver just to delay the reading of a
* resync data block.
* the drbd_work_queue mechanism is made for this...
@@ -2231,7 +2231,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
if (self != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
- dev_warn(DEV, "Sucessfully gave up primary role.\n");
+ dev_warn(DEV, "Successfully gave up primary role.\n");
rv = hg;
}
} else
@@ -2273,7 +2273,7 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
if (self != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
- dev_warn(DEV, "Sucessfully gave up primary role.\n");
+ dev_warn(DEV, "Successfully gave up primary role.\n");
rv = hg;
}
} else
@@ -2356,7 +2356,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
mdev->p_uuid[UI_BITMAP] != (u64)0 &&
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
- dev_info(DEV, "was SyncTarget, peer missed the resync finished event, correced peer:\n");
+ dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
if (mdev->agreed_pro_version < 91)
return -1001;
@@ -2890,7 +2890,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
}
/* just store the peer's disk size for now.
- * we still need to figure out wether we accept that. */
+ * we still need to figure out whether we accept that. */
mdev->p_size = p_size;
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
@@ -2974,7 +2974,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
if (mdev->state.conn > C_WF_REPORT_PARAMS) {
if (be64_to_cpu(p->c_size) !=
drbd_get_capacity(mdev->this_bdev) || ldsc) {
- /* we have different sizes, probabely peer
+ /* we have different sizes, probably peer
* needs to know my new size... */
drbd_send_sizes(mdev, 0);
}
@@ -3204,7 +3204,7 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
peer_state.disk != D_NEGOTIATING ) {
/* we want resync, peer has not yet decided to sync... */
/* Nowadays only used when forcing a node into primary role and
- setting its disk to UpTpDate with that */
+ setting its disk to UpToDate with that */
drbd_send_uuids(mdev);
drbd_send_state(mdev);
}
@@ -3391,7 +3391,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
we would need to process it from the highest address to the lowest,
in order to be agnostic to the 32 vs 64 bits issue.
- returns 0 on failure, 1 if we suceessfully received it. */
+ returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
{
struct bm_xfer_ctx c;
@@ -3804,7 +3804,7 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
/*
* return values:
- * 1 yess, we have a valid connection
+ * 1 yes, we have a valid connection
* 0 oops, did not work out, please try again
* -1 peer talks different language,
* no point in trying again, please go standalone.
......
@@ -69,7 +69,7 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* well, only if it had been there in the first
* place... if it had not (local only or conflicting
* and never sent), it should still be "empty" as
- * initialised in drbd_req_new(), so we can list_del() it
+ * initialized in drbd_req_new(), so we can list_del() it
* here unconditionally */
list_del(&req->tl_requests);
/* Set out-of-sync unless both OK flags are set
@@ -267,7 +267,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
/*
* figure out whether to report success or failure.
*
- * report success when at least one of the operations suceeded.
+ * report success when at least one of the operations succeeded.
* or, to put the other way,
* only report failure, when both operations failed.
*
@@ -582,7 +582,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
if (bio_data_dir(req->master_bio) == WRITE &&
mdev->net_conf->wire_protocol == DRBD_PROT_A) {
/* this is what is dangerous about protocol A:
- * pretend it was sucessfully written on the peer. */
+ * pretend it was successfully written on the peer. */
if (req->rq_state & RQ_NET_PENDING) {
dec_ap_pending(mdev);
req->rq_state &= ~RQ_NET_PENDING;
@@ -618,7 +618,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
case write_acked_by_peer_and_sis:
req->rq_state |= RQ_NET_SIS;
case conflict_discarded_by_peer:
- /* for discarded conflicting writes of multiple primarys,
+ /* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
* node crashes are covered by the activity log. */
if (what == conflict_discarded_by_peer)
@@ -636,11 +636,11 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
* A barrier request is expected to have forced all prior
* requests onto stable storage, so completion of a barrier
* request could set NET_DONE right here, and not wait for the
- * P_BARRIER_ACK, but that is an unecessary optimisation. */
+ * P_BARRIER_ACK, but that is an unnecessary optimization. */
/* this makes it effectively the same as for: */
case recv_acked_by_peer:
- /* protocol B; pretends to be sucessfully written on peer.
+ /* protocol B; pretends to be successfully written on peer.
* see also notes above in handed_over_to_network about
* protocol != C */
req->rq_state |= RQ_NET_OK;
@@ -731,7 +731,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
if (!req) {
dec_ap_bio(mdev);
/* only pass the error to the upper layers.
- * if user cannot handle io errors, thats not our business. */
+ * if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
return 0;
@@ -873,7 +873,7 @@ allocate_barrier:
* 'remote' may become wrong any time because the network could fail.
*
* This is a harmless race condition, though, since it is handled
- * correctly at the appropriate places; so it just deferres the failure
+ * correctly at the appropriate places; so it just defers the failure
* of the respective operation.
*/
@@ -884,7 +884,7 @@ allocate_barrier:
if (local)
_req_mod(req, to_be_submitted);
- /* check this request on the colison detection hash tables.
+ /* check this request on the collision detection hash tables.
* if we have a conflict, just complete it here.
* THINK do we want to check reads, too? (I don't think so...) */
if (rw == WRITE && _req_conflicts(req)) {
@@ -989,7 +989,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
/*
* Paranoia: we might have been primary, but sync target, or
* even diskless, then lost the connection.
- * This should have been handled (panic? suspend?) somehwere
+ * This should have been handled (panic? suspend?) somewhere
* else. But maybe it was not, so check again here.
* Caution: as long as we do not have a read/write lock on mdev,
* to serialize state changes, this is racy, since we may lose
@@ -1035,7 +1035,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
D_ASSERT((bio->bi_size & 0x1ff) == 0);
D_ASSERT(bio->bi_idx == 0);
- /* to make some things easier, force allignment of requests within the
+ /* to make some things easier, force alignment of requests within the
* granularity of our hash tables */
s_enr = bio->bi_sector >> HT_SHIFT;
e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
@@ -1099,7 +1099,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
* since we don't care for actual offset, but only check whether it
* would cross "activity log extent" boundaries.
*
- * As long as the BIO is emtpy we have to allow at least one bvec,
+ * As long as the BIO is empty we have to allow at least one bvec,
* regardless of size and offset. so the resulting bio may still
* cross extent boundaries. those are dealt with (bio_split) in
* drbd_make_request_26.
......
@@ -78,7 +78,7 @@ static const char *drbd_state_sw_errors[] = {
[-SS_RESYNC_RUNNING] = "Can not start OV/resync since it is already active",
[-SS_ALREADY_STANDALONE] = "Can not disconnect a StandAlone device",
[-SS_CW_FAILED_BY_PEER] = "State change was refused by peer node",
- [-SS_IS_DISKLESS] = "Device is diskless, the requesed operation requires a disk",
+ [-SS_IS_DISKLESS] = "Device is diskless, the requested operation requires a disk",
[-SS_DEVICE_IN_USE] = "Device is held open by someone",
[-SS_NO_NET_CONFIG] = "Have no net/connection configuration",
[-SS_NO_VERIFY_ALG] = "Need a verify algorithm to start online verify",
......
@@ -166,7 +166,7 @@ static void probe_drbd_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch,
switch (ev) {
case EV_TRACE_ALLOC:
- dev_info(DEV, "Allocat epoch %p/xxxx { } nr_epochs=%d\n", epoch, mdev->epochs);
+ dev_info(DEV, "Allocate epoch %p/xxxx { } nr_epochs=%d\n", epoch, mdev->epochs);
break;
case EV_TRACE_FREE:
dev_info(DEV, "Freeing epoch %p/%d { size=%d } nr_epochs=%d\n",
@@ -224,11 +224,11 @@ static void probe_drbd_actlog(struct drbd_conf *mdev, sector_t sector, char* msg
}
/**
- * drbd_print_buffer() - Hexdump arbitraty binary data into a buffer
+ * drbd_print_buffer() - Hexdump arbitrary binary data into a buffer
* @prefix: String is output at the beginning of each line output.
* @flags: Currently only defined flag: DBGPRINT_BUFFADDR; if set, each
* line starts with the virtual address of the line being
- * outupt. If clear, each line starts with the offset from the
+ * output. If clear, each line starts with the offset from the
* beginning of the buffer.
* @size: Indicates the size of each entry in the buffer. Supported
* values are sizeof(char), sizeof(short) and sizeof(int)
......
@@ -59,7 +59,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
atodb_endio in drbd_actlog.c
drbd_bm_async_io_complete in drbd_bitmap.c
- * For all these callbacks, note the follwing:
+ * For all these callbacks, note the following:
* The callbacks will be called in irq context by the IDE drivers,
* and in Softirqs/Tasklets/BH context by the SCSI drivers.
* Try to get the locking right :)
@@ -102,7 +102,7 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
mdev = e->mdev;
if (!error && !uptodate) {
- /* strange behaviour of some lower level drivers...
+ /* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?!
* do we want to dev_warn(DEV, ) on this? */
@@ -145,7 +145,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
mdev = e->mdev;
if (!error && !uptodate) {
- /* strange behaviour of some lower level drivers...
+ /* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?!
* do we want to dev_warn(DEV, ) on this? */
@@ -212,7 +212,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
}
- /* read, readA or write requests on R_PRIMARY comming from drbd_make_request
+ /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
void drbd_endio_pri(struct bio *bio, int error)
{
@@ -224,7 +224,7 @@ void drbd_endio_pri(struct bio *bio, int error)
int uptodate = bio_flagged(bio, BIO_UPTODATE);
if (!error && !uptodate) {
- /* strange behaviour of some lower level drivers...
+ /* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?!
* do we want to dev_warn(DEV, ) on this? */
@@ -469,13 +469,13 @@ int w_make_resync_request(struct drbd_conf *mdev,
if (number > mx)
mx = number;
- /* Limit the nunber of pending RS requests to no more than the peer's receive buffer */
+ /* Limit the number of pending RS requests to no more than the peer's receive buffer */
if ((pe + number) > mx) {
number = mx - pe;
}
for (i = 0; i < number; i++) {
- /* Stop generating RS requests, when half of the sendbuffer is filled */
+ /* Stop generating RS requests, when half of the send buffer is filled */
mutex_lock(&mdev->data.mutex);
if (mdev->data.socket) {
queued = mdev->data.socket->sk->sk_wmem_queued;
@@ -516,10 +516,10 @@ next_sector:
/* try to find some adjacent bits.
* we stop if we have already the maximum req size.
*
- * Aditionally always align bigger requests, in order to
+ * Additionally always align bigger requests, in order to
* be prepared for all stripe sizes of software RAIDs.
*
- * we _do_ care about the agreed-uppon q->max_segment_size
+ * we _do_ care about the agreed-upon q->max_segment_size
* here, as splitting up the requests on the other side is more
* difficult. the consequence is, that on lvm and md and other
* "indirect" devices, this is dead code, since
@@ -682,7 +682,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
* might set bits in the (main) bitmap, then the entries in the
* resync LRU would be wrong. */
if (drbd_rs_del_all(mdev)) {
- /* In case this is not possible now, most probabely because
+ /* In case this is not possible now, most probably because
* there are P_RS_DATA_REPLY Packets lingering on the worker's
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
@@ -1095,7 +1095,7 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
int ok = 1;
/* really avoid racing with tl_clear. w.cb may have been referenced
- * just before it was reassigned and requeued, so double check that.
+ * just before it was reassigned and re-queued, so double check that.
* actually, this race was harmless, since we only try to send the
* barrier packet here, and otherwise do nothing with the object.
* but compare with the head of w_clear_epoch */
......
@@ -318,7 +318,7 @@ static int lc_unused_element_available(struct lru_cache *lc)
* "touches" and returns it.
*
* In case the requested number is not present, it needs to be added to the
- * cache. Therefore it is possible that an other element becomes eviced from
+ * cache. Therefore it is possible that an other element becomes evicted from
* the cache. In either case, the user is notified so he is able to e.g. keep
* a persistent log of the cache changes, and therefore the objects in use.
*
@@ -338,7 +338,7 @@ static int lc_unused_element_available(struct lru_cache *lc)
*
* In this case, the cache is marked %LC_DIRTY (blocking further changes),
* and the returned element pointer is removed from the lru list and
- * hash collision chains. The user now should do whatever houskeeping
+ * hash collision chains. The user now should do whatever housekeeping
* is necessary.
* Then he must call lc_changed(lc,element_pointer), to finish
* the change.
@@ -522,7 +522,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
* @seq: the &struct seq_file pointer to seq_printf into
* @utext: user supplied "heading" or other info
* @detail: function pointer the user may provide to dump further details
- * of the object the lc_element is embeded in.
+ * of the object the lc_element is embedded in.
*/
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
void (*detail) (struct seq_file *, struct lc_element *))
......