Commit efb8ad7e authored by Nathan Scott, committed by Tim Shimmin

[XFS] Add a debug flag for allocations which are known to be larger than one page.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26800a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 3f89243c
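The mechanism: kmem_alloc() gains a DEBUG-only check that prints a warning and a stack trace whenever an allocation larger than one page arrives without the new KM_LARGE flag, and every call site already known to allocate multi-page buffers is annotated with the flag, so the warning only fires for unvetted callers. A minimal userspace sketch of the pattern follows; kmem_alloc_demo and the fixed 4096-byte PAGE_SIZE are stand-ins for the kernel environment, not part of this patch.

#include <stdio.h>
#include <stdlib.h>

#define KM_SLEEP	0x0001u
#define KM_LARGE	0x0010u	/* caller vouches that size > PAGE_SIZE is intended */
#define PAGE_SIZE	4096	/* stand-in for the kernel constant */

/* Hypothetical mirror of the DEBUG check added to kmem_alloc() below. */
static void *kmem_alloc_demo(size_t size, unsigned int flags)
{
	if (!(flags & KM_LARGE) && size > PAGE_SIZE)
		fprintf(stderr, "Large %s attempt, size=%ld\n",
			__func__, (long)size);
	return malloc(size);
}

int main(void)
{
	void *a = kmem_alloc_demo(16 * PAGE_SIZE, KM_SLEEP);		/* warns */
	void *b = kmem_alloc_demo(16 * PAGE_SIZE, KM_SLEEP | KM_LARGE);	/* silent */

	free(a);
	free(b);
	return 0;
}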
@@ -34,6 +34,14 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 	gfp_t		lflags = kmem_flags_convert(flags);
 	void		*ptr;
 
+#ifdef DEBUG
+	if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
+		printk(KERN_WARNING "Large %s attempt, size=%ld\n",
+			__FUNCTION__, (long)size);
+		dump_stack();
+	}
+#endif
+
 	do {
 		if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
 			ptr = kmalloc(size, lflags);
...
@@ -30,6 +30,7 @@
 #define KM_NOSLEEP	0x0002u
 #define KM_NOFS		0x0004u
 #define KM_MAYFAIL	0x0008u
+#define KM_LARGE	0x0010u
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -41,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
 {
 	gfp_t lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
...
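Worth noting: as this patch stands, KM_LARGE changes no allocation behaviour. kmem_flags_convert() is only taught to tolerate the bit in its BUG_ON() whitelist; no gfp_t flag is derived from it, and its only consumer is the DEBUG check above. A hedged sketch of that property, with assert() standing in for BUG_ON() and flags_convert_demo a hypothetical stand-in:

#include <assert.h>

#define KM_SLEEP	0x0001u
#define KM_NOSLEEP	0x0002u
#define KM_NOFS		0x0004u
#define KM_MAYFAIL	0x0008u
#define KM_LARGE	0x0010u

/* Like kmem_flags_convert(), the whitelist must admit KM_LARGE, but no
 * branch ever tests the bit, so the converted mask is unaffected by it. */
static unsigned int flags_convert_demo(unsigned int flags)
{
	unsigned int lflags;

	assert(!(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE)));
	if (flags & KM_NOSLEEP)
		lflags = 0x01u;	/* stand-in for GFP_ATOMIC | __GFP_NOWARN */
	else
		lflags = 0x02u;	/* stand-in for GFP_KERNEL */
	return lflags;
}

int main(void)
{
	/* KM_LARGE in, same converted mask out. */
	assert(flags_convert_demo(KM_SLEEP) ==
	       flags_convert_demo(KM_SLEEP | KM_LARGE));
	return 0;
}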
...@@ -768,7 +768,7 @@ xfs_buf_get_noaddr( ...@@ -768,7 +768,7 @@ xfs_buf_get_noaddr(
_xfs_buf_initialize(bp, target, 0, len, 0); _xfs_buf_initialize(bp, target, 0, len, 0);
try_again: try_again:
data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
if (unlikely(data == NULL)) if (unlikely(data == NULL))
goto fail_free_buf; goto fail_free_buf;
......
@@ -112,17 +112,17 @@ xfs_Gqm_init(void)
 {
 	xfs_dqhash_t	*udqhash, *gdqhash;
 	xfs_qm_t	*xqm;
-	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL;
+	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
 	/*
 	 * Initialize the dquot hash tables.
 	 */
 	hsize = XFS_QM_HASHSIZE_HIGH;
-	while (!(udqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), flags))) {
+	while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
 		if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
 			flags = KM_SLEEP;
 	}
-	gdqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), KM_SLEEP);
+	gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
 	ndquot = hsize << 8;
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
...
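A side cleanup in this hunk: the element size for the dquot hash tables now comes from sizeof(*udqhash) and sizeof(*gdqhash) rather than sizeof(xfs_dqhash_t), so it tracks the pointer's declared type automatically. A small self-contained illustration of the idiom (struct bucket and tbl are hypothetical):

#include <stdlib.h>

struct bucket {
	long	key;
	void	*head;
};

int main(void)
{
	size_t n = 128;

	/* sizeof(*tbl) is tied to tbl's declaration: redeclare tbl with a
	 * different element type and this allocation stays correct, whereas
	 * a hard-coded sizeof(struct bucket) would silently go stale. */
	struct bucket *tbl = calloc(n, sizeof(*tbl));

	if (!tbl)
		return 1;
	free(tbl);
	return 0;
}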
@@ -75,7 +75,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 			sleep);
 	} else {
 		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
-						    sleep);
+						    sleep | KM_LARGE);
 	}
 
 	if (ktep == NULL) {
...
@@ -50,7 +50,7 @@ void
 xfs_ihash_init(xfs_mount_t *mp)
 {
 	__uint64_t	icount;
-	uint		i, flags = KM_SLEEP | KM_MAYFAIL;
+	uint		i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
 	if (!mp->m_ihsize) {
 		icount = mp->m_maxicount ? mp->m_maxicount :
@@ -95,7 +95,7 @@ xfs_chash_init(xfs_mount_t *mp)
 	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
 	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
 						 * sizeof(xfs_chash_t),
-						 KM_SLEEP);
+						 KM_SLEEP | KM_LARGE);
 	for (i = 0; i < mp->m_chsize; i++) {
 		spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
 	}
...
@@ -1196,7 +1196,7 @@ xlog_alloc_log(xfs_mount_t *mp,
 		kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
 	iclog = *iclogp;
 	iclog->hic_data = (xlog_in_core_2_t *)
-		kmem_zalloc(iclogsize, KM_SLEEP);
+		kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE);
 	iclog->ic_prev = prev_iclog;
 	prev_iclog = iclog;
...