| author | Ingo Molnar <mingo@elte.hu> | 2009-05-11 14:44:27 +0200 | 
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-05-11 14:44:31 +0200 | 
| commit | 41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51 (patch) | |
| tree | 51c50bcb67a5039448ddfa1869d7948cab1217e9 /mm/shmem.c | |
| parent | 19c1a6f5764d787113fa323ffb18be7991208f82 (diff) | |
| parent | 091bf7624d1c90cec9e578a18529f615213ff847 (diff) | |
Merge commit 'v2.6.30-rc5' into core/iommu
Merge reason: core/iommu was on an .30-rc1 base,
              update it to .30-rc5 to refresh.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/shmem.c')
| -rw-r--r-- | mm/shmem.c | 35 | 
1 file changed, 27 insertions, 8 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index d94d2e9146bc..b25f95ce3db7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/vfs.h>
 #include <linux/mount.h>
+#include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -43,7 +44,6 @@ static struct vfsmount *shm_mnt;
 #include <linux/exportfs.h>
 #include <linux/generic_acl.h>
 #include <linux/mman.h>
-#include <linux/pagemap.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/backing-dev.h>
@@ -65,13 +65,28 @@ static struct vfsmount *shm_mnt;
 #include <asm/div64.h>
 #include <asm/pgtable.h>
 
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
 
-#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
 
+#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
+
+#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
 
 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
@@ -1325,8 +1340,12 @@ repeat:
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			if (error == -ENOMEM) {
-				/* allow reclaim from this memory cgroup */
-				error = mem_cgroup_shrink_usage(swappage,
+				/*
+				 * reclaim from proper memory cgroup and
+				 * call memcg's OOM if needed.
+				 */
+				error = mem_cgroup_shmem_charge_fallback(
+								swappage,
 								current->mm,
 								gfp);
 				if (error) {
@@ -2581,7 +2600,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 #define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
 #define shmem_acct_size(flags, size)		0
 #define shmem_unacct_size(flags, size)		do {} while (0)
-#define SHMEM_MAX_BYTES				LLONG_MAX
+#define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE
 #endif /* CONFIG_SHMEM */
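The new comment block above gives the maximum shmem file size reachable through the triple-indirect swap vector. As a rough sanity check of that arithmetic, the following standalone user-space sketch (not part of the patch) redoes the SHMSWP_MAX_INDEX/SHMSWP_MAX_BYTES calculation for a 4kB page size; SHMEM_NR_DIRECT is assumed to be 16 and PAGE_CACHE_SIZE is assumed to equal 4096, both assumptions for illustration rather than values taken from this diff.

```c
/*
 * Hypothetical sketch: mirror the swap-vector size arithmetic from the
 * patch above, with SHMEM_NR_DIRECT assumed to be 16 and a 4kB page size.
 */
#include <stdio.h>

static unsigned long long swp_max_bytes(unsigned long long page_size,
					unsigned long long sizeof_ulong)
{
	unsigned long long entries_per_page = page_size / sizeof_ulong;
	unsigned long long entries_per_pagepage =
		entries_per_page * entries_per_page;
	unsigned long long shmem_nr_direct = 16;	/* assumed value */
	unsigned long long max_index = shmem_nr_direct +
		(entries_per_pagepage / 2) * (entries_per_page + 1);

	return max_index * page_size;	/* analogue of SHMSWP_MAX_BYTES */
}

int main(void)
{
	/* 32-bit kernel: sizeof(unsigned long) == 4 */
	printf("4kB pages, 32-bit: ~%llu GB\n", swp_max_bytes(4096, 4) >> 30);
	/* 64-bit kernel: sizeof(unsigned long) == 8 */
	printf("4kB pages, 64-bit: ~%llu GB\n", swp_max_bytes(4096, 8) >> 30);
	return 0;
}
```

With these assumed values the sketch prints roughly 2050 GB for the 32-bit case and 256 GB for the 64-bit case, matching the comment's "just over 2TB on a 32-bit kernel, but one eighth of that on a 64-bit kernel", which is why the new SHMEM_MAX_BYTES definition additionally clamps to MAX_LFS_FILESIZE instead of relying on the swap-vector limit alone.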
