Skip to content

Commit b221385

Browse files
Adrian Bunk authored and Linus Torvalds committed
[PATCH] mm/: make functions static
This patch makes the following needlessly global functions static:
- slab.c: kmem_find_general_cachep()
- swap.c: __page_cache_release()
- vmalloc.c: __vmalloc_node()

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
1 parent 204ec84 commit b221385

File tree

6 files changed

+25
-31
lines changed

6 files changed

+25
-31
lines changed

include/linux/mm.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -318,8 +318,6 @@ static inline int get_page_unless_zero(struct page *page)
318318
return atomic_inc_not_zero(&page->_count);
319319
}
320320

321-
extern void FASTCALL(__page_cache_release(struct page *));
322-
323321
static inline int page_count(struct page *page)
324322
{
325323
if (unlikely(PageCompound(page)))

include/linux/slab.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
6767
extern void kmem_cache_free(kmem_cache_t *, void *);
6868
extern unsigned int kmem_cache_size(kmem_cache_t *);
6969
extern const char *kmem_cache_name(kmem_cache_t *);
70-
extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
7170

7271
/* Size description struct for general caches. */
7372
struct cache_sizes {
@@ -223,7 +222,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
223222
/* SLOB allocator routines */
224223

225224
void kmem_cache_init(void);
226-
struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
227225
struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
228226
unsigned long,
229227
void (*)(void *, struct kmem_cache *, unsigned long),

include/linux/vmalloc.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
4444
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
4545
extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
4646
pgprot_t prot);
47-
extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
48-
pgprot_t prot, int node);
4947
extern void vfree(void *addr);
5048

5149
extern void *vmap(struct page **pages, unsigned int count,

mm/slab.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
768768
return csizep->cs_cachep;
769769
}
770770

771-
struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
771+
static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
772772
{
773773
return __find_general_cachep(size, gfpflags);
774774
}
775-
EXPORT_SYMBOL(kmem_find_general_cachep);
776775

777776
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
778777
{

mm/swap.c

Lines changed: 19 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -34,6 +34,25 @@
3434
/* How many pages do we try to swap or page in/out together? */
3535
int page_cluster;
3636

37+
/*
38+
* This path almost never happens for VM activity - pages are normally
39+
* freed via pagevecs. But it gets used by networking.
40+
*/
41+
static void fastcall __page_cache_release(struct page *page)
42+
{
43+
if (PageLRU(page)) {
44+
unsigned long flags;
45+
struct zone *zone = page_zone(page);
46+
47+
spin_lock_irqsave(&zone->lru_lock, flags);
48+
VM_BUG_ON(!PageLRU(page));
49+
__ClearPageLRU(page);
50+
del_page_from_lru(zone, page);
51+
spin_unlock_irqrestore(&zone->lru_lock, flags);
52+
}
53+
free_hot_page(page);
54+
}
55+
3756
static void put_compound_page(struct page *page)
3857
{
3958
page = (struct page *)page_private(page);
@@ -222,26 +241,6 @@ int lru_add_drain_all(void)
222241
}
223242
#endif
224243

225-
/*
226-
* This path almost never happens for VM activity - pages are normally
227-
* freed via pagevecs. But it gets used by networking.
228-
*/
229-
void fastcall __page_cache_release(struct page *page)
230-
{
231-
if (PageLRU(page)) {
232-
unsigned long flags;
233-
struct zone *zone = page_zone(page);
234-
235-
spin_lock_irqsave(&zone->lru_lock, flags);
236-
VM_BUG_ON(!PageLRU(page));
237-
__ClearPageLRU(page);
238-
del_page_from_lru(zone, page);
239-
spin_unlock_irqrestore(&zone->lru_lock, flags);
240-
}
241-
free_hot_page(page);
242-
}
243-
EXPORT_SYMBOL(__page_cache_release);
244-
245244
/*
246245
* Batched page_cache_release(). Decrement the reference count on all the
247246
* passed pages. If it fell to zero then remove the page from the LRU and

mm/vmalloc.c

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -24,6 +24,9 @@
2424
DEFINE_RWLOCK(vmlist_lock);
2525
struct vm_struct *vmlist;
2626

27+
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
28+
int node);
29+
2730
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
2831
{
2932
pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
478481
* allocator with @gfp_mask flags. Map them into contiguous
479482
* kernel virtual space, using a pagetable protection of @prot.
480483
*/
481-
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
482-
int node)
484+
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
485+
int node)
483486
{
484487
struct vm_struct *area;
485488

@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
493496

494497
return __vmalloc_area_node(area, gfp_mask, prot, node);
495498
}
496-
EXPORT_SYMBOL(__vmalloc_node);
497499

498500
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
499501
{

0 commit comments

Comments
 (0)