patch-2.3.27 linux/include/linux/mm.h
- Lines: 37
- Date: Thu Nov 11 17:02:14 1999
- Orig file: v2.3.26/linux/include/linux/mm.h
- Orig date: Sun Nov 7 16:37:34 1999
diff -u --recursive --new-file v2.3.26/linux/include/linux/mm.h linux/include/linux/mm.h
@@ -138,6 +138,7 @@
wait_queue_head_t wait;
struct page **pprev_hash;
struct buffer_head * buffers;
+ unsigned long virtual; /* nonzero if kmapped */
} mem_map_t;
#define get_page(p) atomic_inc(&(p)->count)
@@ -285,12 +286,16 @@
* This is timing-critical - most of the time in getting a new page
* goes to clearing the page. If you want a page without the clearing
* overhead, just use __get_free_page() directly..
+ *
+ * We have two allocation namespaces - the *get*page*() variants
+ * return virtual kernel addresses to the allocated page(s), the
+ * alloc_page*() variants return 'struct page *'.
*/
-extern struct page * __get_pages(int gfp_mask, unsigned long order);
#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))
-extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long gfp_order));
-extern struct page * get_free_highpage(int gfp_mask);
+extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long order));
+extern struct page * FASTCALL(alloc_pages(int gfp_mask, unsigned long order));
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
extern inline unsigned long get_zeroed_page(int gfp_mask)
{
@@ -335,7 +340,7 @@
extern int check_pgt_cache(void);
extern void paging_init(void);
-extern void free_area_init(unsigned long);
+extern void free_area_init(unsigned int * zones_size);
extern void mem_init(void);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
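For context, a minimal usage sketch of the two allocation namespaces described in the patch's comment block. This is illustrative only and not part of the patch: it assumes the 2.3.27-era prototypes shown in the diff above, and assumes the usual release primitives free_pages() and __free_page() plus GFP_KERNEL are available from the same header, as in contemporary kernels.

#include <linux/mm.h>
#include <linux/string.h>

static void allocation_namespaces_example(void)
{
	unsigned long addr;
	struct page *page;

	/* The *get*page*() family hands back a kernel virtual address
	 * that can be used directly. */
	addr = __get_free_pages(GFP_KERNEL, 0);
	if (addr) {
		memset((void *) addr, 0, PAGE_SIZE);
		free_pages(addr, 0);
	}

	/* The alloc_page*() family hands back a 'struct page *'.  Such a
	 * page need not have a kernel mapping; per the first hunk, the new
	 * page->virtual field is nonzero only while the page is kmapped. */
	page = alloc_page(GFP_KERNEL);
	if (page) {
		/* ... use the page via its struct page descriptor ... */
		__free_page(page);
	}
}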