patch-2.3.38 linux/mm/vmscan.c
Next file: linux/net/README
Previous file: linux/mm/slab.c
Back to the patch index
Back to the overall index
- Lines: 178
- Date: Thu Jan 6 16:21:23 2000
- Orig file: v2.3.37/linux/mm/vmscan.c
- Orig date: Wed Dec 29 13:13:21 1999
diff -u --recursive --new-file v2.3.37/linux/mm/vmscan.c linux/mm/vmscan.c
@@ -33,7 +33,7 @@
* using a process that no longer actually exists (it might
* have died while we slept).
*/
-static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask)
+static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask, zone_t *zone)
{
pte_t pte;
swp_entry_t entry;
@@ -60,8 +60,7 @@
if (PageReserved(page)
|| PageLocked(page)
- || ((gfp_mask & __GFP_DMA) && !PageDMA(page))
- || (!(gfp_mask & __GFP_HIGHMEM) && PageHighMem(page)))
+ || (zone && (!memclass(page->zone, zone))))
goto out_failed;
/*
@@ -196,7 +195,7 @@
* (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
*/
-static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
+static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask, zone_t *zone)
{
pte_t * pte;
unsigned long pmd_end;
@@ -218,7 +217,7 @@
do {
int result;
vma->vm_mm->swap_address = address + PAGE_SIZE;
- result = try_to_swap_out(vma, address, pte, gfp_mask);
+ result = try_to_swap_out(vma, address, pte, gfp_mask, zone);
if (result)
return result;
address += PAGE_SIZE;
@@ -227,7 +226,7 @@
return 0;
}
-static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
+static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask, zone_t *zone)
{
pmd_t * pmd;
unsigned long pgd_end;
@@ -247,7 +246,7 @@
end = pgd_end;
do {
- int result = swap_out_pmd(vma, pmd, address, end, gfp_mask);
+ int result = swap_out_pmd(vma, pmd, address, end, gfp_mask, zone);
if (result)
return result;
address = (address + PMD_SIZE) & PMD_MASK;
@@ -256,7 +255,7 @@
return 0;
}
-static int swap_out_vma(struct vm_area_struct * vma, unsigned long address, int gfp_mask)
+static int swap_out_vma(struct vm_area_struct * vma, unsigned long address, int gfp_mask, zone_t *zone)
{
pgd_t *pgdir;
unsigned long end;
@@ -271,7 +270,7 @@
if (address >= end)
BUG();
do {
- int result = swap_out_pgd(vma, pgdir, address, end, gfp_mask);
+ int result = swap_out_pgd(vma, pgdir, address, end, gfp_mask, zone);
if (result)
return result;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -280,7 +279,7 @@
return 0;
}
-static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
+static int swap_out_mm(struct mm_struct * mm, int gfp_mask, zone_t *zone)
{
unsigned long address;
struct vm_area_struct* vma;
@@ -301,7 +300,7 @@
address = vma->vm_start;
for (;;) {
- int result = swap_out_vma(vma, address, gfp_mask);
+ int result = swap_out_vma(vma, address, gfp_mask, zone);
if (result)
return result;
vma = vma->vm_next;
@@ -323,7 +322,7 @@
* N.B. This function returns only 0 or 1. Return values != 1 from
* the lower level routines result in continued processing.
*/
-static int swap_out(unsigned int priority, int gfp_mask)
+static int swap_out(unsigned int priority, int gfp_mask, zone_t *zone)
{
struct task_struct * p;
int counter;
@@ -384,7 +383,7 @@
int ret;
atomic_inc(&best->mm_count);
- ret = swap_out_mm(best, gfp_mask);
+ ret = swap_out_mm(best, gfp_mask, zone);
mmdrop(best);
if (!ret)
@@ -410,7 +409,7 @@
* cluster them so that we get good swap-out behaviour. See
* the "free_memory()" macro for details.
*/
-static int do_try_to_free_pages(unsigned int gfp_mask)
+static int do_try_to_free_pages(unsigned int gfp_mask, zone_t *zone)
{
int priority;
int count = SWAP_CLUSTER_MAX;
@@ -420,7 +419,7 @@
priority = 6;
do {
- while (shrink_mmap(priority, gfp_mask)) {
+ while (shrink_mmap(priority, gfp_mask, zone)) {
if (!--count)
goto done;
}
@@ -428,21 +427,21 @@
/* don't be too light against the d/i cache since
shrink_mmap() almost never fail when there's
really plenty of memory free. */
- count -= shrink_dcache_memory(priority, gfp_mask);
- count -= shrink_icache_memory(priority, gfp_mask);
+ count -= shrink_dcache_memory(priority, gfp_mask, zone);
+ count -= shrink_icache_memory(priority, gfp_mask, zone);
if (count <= 0)
goto done;
/* Try to get rid of some shared memory pages.. */
if (gfp_mask & __GFP_IO) {
- while (shm_swap(priority, gfp_mask)) {
+ while (shm_swap(priority, gfp_mask, zone)) {
if (!--count)
goto done;
}
}
/* Then, try to page stuff out.. */
- while (swap_out(priority, gfp_mask)) {
+ while (swap_out(priority, gfp_mask, zone)) {
if (!--count)
goto done;
}
@@ -506,7 +505,7 @@
allocations (not GFP_HIGHMEM ones). */
if (nr_free_buffer_pages() >= freepages.high)
break;
- if (!do_try_to_free_pages(GFP_KSWAPD))
+ if (!do_try_to_free_pages(GFP_KSWAPD, 0))
break;
run_task_queue(&tq_disk);
} while (!tsk->need_resched);
@@ -530,13 +529,13 @@
* can be done by just dropping cached pages without having
* any deadlock issues.
*/
-int try_to_free_pages(unsigned int gfp_mask)
+int try_to_free_pages(unsigned int gfp_mask, zone_t *zone)
{
int retval = 1;
wake_up_process(kswapd_process);
if (gfp_mask & __GFP_WAIT)
- retval = do_try_to_free_pages(gfp_mask);
+ retval = do_try_to_free_pages(gfp_mask, zone);
return retval;
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)