patch-2.3.29 linux/mm/highmem.c
- Lines: 89
- Date: Sat Nov 20 10:56:59 1999
- Orig file: v2.3.28/linux/mm/highmem.c
- Orig date: Thu Nov 18 20:25:38 1999
diff -u --recursive --new-file v2.3.28/linux/mm/highmem.c linux/mm/highmem.c
@@ -70,9 +70,9 @@
 		return page;
 	}
 
-	vaddr = kmap(page);
+	vaddr = kmap(highpage);
 	copy_page((void *)vaddr, (void *)page_address(page));
-	kunmap(page);
+	kunmap(highpage);
 
 	/* Preserve the caching of the swap_entry. */
 	highpage->index = page->index;
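
This hunk fixes a copy-direction bug: the temporary kernel mapping must be made for the highmem destination (highpage), not for the lowmem source, which is already reachable through page_address(). With the old kmap(page), the copy went from the lowmem page onto its own mapping, leaving highpage's contents uninitialized. A minimal sketch of the corrected copy path, using the 2.3-era kmap() that returns an unsigned long, as in the hunk above:

	/* Sketch only: copy a lowmem page into a highmem page.
	 * "page" and "highpage" follow the names in the hunk. */
	unsigned long vaddr;

	vaddr = kmap(highpage);		/* temporarily map the highmem destination */
	copy_page((void *)vaddr, (void *)page_address(page));
	kunmap(highpage);		/* unmap exactly what was mapped */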
@@ -88,20 +88,6 @@
 }
 
 /*
- * Right now we initialize only a single pte table. It can be extended
- * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- */
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 2048
-#else
-#define LAST_PKMAP 4096
-#endif
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-/*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
  *    since a TLB flush - it is usable.
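
The block removed here (the pkmap pool size and the slot/address conversion macros) presumably moves out of mm/highmem.c into an architecture header. The arithmetic itself is a fixed one-to-one mapping between a pkmap slot number and a virtual address above PKMAP_BASE. A self-contained sketch of that arithmetic, using illustrative values for PKMAP_BASE, LAST_PKMAP and PAGE_SHIFT (the real ones are arch-specific):

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative values only: PKMAP_BASE is arch-specific and
	 * PAGE_SHIFT is 12 on i386 (4 KiB pages). */
	#define PAGE_SHIFT	12
	#define PKMAP_BASE	0xfe000000UL
	#define LAST_PKMAP	1024
	#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
	#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
	#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

	int main(void)
	{
		unsigned long nr, addr;

		for (nr = 0; nr < LAST_PKMAP; nr++) {
			addr = PKMAP_ADDR(nr);
			assert(PKMAP_NR(addr) == nr);	/* slot <-> address round-trips */
		}
		printf("slot 5 maps at %#lx\n", PKMAP_ADDR(5));
		return 0;
	}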
@@ -135,7 +121,7 @@
 		pkmap_count[i] = 0;
 		pte = pkmap_page_table[i];
 		if (pte_none(pte))
-			continue;
+			BUG();
 		pte_clear(pkmap_page_table+i);
 		page = pte_page(pte);
 		page->virtual = 0;
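
The continue becomes BUG() because it guarded an impossible state: this loop only reaches a slot whose pkmap_count is exactly 1, i.e. a mapping that is free but not yet flushed, so an empty pte there means the bookkeeping is corrupt and should be fatal rather than silently skipped. A hedged sketch of the surrounding loop in flush_all_zero_pkmaps(), reconstructed from the hunk:

	/* Sketch: walk all pkmap slots and tear down the ones whose
	 * count has fallen to 1 (free, but still mapped). */
	for (i = 0; i < LAST_PKMAP; i++) {
		if (pkmap_count[i] != 1)	/* 0 = unused, >1 = still in use */
			continue;
		pkmap_count[i] = 0;
		pte = pkmap_page_table[i];
		if (pte_none(pte))
			BUG();			/* count said "mapped"; the pte disagrees */
		pte_clear(pkmap_page_table+i);
		page = pte_page(pte);
		page->virtual = 0;
	}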
@@ -167,30 +153,27 @@
 			current->state = TASK_UNINTERRUPTIBLE;
 			add_wait_queue(&pkmap_map_wait, &wait);
 			spin_unlock(&kmap_lock);
-			// it's not quite possible to saturate the
-			// pkmap pool right now.
-			BUG();
 			schedule();
 			remove_wait_queue(&pkmap_map_wait, &wait);
 			spin_lock(&kmap_lock);
-		}
 
-		/* Somebody else might have mapped it while we slept */
-		if (page->virtual)
-			return page->virtual;
+			/* Somebody else might have mapped it while we slept */
+			if (page->virtual)
+				return page->virtual;
 
-		/* Re-start */
-		count = LAST_PKMAP;
+			/* Re-start */
+			count = LAST_PKMAP;
+		}
 	}
 	vaddr = PKMAP_ADDR(last_pkmap_nr);
 	pkmap_page_table[last_pkmap_nr] = mk_pte(page, kmap_prot);
-
 	/*
 	 * Subtle! For some reason if we dont do this TLB flush then
 	 * we get data corruption and weird behavior in dbench runs.
 	 * But invlpg this should not be necessery ... Any ideas?
 	 */
 	__flush_tlb_one(vaddr);
+
 	pkmap_count[last_pkmap_nr] = 1;
 	page->virtual = vaddr;
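
The larger rewrite in this hunk makes pkmap pool exhaustion survivable: previously the sleep path hit an unconditional BUG(), and the "mapped while we slept" recheck plus the rescan counter reset lived outside the block that sleeps. After the patch the task really sleeps, and on wakeup it either returns the mapping another task created meanwhile or rescans all LAST_PKMAP slots from scratch. A hedged sketch of the resulting slow path in map_new_virtual(), assembled from the hunk (the DECLARE_WAITQUEUE placement is an assumption):

	if (--count == 0) {
		DECLARE_WAITQUEUE(wait, current);	/* assumed declared here */

		current->state = TASK_UNINTERRUPTIBLE;
		add_wait_queue(&pkmap_map_wait, &wait);
		spin_unlock(&kmap_lock);
		schedule();				/* sleep until a slot is freed */
		remove_wait_queue(&pkmap_map_wait, &wait);
		spin_lock(&kmap_lock);

		/* Somebody else might have mapped it while we slept */
		if (page->virtual)
			return page->virtual;

		/* Re-start the scan over all pkmap slots */
		count = LAST_PKMAP;
	}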
@@ -201,8 +184,6 @@
 {
 	unsigned long vaddr;
 
-	if (!PageHighMem(page))
-		BUG();
 	/*
 	 * For highmem pages, we can't trust "virtual" until
 	 * after we have the lock.
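
The dropped sanity check suggests the highmem/lowmem split is now decided before this function is entered. A hedged sketch, assuming the caller-side arrangement used by 2.3/2.4-era include/asm-i386/highmem.h: an inline kmap() that short-circuits lowmem pages and only forwards genuine highmem pages here, which would make the PageHighMem() BUG() redundant.

	/* Assumption: this wrapper is why the BUG() can go. */
	extern unsigned long kmap_high(struct page *page);

	static inline unsigned long kmap(struct page *page)
	{
		if (!PageHighMem(page))
			return page_address(page);	/* lowmem: permanent mapping */
		return kmap_high(page);			/* highmem: take a pkmap slot */
	}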