patch-2.3.23 linux/mm/mmap.c
- Lines: 133
- Date: Tue Oct 19 10:22:19 1999
- Orig file: v2.3.22/linux/mm/mmap.c
- Orig date: Fri Oct 15 15:25:14 1999
diff -u --recursive --new-file v2.3.22/linux/mm/mmap.c linux/mm/mmap.c
@@ -62,7 +62,7 @@
if (sysctl_overcommit_memory)
return 1;
- free = atomic_read(&buffermem) >> PAGE_SHIFT;
+ free = atomic_read(&buffermem_pages);
free += atomic_read(&page_cache_size);
free += nr_free_pages;
free += nr_swap_pages;
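
This hunk corrects a units mismatch in the overcommit heuristic: the old buffermem counter was kept in bytes and needed the PAGE_SHIFT scaling, whereas buffermem_pages already counts pages. A minimal userspace sketch of the estimate, assuming all four counters are in pages (names mirror the kernel's, the values are made up):

    #include <stdio.h>

    static int sysctl_overcommit_memory = 0;

    /* Sketch only: all counters are assumed to be in pages, which is
     * exactly what the hunk fixes for buffermem_pages (the old
     * buffermem was in bytes, hence the dropped shift).
     */
    static int vm_enough_memory_sketch(long pages, long buffermem_pages,
                                       long page_cache_size,
                                       long nr_free_pages,
                                       long nr_swap_pages)
    {
            long free;

            if (sysctl_overcommit_memory)   /* sysctl says: always succeed */
                    return 1;

            free = buffermem_pages;         /* already pages: no PAGE_SHIFT */
            free += page_cache_size;
            free += nr_free_pages;
            free += nr_swap_pages;
            return free > pages;
    }

    int main(void)
    {
            /* Hypothetical: 1024 reclaimable pages available, 512 asked for. */
            printf("%d\n", vm_enough_memory_sketch(512, 256, 256, 256, 256));
            return 0;
    }
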
@@ -323,8 +323,10 @@
*/
flags = vma->vm_flags;
addr = vma->vm_start; /* can addr have changed?? */
+ vmlist_modify_lock(mm);
insert_vm_struct(mm, vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
+ vmlist_modify_unlock(mm);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
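
This hunk, like the matching ones in the brk and do_mmap paths below, brackets every mutation of the per-mm vma list with vmlist_modify_lock()/vmlist_modify_unlock() so that lookups never observe a half-updated list. A sketch of the discipline, using a pthread mutex as a stand-in for whatever the lock macros expand to in this series, and simplified stand-in types:

    #include <pthread.h>

    struct vm_area_sk {
            unsigned long vm_start, vm_end;
            struct vm_area_sk *vm_next;
    };

    struct mm_sk {
            struct vm_area_sk *mmap;        /* singly linked, sorted by vm_start */
            pthread_mutex_t vmlist_lock;    /* vmlist_modify_lock() stand-in */
    };

    static void insert_vm_area_sk(struct mm_sk *mm, struct vm_area_sk *vma)
    {
            struct vm_area_sk **pp = &mm->mmap;

            pthread_mutex_lock(&mm->vmlist_lock);   /* vmlist_modify_lock(mm) */
            while (*pp && (*pp)->vm_start < vma->vm_start)
                    pp = &(*pp)->vm_next;
            vma->vm_next = *pp;
            *pp = vma;      /* other lockers never see the list half-linked */
            pthread_mutex_unlock(&mm->vmlist_lock); /* vmlist_modify_unlock(mm) */
    }
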
@@ -527,11 +529,13 @@
}
/* Work out to one of the ends. */
- if (end == area->vm_end)
+ if (end == area->vm_end) {
area->vm_end = addr;
- else if (addr == area->vm_start) {
+ vmlist_modify_lock(current->mm);
+ } else if (addr == area->vm_start) {
area->vm_offset += (end - area->vm_start);
area->vm_start = end;
+ vmlist_modify_lock(current->mm);
} else {
/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
/* Add end mapping -- leave beginning for below */
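
Both single-ended unmap cases now take the modify lock before the list is touched; the matching unlock comes at the end of unmap_fixup, after the final insert_vm_struct. The offset arithmetic being protected is easy to get wrong, so here is a sketch with a hypothetical helper name:

    /* Hypothetical helper restating the two single-ended trims above.
     * vm_offset is the file offset backing vm_start, so when the front
     * of the mapping goes away the offset must advance by the same
     * number of bytes.
     */
    struct vm_area_sketch {
            unsigned long vm_start, vm_end, vm_offset;
    };

    static void trim_area(struct vm_area_sketch *area,
                          unsigned long addr, unsigned long end)
    {
            if (end == area->vm_end) {
                    area->vm_end = addr;                     /* cut the tail */
            } else if (addr == area->vm_start) {
                    area->vm_offset += end - area->vm_start; /* keep offset in sync */
                    area->vm_start = end;                    /* cut the head */
            }
    }
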
@@ -552,10 +556,12 @@
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
area->vm_end = addr; /* Truncate area */
+ vmlist_modify_lock(current->mm);
insert_vm_struct(current->mm, mpnt);
}
insert_vm_struct(current->mm, area);
+ vmlist_modify_unlock(current->mm);
return extra;
}
@@ -655,6 +661,7 @@
npp = (prev ? &prev->vm_next : &mm->mmap);
free = NULL;
+ vmlist_modify_lock(mm);
for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
*npp = mpnt->vm_next;
mpnt->vm_next = free;
@@ -662,6 +669,8 @@
if (mm->mmap_avl)
avl_remove(mpnt, &mm->mmap_avl);
}
+ mm->mmap_cache = NULL; /* Kill the cache. */
+ vmlist_modify_unlock(mm);
/* Ok - we have the memory areas we should free on the 'free' list,
* so release them, and unmap the page range..
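
mm->mmap_cache is the one-entry cache that find_vma() consults first; once vmas are unlinked it may point at memory about to be freed, so it is now cleared inside the locked region (and the old clearing at the end of do_munmap is deleted in the @@ -702 hunk below). A sketch of a find_vma()-style lookup, with simplified stand-in types, showing why a stale cache is dangerous:

    #include <stddef.h>

    struct vma_sk {
            unsigned long vm_start, vm_end;
            struct vma_sk *vm_next;
    };

    struct mm_cache_sk {
            struct vma_sk *mmap;            /* sorted by vm_start */
            struct vma_sk *mmap_cache;      /* last lookup hit */
    };

    static struct vma_sk *find_vma_sk(struct mm_cache_sk *mm, unsigned long addr)
    {
            struct vma_sk *vma = mm->mmap_cache;

            /* Fast path: last hit still covers addr.  If an unmap freed
             * this vma without clearing mmap_cache, this dereference is
             * a use-after-free -- hence the kill inside the locked region. */
            if (vma && vma->vm_start <= addr && addr < vma->vm_end)
                    return vma;

            /* Slow path: first vma ending above addr; refresh the cache. */
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    if (addr < vma->vm_end) {
                            mm->mmap_cache = vma;
                            return vma;
                    }
            return NULL;
    }
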
@@ -678,6 +687,11 @@
end = end > mpnt->vm_end ? mpnt->vm_end : end;
size = end - st;
+ /*
+ * The lock_kernel interlocks with kswapd try_to_swap_out
+ * invoking a driver swapout() method, and being able to
+ * guarantee vma existence.
+ */
lock_kernel();
if (mpnt->vm_ops && mpnt->vm_ops->unmap)
mpnt->vm_ops->unmap(mpnt, st, size);
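
The new comment explains why lock_kernel() stays: kswapd's try_to_swap_out can call into a driver swapout() method, and the big lock is what guarantees the vma is still there when it does. A sketch of that interlock, with a pthread mutex standing in for the big kernel lock and hypothetical callback names:

    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* lock_kernel() stand-in */

    struct drv_vma {
            void (*unmap)(struct drv_vma *);        /* hypothetical driver hooks */
            void (*swapout)(struct drv_vma *);
    };

    /* munmap side: the driver unmap callback runs with the big lock held... */
    static void unmap_path(struct drv_vma *v)
    {
            pthread_mutex_lock(&big_lock);          /* lock_kernel() */
            if (v->unmap)
                    v->unmap(v);
            pthread_mutex_unlock(&big_lock);        /* unlock_kernel() */
    }

    /* ...so a swapout serialized on the same lock cannot have the vma
     * disappear underneath it. */
    static void swapout_path(struct drv_vma *v)
    {
            pthread_mutex_lock(&big_lock);
            if (v->swapout)
                    v->swapout(v);
            pthread_mutex_unlock(&big_lock);
    }
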
@@ -702,7 +716,6 @@
free_pgtables(mm, prev, addr, addr+len);
- mm->mmap_cache = NULL; /* Kill the cache. */
return 0;
}
@@ -786,8 +799,10 @@
flags = vma->vm_flags;
addr = vma->vm_start;
+ vmlist_modify_lock(mm);
insert_vm_struct(mm, vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
+ vmlist_modify_unlock(mm);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
@@ -814,7 +829,9 @@
release_segments(mm);
mpnt = mm->mmap;
+ vmlist_modify_lock(mm);
mm->mmap = mm->mmap_avl = mm->mmap_cache = NULL;
+ vmlist_modify_unlock(mm);
mm->rss = 0;
mm->total_vm = 0;
mm->locked_vm = 0;
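
exit_mmap detaches the whole list in one step: the three list pointers are cleared under the modify lock, and the actual teardown then walks the private mpnt chain with the lock dropped. A sketch of that detach-then-free shape, again with a pthread mutex as the stand-in lock:

    #include <pthread.h>
    #include <stdlib.h>

    struct vma_node {
            struct vma_node *vm_next;
    };

    struct mm_stub {
            struct vma_node *mmap;
            pthread_mutex_t lock;   /* vmlist_modify_lock() stand-in */
    };

    static void exit_mmap_sketch(struct mm_stub *mm)
    {
            struct vma_node *mpnt;

            /* Detach under the lock: lookups now see an empty list. */
            pthread_mutex_lock(&mm->lock);
            mpnt = mm->mmap;
            mm->mmap = NULL;        /* the real code also clears mmap_avl and mmap_cache */
            pthread_mutex_unlock(&mm->lock);

            /* Tear the chain down privately, lock no longer held. */
            while (mpnt) {
                    struct vma_node *next = mpnt->vm_next;
                    free(mpnt);     /* kmem_cache_free() in the kernel */
                    mpnt = next;
            }
    }
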
@@ -910,6 +927,7 @@
prev = mpnt;
mpnt = mpnt->vm_next;
}
+ mm->mmap_cache = NULL; /* Kill the cache. */
/* prev and mpnt cycle through the list, as long as
* start_addr < mpnt->vm_end && prev->vm_start < end_addr
@@ -946,7 +964,9 @@
if (mpnt->vm_ops && mpnt->vm_ops->close) {
mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
mpnt->vm_start = mpnt->vm_end;
+ vmlist_modify_unlock(mm);
mpnt->vm_ops->close(mpnt);
+ vmlist_modify_lock(mm);
}
mm->map_count--;
remove_shared_vm_struct(mpnt);
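
merge_segments drops the modify lock around the driver's close() callback and retakes it afterwards, since a vm_ops method may sleep; the vm_start/vm_offset fixup happens first, while the lock is still held, so the vma is already empty by the time close() runs. A sketch of the unlock-call-relock shape, with hypothetical names:

    #include <pthread.h>

    struct obj {
            void (*close)(struct obj *);    /* may sleep: must be called unlocked */
    };

    /* Caller holds list_lock on entry and on exit, mirroring the
     * vmlist_modify_unlock()/close()/vmlist_modify_lock() dance above. */
    static void retire_obj(pthread_mutex_t *list_lock, struct obj *o)
    {
            if (o->close) {
                    pthread_mutex_unlock(list_lock);
                    o->close(o);                    /* safe to sleep here */
                    pthread_mutex_lock(list_lock);  /* state may have changed: re-check */
            }
            /* ... continue unlinking under the lock ... */
    }
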
@@ -955,7 +975,6 @@
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = prev;
}
- mm->mmap_cache = NULL; /* Kill the cache. */
}
void __init vma_init(void)