patch-2.1.43 linux/mm/page_alloc.c
- Lines: 50
- Date: Mon Jun 16 08:46:24 1997
- Orig file: v2.1.42/linux/mm/page_alloc.c
- Orig date: Wed May 28 10:51:33 1997
diff -u --recursive --new-file v2.1.42/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -97,7 +97,9 @@
*
* Hint: -mask = 1+~mask
*/
+#ifdef __SMP__
static spinlock_t page_alloc_lock;
+#endif
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
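
This hunk builds the page allocator's lock object only when __SMP__ is defined; on a uniprocessor kernel the spin-lock operations compile away, so the static spinlock_t would otherwise sit unused. A standalone sketch of the pattern (hypothetical names, not kernel source; compile with and without -D__SMP__):

    /* Illustration only: on SMP builds the lock object exists and the
     * macros would touch it; on UP builds both the object and the
     * operations vanish, which is what the #ifdef above achieves. */
    #include <stdio.h>

    #ifdef __SMP__
    typedef struct { volatile int lock; } spinlock_t;
    static spinlock_t demo_lock;            /* exists only on SMP builds */
    #define lock_it()   (demo_lock.lock = 1)
    #define unlock_it() (demo_lock.lock = 0)
    #else
    #define lock_it()   do { } while (0)    /* UP: compiles to nothing */
    #define unlock_it() do { } while (0)
    #endif

    int main(void)
    {
        lock_it();
        printf("critical section\n");
        unlock_it();
        return 0;
    }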
@@ -131,9 +133,8 @@
void __free_page(struct page *page)
{
if (!PageReserved(page) && atomic_dec_and_test(&page->count)) {
- unsigned long map_nr = page->map_nr;
- delete_from_swap_cache(map_nr);
- free_pages_ok(map_nr, 0);
+ delete_from_swap_cache(page);
+ free_pages_ok(page->map_nr, 0);
}
}
@@ -146,7 +147,7 @@
if (PageReserved(map))
return;
if (atomic_dec_and_test(&map->count)) {
- delete_from_swap_cache(map_nr);
+ delete_from_swap_cache(map);
free_pages_ok(map_nr, order);
return;
}
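
These two hunks change the swap-cache interface: delete_from_swap_cache() now takes the struct page pointer the caller already holds, instead of a map_nr index that the callee would have to turn back into a page. A minimal sketch of the before/after calling convention, with illustrative stub bodies that are not the kernel's:

    #include <stdio.h>

    typedef struct page { unsigned long map_nr; int count; } mem_map_t;
    static mem_map_t mem_map[16];

    /* old style: caller passes an index, callee re-derives the page */
    static void delete_from_swap_cache_old(unsigned long map_nr)
    {
        mem_map_t *page = &mem_map[map_nr];
        printf("old: page %lu\n", page->map_nr);
    }

    /* new style: caller already holds the pointer, pass it directly */
    static void delete_from_swap_cache_new(mem_map_t *page)
    {
        printf("new: page %lu\n", page->map_nr);
    }

    int main(void)
    {
        mem_map[3].map_nr = 3;
        delete_from_swap_cache_old(3);
        delete_from_swap_cache_new(&mem_map[3]);
        return 0;
    }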
@@ -278,8 +279,7 @@
min_free_pages = i;
free_pages_low = i + (i>>1);
free_pages_high = i + i;
- start_mem = init_swap_cache(start_mem, end_mem);
- mem_map = (mem_map_t *) start_mem;
+ mem_map = (mem_map_t *) LONG_ALIGN(start_mem);
p = mem_map + MAP_NR(end_mem);
start_mem = LONG_ALIGN((unsigned long) p);
memset(mem_map, 0, start_mem - (unsigned long) mem_map);
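
This hunk drops the init_swap_cache() call from memory initialization, so mem_map now starts directly at start_mem rounded up to a long boundary. The rounding idiom, in a runnable standalone form (the LONG_ALIGN definition below matches my recollection of the kernel's macro and should be treated as an assumption):

    #include <stdio.h>

    /* Round x up to the next multiple of sizeof(long). */
    #define LONG_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

    int main(void)
    {
        unsigned long start_mem = 0x100003;  /* deliberately misaligned */
        printf("aligned: %#lx\n", LONG_ALIGN(start_mem)); /* 0x100008 on LP64 */
        return 0;
    }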
@@ -334,7 +334,7 @@
}
vma->vm_mm->rss++;
tsk->maj_flt++;
- if (!write_access && add_to_swap_cache(MAP_NR(page), entry)) {
+ if (!write_access && add_to_swap_cache(&mem_map[MAP_NR(page)], entry)) {
/* keep swap page allocated for the moment (swap cache) */
set_pte(page_table, mk_pte(page, vma->vm_page_prot));
return;
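
The final hunk applies the same pointer-based convention to add_to_swap_cache(): a caller holding only a physical page address converts it to a struct page pointer with &mem_map[MAP_NR(page)]. A hypothetical illustration of that conversion (in the 2.1 kernels of this era MAP_NR was, as best I can tell, a plain shift by PAGE_SHIFT; the names and values below are made up):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define MAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT)

    typedef struct page { int count; } mem_map_t;
    static mem_map_t mem_map[32];

    int main(void)
    {
        unsigned long page = 5ul << PAGE_SHIFT;      /* fake page address */
        mem_map_t *p = &mem_map[MAP_NR(page)];       /* index -> pointer */
        printf("entry %ld\n", (long)(p - mem_map));  /* prints 5 */
        return 0;
    }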