path: root/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgalloc.h
/*
 * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 */
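/*
 * Page-table allocation helpers (pgd/pmd/pte) for the ColdFire MMU
 * port of m68k.
 */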

#ifndef M68K_CF_PGALLOC_H
#define M68K_CF_PGALLOC_H
#include <linux/highmem.h>
#include <asm/coldfire.h>
#include <asm/page.h>
#include <asm/cf_tlbflush.h>
#include <asm/cf_cacheflush.h>

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

/* Allocate one zeroed page to hold kernel page-table entries. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (!page)
		return NULL;

	memset((void *)page, 0, PAGE_SIZE);
	return (pte_t *) (page);
}

/* The pmd is folded into the pgd on ColdFire; just return the pgd entry. */
static inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })

#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)

#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
	(unsigned long)(page_address(page)))

#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page,
				  unsigned long address)
{
	__free_page(page);
}

#define __pmd_free_tlb(tlb, pmd, addr) do { } while (0)

/*
 * Allocate one page-table page for user mappings; the page is cleared,
 * flushed to RAM and made non-cacheable before it is returned.
 */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
	pte_t *pte;

	if (!page)
		return NULL;

	pte = kmap(page);
	clear_page(pte);
	__flush_page_to_ram(pte);
	flush_tlb_kernel_page(pte);
	nocache_page(pte);
	kunmap(page);

	return page;
}

static inline void pte_free(struct mm_struct *mm, struct page *page)
{
	__free_page(page);
}

/*
 * In our implementation, each pgd entry contains 1 pmd that is never allocated
 * or freed.  pgd_present is always 1, so this should never be called. -NL
 */
#define pmd_free(mm, pmd) BUG()

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

/*
 * A new pgd starts as a copy of swapper_pg_dir so kernel mappings are
 * shared; the leading (user) part of the table is then cleared.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd;

	new_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_NOWARN);
	if (!new_pgd)
		return NULL;
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
	return new_pgd;
}

#define pgd_populate(mm, pmd, pte) BUG()

#endif /* M68K_CF_PGALLOC_H */