author     venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>  2009-01-09 16:13:11 -0800
committer  Ingo Molnar <mingo@elte.hu>  2009-01-13 19:13:01 +0100
commit     e4b866ed197cef9989348e0479fed8d864ea465b (patch)
tree       0420e59a2312f6d1156ec85e6895cf6f322e0c6f
parent     afc7d20c8429f32f19d47367fdc36eeed2334ec3 (diff)
x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
Impact: cleanup
Change the protection parameter of track_pfn_vma_new() into a pgprot_t
pointer. A subsequent patch changes the x86 PAT handling to return a
compatible memtype via this pgprot_t when the requested memtype cannot
be allowed due to conflicts.
No functionality change in this patch.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
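
In short, the new contract: the caller passes a writable pgprot_t and must
install whatever value comes back, since track_pfn_vma_new() may rewrite it.
A minimal caller sketch, assuming kernel context; example_map_one_pfn() is a
hypothetical name, and its body simply mirrors the vm_insert_pfn() change in
the diff below:

	/*
	 * Sketch only, not part of this commit. Shows the by-pointer
	 * calling convention introduced here.
	 */
	static int example_map_one_pfn(struct vm_area_struct *vma,
					unsigned long addr, unsigned long pfn)
	{
		pgprot_t pgprot = vma->vm_page_prot;	/* local, writable copy */
		int ret;

		/* The tracker may rewrite pgprot with a compatible memtype. */
		if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
			return -EINVAL;

		/* Install the possibly-updated protection, not vm_page_prot. */
		ret = insert_pfn(vma, addr, pfn, pgprot);
		if (ret)
			untrack_pfn_vma(vma, pfn, PAGE_SIZE);
		return ret;
	}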
-rw-r--r--  arch/x86/mm/pat.c              |  6 +++---
-rw-r--r--  include/asm-generic/pgtable.h  |  4 ++--
-rw-r--r--  mm/memory.c                    |  7 ++++---
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd3723..f88ac80530c0 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -741,7 +741,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, *prot);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
 		if (retval)
 			goto cleanup_ret;
 	}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 72ebe91005a8..8e6d0ca70aba 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
diff --git a/mm/memory.c b/mm/memory.c
index d3ee2ea5615c..22bfa7a47a0b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
 	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
 
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
 		return -EINVAL;
 
-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);
 	if (ret)
 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
 
@@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
 	if (err) {
 		/*
 		 * To indicate that track_pfn related cleanup is not
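
For context on why the parameter must become a pointer: the follow-up PAT
patch lets the tracker grant a compatible memtype rather than fail on a
conflict. A hedged sketch of that shape, not the actual follow-up code;
example_reserve() is a hypothetical helper, and in the real tree this logic
sits inside reserve_pfn_range():

	/*
	 * Hypothetical sketch of the behaviour this patch enables.
	 * Names prefixed "example_" are illustrative, not from the tree.
	 */
	int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size)
	{
		unsigned long want_type = pgprot_val(*prot) & _PAGE_CACHE_MASK;
		unsigned long got_type = want_type;

		/*
		 * Assume example_reserve() reserves [pfn, pfn + size) and,
		 * on a memtype conflict, reports the compatible type that
		 * was actually granted instead of failing outright.
		 */
		if (example_reserve(pfn, size, want_type, &got_type))
			return -EINVAL;

		/* Hand the caller the memtype it can actually use. */
		if (got_type != want_type)
			*prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) |
					 got_type);
		return 0;
	}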