Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/arm32 Trailing whitespace
details: https://anonhg.NetBSD.org/src/rev/6ba3c5cc274d
branches: trunk
changeset: 328659:6ba3c5cc274d
user: skrll <skrll@NetBSD.org>
date: Sat Apr 12 09:09:47 2014 +0000
description:
Trailing whitespace
diffstat:
sys/arch/arm/arm32/fault.c | 16 ++++----
sys/arch/arm/arm32/pmap.c | 88 +++++++++++++++++++++++-----------------------
2 files changed, 52 insertions(+), 52 deletions(-)
diffs (truncated from 416 to 300 lines):
diff -r baa035541ee7 -r 6ba3c5cc274d sys/arch/arm/arm32/fault.c
--- a/sys/arch/arm/arm32/fault.c Sat Apr 12 08:39:58 2014 +0000
+++ b/sys/arch/arm/arm32/fault.c Sat Apr 12 09:09:47 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: fault.c,v 1.99 2014/04/01 18:00:42 matt Exp $ */
+/* $NetBSD: fault.c,v 1.100 2014/04/12 09:11:47 skrll Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -81,7 +81,7 @@
#include "opt_kgdb.h"
#include <sys/types.h>
-__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.99 2014/04/01 18:00:42 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.100 2014/04/12 09:11:47 skrll Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -114,7 +114,7 @@
#include <arch/arm/arm/disassem.h>
#include <arm/arm32/machdep.h>
-
+
extern char fusubailout[];
#ifdef DEBUG
@@ -318,7 +318,7 @@
* further down if we have to decode the current instruction.
*/
#ifdef THUMB_CODE
- /*
+ /*
* XXX: It would be nice to be able to support Thumb in the kernel
* at some point.
*/
@@ -416,7 +416,7 @@
if (CPU_IS_ARMV6_P() || CPU_IS_ARMV7_P()) {
ftype = (fsr & FAULT_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
} else if (IS_PERMISSION_FAULT(fsr)) {
- ftype = VM_PROT_WRITE;
+ ftype = VM_PROT_WRITE;
} else {
#ifdef THUMB_CODE
/* Fast track the ARM case. */
@@ -446,11 +446,11 @@
((insn & 0x0e1000b0) == 0x000000b0) || /* STR[HD]*/
((insn & 0x0a100000) == 0x08000000) || /* STM/CDT*/
((insn & 0x0f9000f0) == 0x01800090)) /* STREX[BDH] */
- ftype = VM_PROT_WRITE;
+ ftype = VM_PROT_WRITE;
else if ((insn & 0x0fb00ff0) == 0x01000090)/* SWP */
- ftype = VM_PROT_READ | VM_PROT_WRITE;
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
else
- ftype = VM_PROT_READ;
+ ftype = VM_PROT_READ;
}
}
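
For context: the fault.c hunk above sits in the pre-ARMv6 fallback path of the data-abort handler, where the kernel decodes the faulting opcode to decide whether the access was a load or a store before picking a fault type. On ARMv6/v7 the FAULT_WRITE bit of the FSR answers this directly, as the first hunk shows. A minimal standalone sketch of that decode, using only the mask tests visible in the hunk (the function and enum names are illustrative, not NetBSD's):

	/*
	 * Illustrative sketch only (not the NetBSD code): classify an
	 * ARM data abort by decoding the faulting opcode, using the
	 * same mask tests shown in the hunk above.
	 */
	#include <stdint.h>

	enum abort_kind { ABT_READ, ABT_WRITE, ABT_RDWR };

	static enum abort_kind
	classify_abort(uint32_t insn)
	{
		if (((insn & 0x0e1000b0) == 0x000000b0) ||  /* STR[HD] */
		    ((insn & 0x0a100000) == 0x08000000) ||  /* STM/CDT */
		    ((insn & 0x0f9000f0) == 0x01800090))    /* STREX[BDH] */
			return ABT_WRITE;   /* store: write fault */
		if ((insn & 0x0fb00ff0) == 0x01000090)      /* SWP */
			return ABT_RDWR;    /* swap: read and write */
		return ABT_READ;            /* anything else: load */
	}

(The truncated condition list in the hunk starts one line earlier than what is shown; the sketch covers only the masks visible above.)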
diff -r baa035541ee7 -r 6ba3c5cc274d sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sat Apr 12 08:39:58 2014 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sat Apr 12 09:09:47 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.285 2014/04/12 08:39:58 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.286 2014/04/12 09:09:47 skrll Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -216,7 +216,7 @@
#include <arm/locore.h>
//#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.285 2014/04/12 08:39:58 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.286 2014/04/12 09:09:47 skrll Exp $");
//#define PMAP_DEBUG
#ifdef PMAP_DEBUG
@@ -252,7 +252,7 @@
#define NPDEBUG(_lev_,_stat_) \
if (pmapdebug & (_lev_)) \
((_stat_))
-
+
#else /* PMAP_DEBUG */
#define NPDEBUG(_lev_,_stat_) /* Nothing */
#endif /* PMAP_DEBUG */
@@ -1196,7 +1196,7 @@
* => caller must call pmap_vac_me_harder() if writable status of a page
* may have changed.
* => we return the old flags
- *
+ *
* Modify a physical-virtual mapping in the pv table
*/
static u_int
@@ -1499,7 +1499,7 @@
* bucket/page table in place.
*
* Note that if a new L2 bucket/page was allocated, the caller *must*
- * increment the bucket occupancy counter appropriately *before*
+ * increment the bucket occupancy counter appropriately *before*
* releasing the pmap's lock to ensure no other thread or cpu deallocates
* the bucket/page in the meantime.
*/
@@ -1763,7 +1763,7 @@
* KR = # of kernel read only pages
* UW = # of user read/write pages
* UR = # of user read only pages
- *
+ *
* KC = kernel mapping is cacheable
* UC = user mapping is cacheable
*
@@ -1832,7 +1832,7 @@
struct pv_entry *pv;
pmap_t last_pmap = pm;
- /*
+ /*
* Pass one, see if there are both kernel and user pmaps for
* this page. Calculate whether there are user-writable or
* kernel-writable pages.
@@ -1845,7 +1845,7 @@
u_entries = md->urw_mappings + md->uro_mappings;
- /*
+ /*
* We know we have just been updating a kernel entry, so if
* all user pages are already cacheable, then there is nothing
* further to do.
@@ -1854,13 +1854,13 @@
return;
if (u_entries) {
- /*
+ /*
* Scan over the list again, for each entry, if it
* might not be set correctly, call pmap_vac_me_user
* to recalculate the settings.
*/
SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
- /*
+ /*
* We know kernel mappings will get set
* correctly in other calls. We also know
* that if the pmap is the same as last_pmap
@@ -1869,26 +1869,26 @@
if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
continue;
- /*
+ /*
* If there are kernel entries and this page
* is writable but non-cacheable, then we can
- * skip this entry also.
+ * skip this entry also.
*/
if (md->k_mappings &&
(pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
(PVF_NC | PVF_WRITE))
continue;
- /*
- * Similarly if there are no kernel-writable
- * entries and the page is already
+ /*
+ * Similarly if there are no kernel-writable
+ * entries and the page is already
* read-only/cacheable.
*/
if (md->krw_mappings == 0 &&
(pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
continue;
- /*
+ /*
* For some of the remaining cases, we know
* that we must recalculate, but for others we
* can't tell if they are correct or not, so
@@ -2090,7 +2090,7 @@
}
md->pvh_attrs &= ~PVF_WRITE;
/*
- * No KMPAGE and we exited early, so we must have
+ * No KMPAGE and we exited early, so we must have
* multiple color mappings.
*/
if (!bad_alias && pv != NULL)
@@ -2412,10 +2412,10 @@
if (maskbits & (PVF_WRITE|PVF_MOD)) {
#ifdef PMAP_CACHE_VIVT
if ((oflags & PVF_NC)) {
- /*
+ /*
* Entry is not cacheable:
*
- * Don't turn caching on again if this is a
+ * Don't turn caching on again if this is a
* modified emulation. This would be
* inconsistent with the settings created by
* pmap_vac_me_harder(). Otherwise, it's safe
@@ -2431,7 +2431,7 @@
}
} else
if (l2pte_writable_p(opte)) {
- /*
+ /*
* Entry is writable/cacheable: check if pmap
* is current if it is flush it, otherwise it
* won't be in the cache
@@ -2584,7 +2584,7 @@
if (pmap_is_current(pv->pv_pmap)) {
flags |= pv->pv_flags;
/*
- * The page is mapped non-cacheable in
+ * The page is mapped non-cacheable in
* this map. No need to flush the cache.
*/
if (pv->pv_flags & PVF_NC) {
@@ -2775,9 +2775,9 @@
* bus_dma will ignore uncached pages.
*/
if (scache_line_size != 0) {
- cpu_dcache_wb_range(dstp, PAGE_SIZE);
+ cpu_dcache_wb_range(dstp, PAGE_SIZE);
if (wbinv_p) {
- cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE);
+ cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE);
cpu_dcache_inv_range(dstp, PAGE_SIZE);
} else {
cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE);
@@ -2979,7 +2979,7 @@
/*
* pmap_t pmap_create(void)
- *
+ *
* Create a new pmap structure from scratch.
*/
pmap_t
@@ -3030,7 +3030,7 @@
/*
* int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
* u_int flags)
- *
+ *
* Insert the given physical page (p) at
* the specified virtual address (v) in the
* target physical map with the protection requested.
@@ -3145,7 +3145,7 @@
}
#ifdef ARM_MMU_EXTENDED
- /*
+ /*
* If the page has been cleaned, then the pvh_attrs
* will have PVF_EXEC set, so mark it execute so we
* don't get an access fault when trying to execute
@@ -3299,7 +3299,7 @@
if (opte == 0) {
l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
pm->pm_stats.resident_count++;
- }
+ }
UVMHIST_LOG(maphist, " opte %#x npte %#x", opte, npte, 0, 0);
@@ -3667,7 +3667,7 @@
: NULL,
kpm->pm_l2[L2_IDX(l1slot)]
? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva
- : NULL);
+ : NULL);
KASSERT(l2b->l2b_kva != NULL);
pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
@@ -4200,7 +4200,7 @@
/*
* Check the PTE itself.
- */
+ */
pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
const pt_entry_t opte = *ptep;
if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0)
@@ -4355,7 +4355,7 @@
#endif
pmap_release_page_lock(md);
- /*
+ /*
* Re-enable write permissions for the page. No need to call
* pmap_vac_me_harder(), since this is just a
* modified-emulation fault, and the PVF_WRITE bit isn't
@@ -4815,7 +4815,7 @@
cpu_setttb(npm->pm_l1_pa, pai->pai_asid);
/*
* Now we can reenable tablewalks since the CONTEXTIDR and TTBR0 have
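
The change itself is pure whitespace cleanup across both files. For illustration, a small standalone checker in the same spirit (not part of the commit; names are made up) that flags the class of lines this commit strips, i.e. lines ending in a space or tab:

	/*
	 * Illustrative sketch: report lines with trailing blanks or
	 * tabs.  Not NetBSD code; lines longer than the buffer are
	 * counted per chunk read.
	 */
	#include <stdio.h>
	#include <string.h>

	int
	main(int argc, char **argv)
	{
		FILE *fp = (argc > 1) ? fopen(argv[1], "r") : stdin;
		char buf[4096];
		int lineno = 0;

		if (fp == NULL) {
			perror("fopen");
			return 1;
		}
		while (fgets(buf, sizeof(buf), fp) != NULL) {
			size_t len = strcspn(buf, "\n"); /* length sans newline */

			lineno++;
			if (len > 0 &&
			    (buf[len - 1] == ' ' || buf[len - 1] == '\t'))
				printf("%d: trailing whitespace\n", lineno);
		}
		if (fp != stdin)
			fclose(fp);
		return 0;
	}

Run as, e.g., ./wscheck sys/arch/arm/arm32/pmap.c (the binary name is hypothetical).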