diff options
Diffstat (limited to 'static/netbsd/man9/pmap.9')
| -rw-r--r-- | static/netbsd/man9/pmap.9 | 1243 |
1 files changed, 1243 insertions, 0 deletions
diff --git a/static/netbsd/man9/pmap.9 b/static/netbsd/man9/pmap.9 new file mode 100644 index 00000000..b8d87a43 --- /dev/null +++ b/static/netbsd/man9/pmap.9 @@ -0,0 +1,1243 @@ +.\" $NetBSD: pmap.9,v 1.48 2020/08/16 16:48:08 thorpej Exp $ +.\" +.\" Copyright (c) 2000, 2001, 2002, 2020 The NetBSD Foundation, Inc. +.\" All rights reserved. +.\" +.\" This code is derived from software contributed to The NetBSD Foundation +.\" by Jason R. Thorpe. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS +.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS +.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +.\" POSSIBILITY OF SUCH DAMAGE. 
+.\" +.Dd August 16, 2020 +.Dt PMAP 9 +.Os +.Sh NAME +.Nm pmap +.Nd machine-dependent portion of the virtual memory system +.Sh SYNOPSIS +.In sys/param.h +.In uvm/uvm_extern.h +.Ft void +.Fn "pmap_init" "void" +.Ft void +.Fn "pmap_virtual_space" "vaddr_t *vstartp" "vaddr_t *vendp" +.Ft vaddr_t +.Fn "pmap_steal_memory" "vsize_t size" "vaddr_t *vstartp" "vaddr_t *vendp" +.Ft pmap_t +.Fn "pmap_kernel" "void" +.Ft pmap_t +.Fn "pmap_create" "void" +.Ft void +.Fn "pmap_destroy" "pmap_t pmap" +.Ft void +.Fn "pmap_reference" "pmap_t pmap" +.Ft void +.Fn "pmap_fork" "pmap_t src_map" "pmap_t dst_map" +.Ft long +.Fn "pmap_resident_count" "pmap_t pmap" +.Ft long +.Fn "pmap_wired_count" "pmap_t pmap" +.Ft vaddr_t +.Fn "pmap_growkernel" "vaddr_t maxkvaddr" +.Ft int +.Fn "pmap_enter" "pmap_t pmap" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" \ + "u_int flags" +.Ft void +.Fn "pmap_remove" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" +.Ft bool +.Fn "pmap_remove_all" "pmap_t pmap" +.Ft void +.Fn "pmap_protect" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" "vm_prot_t prot" +.Ft void +.Fn "pmap_unwire" "pmap_t pmap" "vaddr_t va" +.Ft bool +.Fn "pmap_extract" "pmap_t pmap" "vaddr_t va" "paddr_t *pap" +.Ft void +.Fn "pmap_kenter_pa" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" "u_int flags" +.Ft void +.Fn "pmap_kremove" "vaddr_t va" "vsize_t size" +.Ft void +.Fn "pmap_copy" "pmap_t dst_map" "pmap_t src_map" "vaddr_t dst_addr" \ + "vsize_t len" "vaddr_t src_addr" +.Ft void +.Fn "pmap_update" "pmap_t pmap" +.Ft void +.Fn "pmap_activate" "struct lwp *l" +.Ft void +.Fn "pmap_deactivate" "struct lwp *l" +.Ft void +.Fn "pmap_zero_page" "paddr_t pa" +.Ft void +.Fn "pmap_copy_page" "paddr_t src" "paddr_t dst" +.Ft void +.Fn "pmap_page_protect" "struct vm_page *pg" "vm_prot_t prot" +.Ft bool +.Fn "pmap_clear_modify" "struct vm_page *pg" +.Ft bool +.Fn "pmap_clear_reference" "struct vm_page *pg" +.Ft bool +.Fn "pmap_is_modified" "struct vm_page *pg" +.Ft bool +.Fn "pmap_is_referenced" "struct vm_page *pg" 
+.Ft paddr_t +.Fn "pmap_phys_address" "paddr_t cookie" +.Ft vaddr_t +.Fn "PMAP_MAP_POOLPAGE" "paddr_t pa" +.Ft paddr_t +.Fn "PMAP_UNMAP_POOLPAGE" "vaddr_t va" +.Ft void +.Fn "PMAP_PREFER" "vaddr_t hint" "vaddr_t *vap" "vsize_t sz" "int td" +.Sh DESCRIPTION +The +.Nm +module is the machine-dependent portion of the +.Nx +virtual memory system +.Xr uvm 9 . +The purpose of the +.Nm +module is to manage physical address maps, to program the +memory management hardware on the system, and perform any +cache operations necessary to ensure correct operation of +the virtual memory system. +The +.Nm +module is also responsible for maintaining certain information +required by +.Xr uvm 9 . +.Pp +In order to cope with hardware architectures that make the +invalidation of virtual address mappings expensive (e.g., +TLB invalidations, TLB shootdown operations for multiple +processors), the +.Nm +module is allowed to delay mapping invalidation or protection +operations until such time as they are actually necessary. +The functions that are allowed to delay such actions are +.Fn pmap_enter , +.Fn pmap_remove , +.Fn pmap_protect , +.Fn pmap_kenter_pa , +and +.Fn pmap_kremove . +Callers of these functions must use the +.Fn pmap_update +function to notify the +.Nm +module that the mappings need to be made correct. +Since the +.Nm +module is provided with information as to which processors are +using a given physical map, the +.Nm +module may use whatever optimizations it has available to reduce +the expense of virtual-to-physical mapping synchronization. +.Ss HEADER FILES AND DATA STRUCTURES +Machine-dependent code must provide the header file +.In machine/pmap.h . +This file contains the definition of the +.Dv pmap +structure: +.Bd -literal -offset indent +struct pmap { + /* Contents defined by pmap implementation. */ +}; +typedef struct pmap *pmap_t; +.Ed +.Pp +This header file may also define other data structures that the +.Nm +implementation uses. 
+.Pp +Note that all prototypes for +.Nm +interface functions are provided by the header file +.In uvm/uvm_pmap.h . +It is possible to override this behavior by defining the +C pre-processor macro +.Dv PMAP_EXCLUDE_DECLS . +This may be used to add a layer of indirection to +.Nm +API calls, for handling different MMU types in a single +.Nm +module, for example. +If the +.Dv PMAP_EXCLUDE_DECLS +macro is defined, +.In machine/pmap.h +.Em must +provide function prototypes in a block like so: +.Bd -literal -offset indent +#ifdef _KERNEL /* not exposed to user namespace */ +__BEGIN_DECLS /* make safe for C++ */ +/* Prototypes go here. */ +__END_DECLS +#endif /* _KERNEL */ +.Ed +.Pp +The header file +.In uvm/uvm_pmap.h +defines a structure for tracking +.Nm +statistics (see below). +This structure is defined as: +.Bd -literal -offset indent +struct pmap_statistics { + long resident_count; /* number of mapped pages */ + long wired_count; /* number of wired pages */ +}; +.Ed +.Ss WIRED MAPPINGS +The +.Nm +module is based on the premise that all information contained +in the physical maps it manages is redundant. +That is, physical map information may be +.Dq forgotten +by the +.Nm +module in the event that it is necessary to do so; it can be rebuilt +by +.Xr uvm 9 +by taking a page fault. +There is one exception to this rule: so-called +.Dq wired +mappings may not be forgotten. +Wired mappings are those for which either no high-level information +exists with which to rebuild the mapping, or mappings which are needed +by critical sections of code where taking a page fault is unacceptable. +Information about which mappings are wired is provided to the +.Nm +module when a mapping is established. +.Ss MODIFIED/REFERENCED INFORMATION +The +.Nm +module is required to keep track of whether or not a page managed +by the virtual memory system has been referenced or modified. 
+This information is used by +.Xr uvm 9 +to determine what happens to the page when scanned by the +pagedaemon. +.Pp +Many CPUs provide hardware support for tracking +modified/referenced information. +However, many CPUs, particularly modern RISC CPUs, do not. +On CPUs which lack hardware support for modified/referenced tracking, the +.Nm +module must emulate it in software. +There are several strategies for doing this, and the best strategy +depends on the CPU. +.Pp +The +.Dq referenced +attribute is used by the pagedaemon to determine if a page is +.Dq active . +Active pages are not candidates for re-use in the page replacement algorithm. +Accurate referenced information is not required for correct operation; if +supplying referenced information for a page is not feasible, then the +.Nm +implementation should always consider the +.Dq referenced +attribute to be +.Dv false . +.Pp +The +.Dq modified +attribute is used by the pagedaemon to determine if a page needs +to be cleaned (written to backing store; swap space, a regular file, etc.). +Accurate modified information +.Em must +be provided by the +.Nm +module for correct operation of the virtual memory system. +.Pp +Note that modified/referenced information is only tracked for +pages managed by the virtual memory system (i.e., pages for +which a vm_page structure exists). +In addition, only +.Dq managed +mappings of those pages have modified/referenced tracking. +Mappings entered with the +.Fn pmap_enter +function are +.Dq managed +mappings. +It is possible for +.Dq unmanaged +mappings of a page to be created, using the +.Fn pmap_kenter_pa +function. +The use of +.Dq unmanaged +mappings should be limited to code which may execute in interrupt context +(for example, the kernel memory allocator), or to enter mappings for +physical addresses which are not managed by the virtual memory system. +.Dq Unmanaged +mappings may only be entered into the kernel's virtual address space. 
+This constraint is placed on the callers of the +.Fn pmap_kenter_pa +and +.Fn pmap_kremove +functions so that the +.Nm +implementation need not block interrupts when manipulating data +structures or holding locks. +.Pp +Also note that the modified/referenced information must be tracked +on a per-page basis; they are not attributes of a mapping, but attributes +of a page. +Therefore, even after all mappings for a given page have +been removed, the modified/referenced information for that page +.Em must +be preserved. +The only time the modified/referenced attributes may +be cleared is when the virtual memory system explicitly calls the +.Fn pmap_clear_modify +and +.Fn pmap_clear_reference +functions. +These functions must also change any internal state necessary to detect +the page being modified or referenced again after the modified or +referenced state is cleared. +(Prior to +.Nx 1.6 , +.Nm +implementations could get away without this because UVM (and Mach VM +before that) always called +.Fn pmap_page_protect +before clearing the modified or referenced state, but UVM has been changed +to not do this anymore, so all +.Nm +implementations must now handle this.) +.Ss STATISTICS +The +.Nm +is required to keep statistics as to the number of +.Dq resident +pages and the number of +.Dq wired +pages. +.Pp +A +.Dq resident +page is one for which a mapping exists. +This statistic is used to compute the resident size of a process and +enforce resource limits. +Only pages (whether managed by the virtual memory system or not) +which are mapped into a physical map should be counted in the resident +count. +.Pp +A +.Dq wired +page is one for which a wired mapping exists. +This statistic is used to enforce resource limits. 
+.Pp +Note that it is recommended (though not required) that the +.Nm +implementation use the +.Dv pmap_statistics +structure in the tracking of +.Nm +statistics by placing it inside the +.Dv pmap +structure and adjusting the counts when mappings are established, changed, +or removed. +This avoids potentially expensive data structure traversals when the +statistics are queried. +.Ss REQUIRED FUNCTIONS +This section describes functions that a +.Nm +module must provide to the virtual memory system. +.Bl -tag -width indent -offset indent +.It void Fn "pmap_init" "void" +This function initializes the +.Nm +module. +It is called by +.Fn uvm_init +to initialize any data structures that the module needs to +manage physical maps. +.It pmap_t Fn "pmap_kernel" "void" +A machine independent macro which expands to +.Va kernel_pmap_ptr . +This variable must be exported by the platform's pmap module and it +must point to the kernel pmap. +.It void Fn "pmap_virtual_space" "vaddr_t *vstartp" "vaddr_t *vendp" +The +.Fn pmap_virtual_space +function is called to determine the initial kernel virtual address +space beginning and end. +These values are used to create the kernel's virtual memory map. +The function must set +.Fa *vstartp +to the first kernel virtual address that will be managed by +.Xr uvm 9 , +and must set +.Fa *vendp +to the last kernel virtual address that will be managed by +.Xr uvm 9 . +.Pp +If the +.Fn pmap_growkernel +feature is used by a +.Nm +implementation, then +.Fa *vendp +should be set to the maximum kernel virtual address allowed by the +implementation. +If +.Fn pmap_growkernel +is not used, then +.Fa *vendp +.Em must +be set to the maximum kernel virtual address that can be mapped with +the resources currently allocated to map the kernel virtual address +space. +.It pmap_t Fn "pmap_create" "void" +Create a physical map and return it to the caller. +The reference count on the new map is 1. 
+.It void Fn "pmap_destroy" "pmap_t pmap" +Drop the reference count on the specified physical map. +If the reference count drops to 0, all resources associated with the +physical map are released and the physical map destroyed. +In the case of a drop-to-0, no mappings will exist in the map. +The +.Nm +implementation may assert this. +.It void Fn "pmap_reference" "pmap_t pmap" +Increment the reference count on the specified physical map. +.It long Fn "pmap_resident_count" "pmap_t pmap" +Query the +.Dq resident pages +statistic for +.Fa pmap . +.Pp +Note that this function may be provided as a C pre-processor macro. +.It long Fn "pmap_wired_count" "pmap_t pmap" +Query the +.Dq wired pages +statistic for +.Fa pmap . +.Pp +Note that this function may be provided as a C pre-processor macro. +.It int Fn "pmap_enter" "pmap_t pmap" "vaddr_t va" "paddr_t pa" \ + "vm_prot_t prot" "u_int flags" +Create a mapping in physical map +.Fa pmap +for the physical address +.Fa pa +at the virtual address +.Fa va +with protection specified by bits in +.Fa prot : +.Bl -tag -width "VM_PROT_EXECUTE " -offset indent +.It VM_PROT_READ +The mapping must allow reading. +.It VM_PROT_WRITE +The mapping must allow writing. +.It VM_PROT_EXECUTE +The page mapped contains instructions that will be executed by the +processor. +.El +.Pp +The +.Fa flags +argument contains protection bits (the same bits as used in the +.Fa prot +argument) indicating the type of access that caused the mapping to +be created. +This information may be used to seed modified/referenced +information for the page being mapped, possibly avoiding redundant faults +on platforms that track modified/referenced information in software. +Other information provided by +.Fa flags : +.Bl -tag -width "PMAP_CANFAIL " -offset indent +.It PMAP_WIRED +The mapping being created is a wired mapping. +.It PMAP_CANFAIL +The call to +.Fn pmap_enter +is allowed to fail. 
+If this flag is +.Em not +set, and the +.Fn pmap_enter +call is unable to create the mapping, perhaps due to insufficient +resources, the +.Nm +module must panic. +.It PMAP_NOCACHE +The mapping being created is +.Em not +cached. +Write accesses have a write-through policy. +No speculative memory accesses. +.It PMAP_WRITE_COMBINE +The mapping being created is +.Em not +cached. +Writes are combined and done in one burst. +Speculative read accesses may be allowed. +.It PMAP_WRITE_BACK +All accesses to the created mapping are cached. +On reads, cachelines become shared or exclusive if allocated on cache miss. +On writes, cachelines become modified on a cache miss. +.It PMAP_NOCACHE_OVR +Same as PMAP_NOCACHE but mapping is overrideable (e.g. on x86 by MTRRs). +.El +.Pp +The access type provided in the +.Fa flags +argument will never exceed the protection specified by +.Fa prot . +The +.Nm +implementation may assert this. +Note that on systems that do not provide hardware support for +tracking modified/referenced information, modified/referenced +information for the page +.Em must +be seeded with the access type provided in +.Fa flags +if the +.Dv PMAP_WIRED +flag is set. +This is to prevent a fault for the purpose of tracking +modified/referenced information from occurring while the system is in +a critical section where a fault would be unacceptable. +.Pp +Note that +.Fn pmap_enter +is sometimes called to enter a mapping at a virtual address +for which a mapping already exists. +In this situation, the implementation must take whatever action is +necessary to invalidate the previous mapping before entering the new one. +.Pp +Also note that +.Fn pmap_enter +is sometimes called to change the protection for a pre-existing +mapping, or to change the +.Dq wired +attribute for a pre-existing mapping. +.Pp +The +.Fn pmap_enter +function returns 0 on success or an error code indicating the mode +of failure. 
+.It void Fn "pmap_remove" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" +Remove mappings from the virtual address range +.Fa sva +to +.Fa eva +from the specified physical map. +.It bool Fn "pmap_remove_all" "pmap_t pmap" +This function is a hint to the +.Nm pmap +implementation that all entries in +.Fa pmap +will be removed before any more entries are entered. +Following this call, there will be +.Fn pmap_remove +calls resulting in every mapping being removed, followed by either +.Fn pmap_destroy +or +.Fn pmap_update . +No other +.Nm pmap +interfaces which take +.Fa pmap +as an argument will be called during this process. +Other interfaces which might need to access +.Fa pmap +(such as +.Fn pmap_page_protect ) +are permitted during this process. +.Pp +The +.Nm pmap +implementation is free to either remove all the +.Nm pmap Ns 's +mappings immediately in +.Fn pmap_remove_all , +or to use the knowledge of the upcoming +.Fn pmap_remove +calls to optimize the removals (or to just ignore this call). +.Pp +If all mappings in the address space have been removed, +.Fn pmap_remove_all +should return +.Dv true +to indicate that that the pmap is now empty. +In this case UVM will skip all subsequent calls to +.Fn pmap_remove +and +.Fn pmap_update +for the pmap, that would otherwise be required to clean it out. +If any mappings could possibly remain, +.Fn pmap_remove_all +must return +.Dv false . +.It void Fn "pmap_protect" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" \ + "vm_prot_t prot" +Set the protection of the mappings in the virtual address range +.Fa sva +to +.Fa eva +in the specified physical map. +.It void Fn "pmap_unwire" "pmap_t pmap" "vaddr_t va" +Clear the +.Dq wired +attribute on the mapping for virtual address +.Fa va . +.It bool Fn "pmap_extract" "pmap_t pmap" "vaddr_t va" "paddr_t *pap" +This function extracts a mapping from the specified physical map. 
+It serves two purposes: to determine if a mapping exists for the specified +virtual address, and to determine what physical address is mapped at the +specified virtual address. +The +.Fn pmap_extract +should return the physical address for any kernel-accessible address, +including KSEG-style direct-mapped kernel addresses. +.Pp +The +.Fn pmap_extract +function returns +.Dv false +if a mapping for +.Fa va +does not exist. +Otherwise, it returns +.Dv true +and places the physical address mapped at +.Fa va +into +.Fa *pap +if the +.Fa pap +argument is non-NULL. +.It void Fn "pmap_kenter_pa" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" \ + "u_int flags" +Enter an +.Dq unmanaged +mapping for physical address +.Fa pa +at virtual address +.Fa va +with protection specified by bits in +.Fa prot : +.Bl -tag -width "VM_PROT_EXECUTE " -offset indent +.It VM_PROT_READ +The mapping must allow reading. +.It VM_PROT_WRITE +The mapping must allow writing. +.It VM_PROT_EXECUTE +The page mapped contains instructions that will be executed by the +processor. +.El +.Pp +Information provided by +.Fa flags : +.Bl -tag -width "PMAP_NOCACHE " -offset indent +.It PMAP_NOCACHE +The mapping being created is +.Em not +cached. +Write accesses have a write-through policy. +No speculative memory accesses. +.It PMAP_WRITE_COMBINE +The mapping being created is +.Em not +cached. +Writes are combined and done in one burst. +Speculative read accesses may be allowed. +.It PMAP_WRITE_BACK +All accesses to the created mapping are cached. +On reads, cachelines become shared or exclusive if allocated on cache miss. +On writes, cachelines become modified on a cache miss. +.It PMAP_NOCACHE_OVR +Same as PMAP_NOCACHE but mapping is overrideable (e.g. on x86 by MTRRs). +.El +.Pp +Mappings of this type are always +.Dq wired , +and are unaffected by routines that alter the protection of pages +(such as +.Fn pmap_page_protect ) . 
+Such mappings are also not included in the gathering of modified/referenced +information about a page. +Mappings entered with +.Fn pmap_kenter_pa +by machine-independent code +.Em must not +have execute permission, as the +data structures required to track execute permission of a page may not +be available to +.Fn pmap_kenter_pa . +Machine-independent code is not allowed to enter a mapping with +.Fn pmap_kenter_pa +at a virtual address for which a valid mapping already exists. +Mappings created with +.Fn pmap_kenter_pa +may be removed only with a call to +.Fn pmap_kremove . +.Pp +Note that +.Fn pmap_kenter_pa +must be safe for use in interrupt context. +.Fn splvm +blocks interrupts that might cause +.Fn pmap_kenter_pa +to be called. +.It void Fn "pmap_kremove" "vaddr_t va" "vsize_t size" +Remove all mappings starting at virtual address +.Fa va +for +.Fa size +bytes from the kernel physical map. +All mappings that are removed must be the +.Dq unmanaged +type created with +.Fn pmap_kenter_pa . +The implementation may assert this. +.It void Fn "pmap_copy" "pmap_t dst_map" "pmap_t src_map" "vaddr_t dst_addr" \ + "vsize_t len" "vaddr_t src_addr" +This function copies the mappings starting at +.Fa src_addr +in +.Fa src_map +for +.Fa len +bytes into +.Fa dst_map +starting at +.Fa dst_addr . +.Pp +Note that while this function is required to be provided by a +.Nm +implementation, it is not actually required to do anything. +.Fn pmap_copy +is merely advisory (it is used in the +.Xr fork 2 +path to +.Dq pre-fault +the child's address space). +.It void Fn "pmap_update" "pmap_t pmap" +This function is used to inform the +.Nm +module that all physical mappings, for the specified pmap, must now be +correct. +That is, all delayed virtual-to-physical mappings updates (such as TLB +invalidation or address space identifier updates) must be completed. 
+This routine must be used after calls to +.Fn pmap_enter , +.Fn pmap_remove , +.Fn pmap_protect , +.Fn pmap_kenter_pa , +and +.Fn pmap_kremove +in order to ensure correct operation of the virtual memory system. +.Pp +If a +.Nm +implementation does not delay virtual-to-physical mapping updates, +.Fn pmap_update +has no operation. +In this case, the call may be deleted using a C pre-processor macro in +.In machine/pmap.h . +.It void Fn "pmap_activate" "struct lwp *l" +Activate the physical map used by the process behind lwp +.Fa l . +on the current CPU. +This is called by the virtual memory system when the +virtual memory context for a process is changed, and is also +used by the context switch code to program the memory management hardware +with the process's page table base, etc. +All calls to +.Fn pmap_activate +from machine-independent code are made with preemption disabled and with +.Fa l +as the current lwp. +.Pp +The +.Fn pmap_activate +call, like +.Fn pmap_deactivate , +must never block, as it is used for context switching. +.It void Fn "pmap_deactivate" "struct lwp *l" +Deactivate the physical map used by the process behind lwp +.Fa l . +It is generally used in conjunction with +.Fn pmap_activate . +Like +.Fn pmap_activate , +.Fn pmap_deactivate +is called by machine-independent code with preemption disabled and with +.Fa l +as the current lwp. +.Pp +As above, +.Fn pmap_deactivate +must never block. +.It void Fn "pmap_zero_page" "paddr_t pa" +Zero the PAGE_SIZE sized region starting at physical address +.Fa pa . +The +.Nm +implementation must take whatever steps are necessary to map the +page to a kernel-accessible address and zero the page. +It is suggested that implementations use an optimized zeroing algorithm, +as the performance of this function directly impacts page fault performance. +The implementation may assume that the region is +PAGE_SIZE aligned and exactly PAGE_SIZE bytes in length. 
+.Pp +Note that the cache configuration of the platform should also be +considered in the implementation of +.Fn pmap_zero_page . +For example, on systems with a physically-addressed cache, the cache +load caused by zeroing the page will not be wasted, as the zeroing is +usually done on-demand. +However, on systems with a virtually-addressed cached, the cache load +caused by zeroing the page +.Em will +be wasted, as the page will be mapped at a virtual address which is +different from that used to zero the page. +In the virtually-addressed cache case, care should also be taken to +avoid cache alias problems. +.It void Fn "pmap_copy_page" "paddr_t src" "paddr_t dst" +Copy the PAGE_SIZE sized region starting at physical address +.Fa src +to the same sized region starting at physical address +.Fa dst . +The +.Nm +implementation must take whatever steps are necessary to map the +source and destination pages to a kernel-accessible address and +perform the copy. +It is suggested that implementations use an optimized copy algorithm, +as the performance of this function directly impacts page fault performance. +The implementation may assume that both regions are PAGE_SIZE aligned +and exactly PAGE_SIZE bytes in length. +.Pp +The same cache considerations that apply to +.Fn pmap_zero_page +apply to +.Fn pmap_copy_page . +.It void Fn "pmap_page_protect" "struct vm_page *pg" "vm_prot_t prot" +Lower the permissions for all mappings of the page +.Fa pg +to +.Fa prot . +This function is used by the virtual memory system to implement +copy-on-write (called with VM_PROT_READ set in +.Fa prot ) +and to revoke all mappings when cleaning a page (called with +no bits set in +.Fa prot ) . +Access permissions must never be added to a page as a result of +this call. +.It bool Fn "pmap_clear_modify" "struct vm_page *pg" +Clear the +.Dq modified +attribute on the page +.Fa pg . 
+.Pp +The +.Fn pmap_clear_modify +function returns +.Dv true +or +.Dv false +indicating whether or not the +.Dq modified +attribute was set on the page before it was cleared. +.Pp +Note that this function may be provided as a C pre-processor macro. +.It bool Fn "pmap_clear_reference" "struct vm_page *pg" +Clear the +.Dq referenced +attribute on the page +.Fa pg . +.Pp +The +.Fn pmap_clear_reference +function returns +.Dv true +or +.Dv false +indicating whether or not the +.Dq referenced +attribute was set on the page before it was cleared. +.Pp +Note that this function may be provided as a C pre-processor macro. +.It bool Fn "pmap_is_modified" "struct vm_page *pg" +Test whether or not the +.Dq modified +attribute is set on page +.Fa pg . +.Pp +Note that this function may be provided as a C pre-processor macro. +.It bool Fn "pmap_is_referenced" "struct vm_page *pg" +Test whether or not the +.Dq referenced +attribute is set on page +.Fa pg . +.Pp +Note that this function may be provided as a C pre-processor macro. +.It paddr_t Fn "pmap_phys_address" "paddr_t cookie" +Convert a cookie returned by a device +.Fn mmap +function into a physical address. +This function is provided to accommodate systems which have physical +address spaces larger than can be directly addressed by the platform's +.Fa paddr_t +type. +The existence of this function is highly dubious, and it is +expected that this function will be removed from the +.Nm pmap +API in a future release of +.Nx . +.Pp +Note that this function may be provided as a C pre-processor macro. +.El +.Ss OPTIONAL FUNCTIONS +This section describes several optional functions in the +.Nm +API. +.Bl -tag -width indent -offset indent +.It vaddr_t Fn "pmap_steal_memory" "vsize_t size" "vaddr_t *vstartp" \ + "vaddr_t *vendp" +This function is a bootstrap memory allocator, which may be provided +as an alternative to the bootstrap memory allocator used within +.Xr uvm 9 +itself. 
+It is particularly useful on systems which provide for example a direct-mapped +memory segment. +This function works by stealing pages from the (to be) managed memory +pool, which has already been provided to +.Xr uvm 9 +in the vm_physmem[] array. +The pages are then mapped, or otherwise made accessible to the kernel, +in a machine-dependent way. +The memory must be zeroed by +.Fn pmap_steal_memory . +Note that memory allocated with +.Fn pmap_steal_memory +will never be freed, and mappings made by +.Fn pmap_steal_memory +must never be +.Dq forgotten . +.Pp +Note that +.Fn pmap_steal_memory +should not be used as a general-purpose early-startup memory +allocation routine. +It is intended to be used only by the +.Fn uvm_pageboot_alloc +routine and its supporting routines. +If you need to allocate memory before the virtual memory system is +initialized, use +.Fn uvm_pageboot_alloc . +See +.Xr uvm 9 +for more information. +.Pp +The +.Fn pmap_steal_memory +function returns the kernel-accessible address of the allocated memory. +If no memory can be allocated, or if allocated memory cannot be mapped, +the function must panic. +.Pp +If the +.Fn pmap_steal_memory +function uses address space from the range provided to +.Xr uvm 9 +by the +.Fn pmap_virtual_space +call, then +.Fn pmap_steal_memory +must adjust +.Fa *vstartp +and +.Fa *vendp +upon return. +.Pp +The +.Fn pmap_steal_memory +function is enabled by defining the C pre-processor macro +.Dv PMAP_STEAL_MEMORY +in +.In machine/pmap.h . +.It vaddr_t Fn "pmap_growkernel" "vaddr_t maxkvaddr" +Management of the kernel virtual address space is complicated by the +fact that it is not always safe to wait for resources with which to +map a kernel virtual address. +However, it is not always desirable to pre-allocate all resources +necessary to map the entire kernel virtual address space. +.Pp +The +.Fn pmap_growkernel +interface is designed to help alleviate this problem. 
+The virtual memory startup code may choose to allocate an initial set +of mapping resources (e.g., page tables) and set an internal variable +indicating how much kernel virtual address space can be mapped using +those initial resources. +Then, when the virtual memory system wishes to map something +at an address beyond that initial limit, it calls +.Fn pmap_growkernel +to pre-allocate more sources with which to create the mapping. +Note that once additional kernel virtual address space mapping resources +have been allocated, they should not be freed; it is likely they will +be needed again. +.Pp +The +.Fn pmap_growkernel +function returns the new maximum kernel virtual address that can be mapped +with the resources it has available. +If new resources cannot be allocated, +.Fn pmap_growkernel +must panic. +.Pp +The +.Fn pmap_growkernel +function is enabled by defining the C pre-processor macro +.Dv PMAP_GROWKERNEL +in +.In machine/pmap.h . +.It void Fn "pmap_fork" "pmap_t src_map" "pmap_t dst_map" +Some +.Nm +implementations may need to keep track of other information not +directly related to the virtual address space. +For example, on the i386 port, the Local Descriptor Table state of a +process is associated with the pmap (this is due to the fact that +applications manipulate the Local Descriptor Table directly expect it +to be logically associated with the virtual memory state of the process). +.Pp +The +.Fn pmap_fork +function is provided as a way to associate information from +.Fa src_map +with +.Fa dst_map +when a +.Dv vmspace +is forked. +.Fn pmap_fork +is called from +.Fn uvmspace_fork . +.Pp +The +.Fn pmap_fork +function is enabled by defining the C pre-processor macro +.Dv PMAP_FORK +in +.In machine/pmap.h . +.It vaddr_t Fn "PMAP_MAP_POOLPAGE" "paddr_t pa" +This function is used by the +.Xr pool 9 +memory pool manager. +Pools allocate backing pages one at a time. 
+This is provided as a means to use hardware features such as a
+direct-mapped memory segment to map the pages used by the
+.Xr pool 9
+allocator.
+This can lead to better performance by e.g. reducing TLB contention.
+.Pp
+.Fn PMAP_MAP_POOLPAGE
+returns the kernel-accessible address of the page being mapped.
+It must always succeed.
+.Pp
+The use of
+.Fn PMAP_MAP_POOLPAGE
+is enabled by defining it as a C pre-processor macro in
+.In machine/pmap.h .
+If
+.Fn PMAP_MAP_POOLPAGE
+is defined,
+.Fn PMAP_UNMAP_POOLPAGE
+must also be defined.
+.Pp
+The following is an example of how to define
+.Fn PMAP_MAP_POOLPAGE :
+.Bd -literal -offset indent
+#define PMAP_MAP_POOLPAGE(pa) MIPS_PHYS_TO_KSEG0((pa))
+.Ed
+.Pp
+This takes the physical address of a page and returns the KSEG0
+address of that page on a MIPS processor.
+.It paddr_t Fn "PMAP_UNMAP_POOLPAGE" "vaddr_t va"
+This function is the inverse of
+.Fn PMAP_MAP_POOLPAGE .
+.Pp
+.Fn PMAP_UNMAP_POOLPAGE
+returns the physical address of the page corresponding to the
+provided kernel-accessible address.
+.Pp
+The use of
+.Fn PMAP_UNMAP_POOLPAGE
+is enabled by defining it as a C pre-processor macro in
+.In machine/pmap.h .
+If
+.Fn PMAP_UNMAP_POOLPAGE
+is defined,
+.Fn PMAP_MAP_POOLPAGE
+must also be defined.
+.Pp
+The following is an example of how to define
+.Fn PMAP_UNMAP_POOLPAGE :
+.Bd -literal -offset indent
+#define PMAP_UNMAP_POOLPAGE(va) MIPS_KSEG0_TO_PHYS((va))
+.Ed
+.Pp
+This takes the KSEG0 address of a previously-mapped pool page
+and returns the physical address of that page on a MIPS processor.
+.It void Fn "PMAP_PREFER" "vaddr_t hint" "vaddr_t *vap" "vsize_t sz" "int td"
+This function is used by
+.Xr uvm_map 9
+to adjust a virtual address being allocated in order to avoid
+cache alias problems.
+If necessary, the virtual address pointed to by
+.Fa vap
+will be advanced.
+.Fa hint
+is an object offset which will be mapped into the resulting virtual address, and
+.Fa sz
+is the size of the region being mapped in bytes.
+.Fa td
+indicates if the machine dependent pmap uses the topdown VM.
+.Pp
+The use of
+.Fn PMAP_PREFER
+is enabled by defining it as a C pre-processor macro in
+.In machine/pmap.h .
+.It void Fn "pmap_procwr" "struct proc *p" "vaddr_t va" "vsize_t size"
+Synchronize CPU instruction caches of the specified range.
+The address space is designated by
+.Fa p .
+This function is typically used to flush instruction caches
+after code modification.
+.Pp
+The use of
+.Fn pmap_procwr
+is enabled by defining a C pre-processor macro
+.Dv PMAP_NEED_PROCWR
+in
+.In machine/pmap.h .
+.El
+.Sh SEE ALSO
+.Xr uvm 9
+.Sh HISTORY
+The
+.Nm
+module was originally part of the design of the virtual memory system
+in the Mach Operating System.
+The goal was to provide a clean separation between the machine-independent
+and the machine-dependent portions of the virtual memory system, in
+stark contrast to the original
+.Bx 3
+virtual memory system, which was specific to the VAX.
+.Pp
+Between
+.Bx 4.3
+and
+.Bx 4.4 ,
+the Mach virtual memory system, including the
+.Nm
+API, was ported to
+.Bx
+and included in the
+.Bx 4.4
+release.
+.Pp
+.Nx
+inherited the
+.Bx
+version of the Mach virtual memory system.
+.Nx 1.4
+was the first
+.Nx
+release with the new
+.Xr uvm 9
+virtual memory system, which included several changes to the
+.Nm
+API.
+Since the introduction of
+.Xr uvm 9 ,
+the
+.Nm
+API has evolved further.
+.Sh AUTHORS
+The original Mach VAX
+.Nm
+module was written by
+.An Avadis Tevanian, Jr.
+and
+.An Michael Wayne Young .
+.Pp
+.An Mike Hibler
+did the integration of the Mach virtual memory system into
+.Bx 4.4
+and implemented a
+.Nm
+module for the Motorola 68020+68851/68030/68040.
+.Pp
+The
+.Nm
+API as it exists in
+.Nx
+is derived from
+.Bx 4.4 ,
+and has been modified by
+.An Chuck Cranor ,
+.An Charles M.
Hannum ,
+.An Chuck Silvers ,
+.An Wolfgang Solfrank ,
+.An Bill Sommerfeld ,
+and
+.An Jason R. Thorpe .
+.Pp
+The author of this document is
+.An Jason R. Thorpe
+.Aq thorpej@NetBSD.org .
+.Sh BUGS
+The use and definition of
+.Fn pmap_activate
+and
+.Fn pmap_deactivate
+needs to be reexamined.
+.Pp
+The use of
+.Fn pmap_copy
+needs to be reexamined.
+Empirical evidence suggests that performance of the system suffers when
+.Fn pmap_copy
+actually performs its defined function.
+This is largely due to the fact that the copy of the virtual-to-physical
+mappings is wasted if the process calls
+.Xr execve 2
+after
+.Xr fork 2 .
+For this reason, it is recommended that
+.Nm
+implementations leave the body of the
+.Fn pmap_copy
+function empty for now.
