Print this page
OS-6546 Use PCID if KPTI is enabled


 237 extern void setup_vaddr_for_ppcopy(struct cpu *);
 238 extern void teardown_vaddr_for_ppcopy(struct cpu *);
 239 extern void clear_boot_mappings(uintptr_t, uintptr_t);
 240 
 241 /*
 242  * magic value to indicate that all TLB entries should be demapped.
 243  */
 244 #define DEMAP_ALL_ADDR  (~(uintptr_t)0)
 245 
 246 /*
 247  * not in any include file???
 248  */
 249 extern void halt(char *fmt);
 250 
 251 /*
 252  * x86 specific routines for use online in setup or i86pc/vm files
 253  */
 254 extern void hat_kern_alloc(caddr_t segmap_base, size_t segmap_size,
 255         caddr_t ekernelheap);
 256 extern void hat_kern_setup(void);
 257 extern void hat_tlb_inval(struct hat *hat, uintptr_t va);
 258 extern void hat_pte_unmap(htable_t *ht, uint_t entry, uint_t flags,
 259         x86pte_t old_pte, void *pte_ptr, boolean_t tlb);
 260 extern void hat_init_finish(void);
 261 extern caddr_t hat_kpm_pfn2va(pfn_t pfn);
 262 extern pfn_t hat_kpm_va2pfn(caddr_t);
 263 extern page_t *hat_kpm_vaddr2page(caddr_t);
 264 extern uintptr_t hat_kernelbase(uintptr_t);
 265 extern void hat_kmap_init(uintptr_t base, size_t len);
 266 
 267 extern hment_t *hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry);
 268 
 269 #if defined(__amd64)
 270 extern void hati_cpu_punchin(cpu_t *cpu, uintptr_t va, uint_t attrs);
 271 extern void mmu_calc_user_slots(void);
 272 #endif

 273 
 274 #if !defined(__xpv)
 275 /*
 276  * routines to deal with delayed TLB invalidations for idle CPUs
 277  */
 278 extern void tlb_going_idle(void);
 279 extern void tlb_service(void);
 280 #endif
 281 
 282 /*
 283  * Hat switch function invoked to load a new context into %cr3
 284  */
 285 extern void hat_switch(struct hat *hat);












 286 
 287 #ifdef __xpv
 288 /*
 289  * Interfaces to use around code that maps/unmaps grant table references.
 290  */
 291 extern void hat_prepare_mapping(hat_t *, caddr_t, uint64_t *);
 292 extern void hat_release_mapping(hat_t *, caddr_t);
 293 
 294 #define XPV_DISALLOW_MIGRATE()  xen_block_migrate()
 295 #define XPV_ALLOW_MIGRATE()     xen_allow_migrate()
 296 
 297 #else
 298 
 299 #define XPV_DISALLOW_MIGRATE()  /* nothing */
 300 #define XPV_ALLOW_MIGRATE()     /* nothing */
 301 
 302 #define pfn_is_foreign(pfn)     __lintzero
 303 
 304 #endif
















 305 

 306 
 307 #endif  /* _KERNEL */
 308 
 309 #ifdef  __cplusplus
 310 }
 311 #endif
 312 
 313 #endif  /* _VM_HAT_I86_H */


 237 extern void setup_vaddr_for_ppcopy(struct cpu *);
 238 extern void teardown_vaddr_for_ppcopy(struct cpu *);
 239 extern void clear_boot_mappings(uintptr_t, uintptr_t);
 240 
 241 /*
 242  * magic value to indicate that all TLB entries should be demapped.
 243  */
 244 #define DEMAP_ALL_ADDR  (~(uintptr_t)0)
 245 
 246 /*
 247  * not in any include file???
 248  */
 249 extern void halt(char *fmt);
 250 
 251 /*
 252  * x86 specific routines for use online in setup or i86pc/vm files
 253  */
 254 extern void hat_kern_alloc(caddr_t segmap_base, size_t segmap_size,
 255         caddr_t ekernelheap);
 256 extern void hat_kern_setup(void);

 257 extern void hat_pte_unmap(htable_t *ht, uint_t entry, uint_t flags,
 258         x86pte_t old_pte, void *pte_ptr, boolean_t tlb);
 259 extern void hat_init_finish(void);
 260 extern caddr_t hat_kpm_pfn2va(pfn_t pfn);
 261 extern pfn_t hat_kpm_va2pfn(caddr_t);
 262 extern page_t *hat_kpm_vaddr2page(caddr_t);
 263 extern uintptr_t hat_kernelbase(uintptr_t);
 264 extern void hat_kmap_init(uintptr_t base, size_t len);
 265 
 266 extern hment_t *hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry);
 267 


 268 extern void mmu_calc_user_slots(void);
 269 extern void hat_tlb_inval(struct hat *hat, uintptr_t va);
 270 extern void hat_switch(struct hat *hat);
 271 
/*
 * Byte length spanned by a tlb_range_t: tr_cnt pages, each of the size
 * implied by the range's page-table level.  NOTE(review): LEVEL_SHIFT()
 * is defined elsewhere -- presumably the log2 page size for that level;
 * confirm against its definition.
 */
 272 #define TLB_RANGE_LEN(r)        ((r)->tr_cnt << LEVEL_SHIFT((r)->tr_level))






 273 
 274 /*
 275  * A range of virtual pages for purposes of demapping.
 276  */
 277 typedef struct tlb_range {
 278         uintptr_t tr_va;        /* starting virtual address of the range */
 279         ulong_t tr_cnt;         /* number of pages in range */
 280         int8_t  tr_level;       /* page table level (sets the page size) */
 281 } tlb_range_t;
 282 
 283 #if defined(__xpv)
 284 
 285 #define XPV_DISALLOW_MIGRATE()  xen_block_migrate()
 286 #define XPV_ALLOW_MIGRATE()     xen_allow_migrate()
 287 
 288 #define mmu_flush_tlb_page(va)  mmu_invlpg((caddr_t)va)
 289 #define mmu_flush_tlb_kpage(va) mmu_invlpg((caddr_t)va)
 290 

 291 /*
 292  * Interfaces to use around code that maps/unmaps grant table references.
 293  */
 294 extern void hat_prepare_mapping(hat_t *, caddr_t, uint64_t *);
 295 extern void hat_release_mapping(hat_t *, caddr_t);
 296 



 297 #else
 298 
 299 #define XPV_DISALLOW_MIGRATE()  /* nothing */
 300 #define XPV_ALLOW_MIGRATE()     /* nothing */
 301 
 302 #define pfn_is_foreign(pfn)     __lintzero
 303 
 304 typedef enum flush_tlb_type {
 305         FLUSH_TLB_ALL = 1,        /* flush all TLB entries (presumably incl. global) */
 306         FLUSH_TLB_NONGLOBAL = 2,  /* flush non-global entries only */
 307         FLUSH_TLB_RANGE = 3,      /* flush the tlb_range_t given to mmu_flush_tlb() */
 308 } flush_tlb_type_t;
 309 
 310 extern void mmu_flush_tlb(flush_tlb_type_t, tlb_range_t *);
 311 extern void mmu_flush_tlb_kpage(uintptr_t);
 312 extern void mmu_flush_tlb_page(uintptr_t);
 313 
 314 extern void hati_cpu_punchin(cpu_t *cpu, uintptr_t va, uint_t attrs);
 315 
 316 /*
 317  * routines to deal with delayed TLB invalidations for idle CPUs
 318  */
 319 extern void tlb_going_idle(void);
 320 extern void tlb_service(void);
 321 
 322 #endif /* !__xpv */
 323 
 324 #endif  /* _KERNEL */
 325 
 326 #ifdef  __cplusplus
 327 }
 328 #endif
 329 
 330 #endif  /* _VM_HAT_I86_H */