/*
* Copyright (c) 2000-2021 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
*/
/*
* File: vm/vm_map.h
* Author: Avadis Tevanian, Jr., Michael Wayne Young
* Date: 1985
*
* Virtual memory map module definitions.
*
* Contributors:
* avie, dlb, mwyoung
*/
#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>
#ifdef KERNEL_PRIVATE
#include <sys/cdefs.h>
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
__BEGIN_DECLS
extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);
/* Setup reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
vm_map_t new_map,
task_t task,
boolean_t is64bit,
void *fsroot,
cpu_type_t cpu,
cpu_subtype_t cpu_subtype,
boolean_t reslide,
boolean_t is_driverkit);
__END_DECLS
#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>
#include <kern/thread.h>
#include <os/refcnt.h>
#define current_map_fast() (current_thread()->map)
#define current_map() (current_map_fast())
#include <vm/vm_map_store.h>
/*
* Types defined:
*
* vm_map_t the high-level address map data structure.
* vm_map_entry_t an entry in an address map.
* vm_map_version_t a timestamp of a map, for use with vm_map_lookup
* vm_map_copy_t represents memory copied from an address map,
* used for inter-map copy operations
*/
typedef struct vm_map_entry *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) NULL)
/*
* Type: vm_map_object_t [internal use only]
*
* Description:
* The target of an address mapping, either a virtual
* memory object or a sub map (of the kernel map).
*/
typedef union vm_map_object {
vm_object_t vmo_object; /* a virtual memory object */
vm_map_t vmo_submap; /* belongs to another map */
} vm_map_object_t;
#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock)
#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */
/*
* Type: vm_named_entry_t [internal use only]
*
* Description:
* Description of a mapping to a memory cache object.
*
* Implementation:
* While the handle to this object is used as a means to map
* and pass around the right to map regions backed by pagers
* of all sorts, the named_entry itself is only manipulated
* by the kernel. Named entries hold information on the
* right to map a region of a cached object. Namely,
* the target cache object, the beginning and ending of the
* region to be mapped, and the permissions, (read, write)
* with which it can be mapped.
*
*/
struct vm_named_entry {
decl_lck_mtx_data(, Lock); /* Synchronization */
union {
vm_map_t map; /* map backing submap */
vm_map_copy_t copy; /* a VM map copy */
} backing;
vm_object_offset_t offset; /* offset into object */
vm_object_size_t size; /* size of region */
vm_object_offset_t data_offset; /* offset to first byte of data */
vm_prot_t protection; /* access permissions */
int ref_count; /* Number of references */
unsigned int /* Is backing.xxx : */
/* boolean_t */ is_object:1, /* ... a VM object (wrapped in a VM map copy) */
/* boolean_t */ internal:1, /* ... an internal object */
/* boolean_t */ is_sub_map:1, /* ... a submap? */
/* boolean_t */ is_copy:1; /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
queue_chain_t named_entry_list;
int named_entry_alias;
mach_port_t named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
void *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};
/*
* Type: vm_map_entry_t [internal use only]
*
* Description:
* A single mapping within an address map.
*
* Implementation:
* Address map entries consist of start and end addresses,
* a VM object (or sub map) and offset into that object,
* and user-exported inheritance and protection information.
* Control information for virtual copy operations is also
* stored in the address map entry.
*/
struct vm_map_links {
struct vm_map_entry *prev; /* previous entry */
struct vm_map_entry *next; /* next entry */
vm_map_offset_t start; /* start address */
vm_map_offset_t end; /* end address */
};
/*
* Bit 3 of the protection and max_protection bitfields in a vm_map_entry
* does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
* to convert between the "packed" representation in the vm_map_entry's fields
* and the equivalent bits defined in vm_prot_t.
*/
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY)
#endif
/*
* FOOTPRINT ACCOUNTING:
* The "memory footprint" is better described in the pmap layer.
*
* At the VM level, these 2 vm_map_entry_t fields are relevant:
* iokit_mapped:
* For an "iokit_mapped" entry, we add the size of the entry to the
* footprint when the entry is entered into the map and we subtract that
* size when the entry is removed. No other accounting should take place.
* "use_pmap" should be FALSE but is not taken into account.
* use_pmap: (only when is_sub_map is FALSE)
* This indicates if we should ask the pmap layer to account for pages
* in this mapping. If FALSE, we expect that another form of accounting
* is being used (e.g. "iokit_mapped" or the explicit accounting of
* non-volatile purgable memory).
*
* So the logic is mostly:
* if entry->is_sub_map == TRUE
* anything in a submap does not count for the footprint
* else if entry->iokit_mapped == TRUE
* footprint includes the entire virtual size of this entry
* else if entry->use_pmap == FALSE
* tell pmap NOT to account for pages being pmap_enter()'d from this
* mapping (i.e. use "alternate accounting")
* else
* pmap will account for pages being pmap_enter()'d from this mapping
* as it sees fit (only if anonymous, etc...)
*/
struct vm_map_entry {
struct vm_map_links links; /* links to other entries */
#define vme_prev links.prev
#define vme_next links.next
#define vme_start links.start
#define vme_end links.end
struct vm_map_store store;
union vm_map_object vme_object; /* object I point to */
vm_object_offset_t vme_offset; /* offset into object */
unsigned int
/* boolean_t */ is_shared:1, /* region is shared */
/* boolean_t */ is_sub_map:1, /* Is "object" a submap? */
/* boolean_t */ in_transition:1, /* Entry being changed */
/* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
/* vm_behavior_t */ behavior:2, /* user paging behavior hint */
/* behavior is not defined for submap type */
/* boolean_t */ needs_copy:1, /* object need to be copied? */
/* Only in task maps: */
/* vm_prot_t-like */ protection:4, /* protection code, bit3=UEXEC */
/* vm_prot_t-like */ max_protection:4, /* maximum protection, bit3=UEXEC */
/* vm_inherit_t */ inheritance:2, /* inheritance */
/* boolean_t */ use_pmap:1, /*
* use_pmap is overloaded:
* if "is_sub_map":
* use a nested pmap?
* else (i.e. if object):
* use pmap accounting
* for footprint?
*/
/* boolean_t */ no_cache:1, /* should new pages be cached? */
/* boolean_t */ permanent:1, /* mapping can not be removed */
/* boolean_t */ superpage_size:1, /* use superpages of a certain size */
/* boolean_t */ map_aligned:1, /* align to map's page size */
/* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
* this entry if it is being deleted
* without unwiring them */
/* boolean_t */ used_for_jit:1,
/* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */
/* iokit accounting: use the virtual size rather than resident size: */
/* boolean_t */ iokit_acct:1,
/* boolean_t */ vme_resilient_codesign:1,
/* boolean_t */ vme_resilient_media:1,
/* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
/* boolean_t */ vme_no_copy_on_read:1,
/* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
/* boolean_t */ __padding:1;
unsigned short wired_count; /* can be paged if = 0 */
unsigned short user_wired_count; /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
struct vm_map_header *vme_creation_maphdr;
uintptr_t vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
vm_map_offset_t vme_start_original;
vm_map_offset_t vme_end_original;
uintptr_t vme_insertion_bt[16];
#endif
};
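/*
 * Minimal sketch of the footprint rules spelled out in the "FOOTPRINT
 * ACCOUNTING" comment above ("iokit_mapped" there corresponds to the
 * "iokit_acct" bit in this structure). The helper name and return values
 * are illustrative only, not part of the VM interface.
 */
static inline int
vme_footprint_rule_example(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return 0;       /* nothing in a submap counts toward the footprint */
	}
	if (entry->iokit_acct) {
		return 1;       /* count the entire virtual size of this entry */
	}
	if (!entry->use_pmap) {
		return 2;       /* "alternate accounting": pmap must not count these pages */
	}
	return 3;               /* pmap accounts for pages as they are pmap_enter()'d */
}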
#define VME_SUBMAP_PTR(entry) \
(&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry) \
((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry) \
(&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry) \
((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry) \
((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK (FOURK_PAGE_MASK)
#define VME_ALIAS(entry) \
((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
static inline void
VME_OBJECT_SET(
vm_map_entry_t entry,
vm_object_t object)
{
entry->vme_object.vmo_object = object;
if (object != VM_OBJECT_NULL && !object->internal) {
entry->vme_resilient_media = FALSE;
}
entry->vme_resilient_codesign = FALSE;
entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
vm_map_entry_t entry,
vm_map_t submap)
{
entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
vm_map_entry_t entry,
vm_object_offset_t offset)
{
unsigned int alias;
alias = VME_ALIAS(entry);
assert((offset & FOURK_PAGE_MASK) == 0);
entry->vme_offset = offset | alias;
}
/*
* IMPORTANT:
* The "alias" field can be updated while holding the VM map lock
* "shared". It's OK as along as it's the only field that can be
* updated without the VM map "exclusive" lock.
*/
static inline void
VME_ALIAS_SET(
vm_map_entry_t entry,
int alias)
{
vm_object_offset_t offset;
offset = VME_OFFSET(entry);
entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}
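/*
 * Minimal sketch of the offset/alias packing above: VM object offsets are
 * at least 4K aligned, so the low 12 bits of vme_offset (FOURK_PAGE_MASK)
 * are free to carry the entry's alias (VM memory tag). The function name
 * and the values used are hypothetical.
 */
static inline void
vme_offset_alias_packing_example(vm_map_entry_t entry)
{
	VME_OFFSET_SET(entry, 0x10000);         /* a 4K-aligned object offset */
	VME_ALIAS_SET(entry, 5);                /* some VM memory tag value */
	assert(VME_OFFSET(entry) == 0x10000);   /* offset is unaffected by the alias */
	assert(VME_ALIAS(entry) == 5);          /* alias lives in the low bits */
}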
static inline void
VME_OBJECT_SHADOW(
vm_map_entry_t entry,
vm_object_size_t length)
{
vm_object_t object;
vm_object_offset_t offset;
object = VME_OBJECT(entry);
offset = VME_OFFSET(entry);
vm_object_shadow(&object, &offset, length);
if (object != VME_OBJECT(entry)) {
VME_OBJECT_SET(entry, object);
entry->use_pmap = TRUE;
}
if (offset != VME_OFFSET(entry)) {
VME_OFFSET_SET(entry, offset);
}
}
/*
* Convenience macros for dealing with superpages
* SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
*/
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
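/*
 * Worked example, assuming 4 KB base pages and SUPERPAGE_NBASEPAGES == 512
 * (i.e. 2 MB superpages, the x86_64 configuration):
 *
 *	SUPERPAGE_SIZE                 == 0x200000
 *	SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x234567)   == 0x400000
 */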
/*
* wired_counts are unsigned short. This value is used to safeguard
* against any mishaps due to runaway user programs.
*/
#define MAX_WIRE_COUNT 65535
/*
* Type: struct vm_map_header
*
* Description:
* Header for a vm_map and a vm_map_copy.
*/
struct vm_map_header {
struct vm_map_links links; /* first, last, min, max */
int nentries; /* Number of entries */
boolean_t entries_pageable;
/* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
struct rb_head rb_head_store;
#endif
int page_shift; /* page shift */
};
#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
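/*
 * For example, a header with page_shift == 12 describes 4 KB pages
 * (VM_MAP_HDR_PAGE_SIZE == 4096, VM_MAP_HDR_PAGE_MASK == 0xFFF), while
 * page_shift == 14 describes 16 KB pages (16384, 0x3FFF); this is how a map
 * or map copy can use a page size other than the kernel's native one.
 */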
/*
* Type: vm_map_t [exported; contents invisible]
*
* Description:
* An address map -- a directory relating valid
* regions of a task's address space to the corresponding
* virtual memory objects.
*
* Implementation:
* Maps are doubly-linked lists of map entries, sorted
* by address. One hint is used to start
* searches again from the last successful search,
* insertion, or removal. Another hint is used to
* quickly find free space.
*/
struct _vm_map {
lck_rw_t lock; /* map lock */
struct vm_map_header hdr; /* Map entry header */
#define min_offset hdr.links.start /* start of range */
#define max_offset hdr.links.end /* end of range */
pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
vm_map_size_t size; /* virtual size */
uint64_t size_limit; /* rlimit on address space size */
uint64_t data_limit; /* rlimit on data size */
vm_map_size_t user_wire_limit;/* rlimit on user locked memory */
vm_map_size_t user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
vm_map_offset_t vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */
union {
/*
* If map->disable_vmentry_reuse == TRUE:
* the end address of the highest allocated vm_map_entry_t.
*/
vm_map_offset_t vmu1_highest_entry_end;
/*
* For a nested VM map:
* the lowest address in this nested VM map that we would
* expect to be unnested under normal operation (i.e. for
* regular copy-on-write on DATA section).
*/
vm_map_offset_t vmu1_lowest_unnestable_start;
} vmu1;
#define highest_entry_end vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
decl_lck_mtx_data(, s_lock); /* Lock ref, res fields */
lck_mtx_ext_t s_lock_ext;
vm_map_entry_t hint; /* hint for quick lookups */
union {
struct vm_map_links* vmmap_hole_hint; /* hint for quick hole lookups */
struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
union {
vm_map_entry_t _first_free; /* First free space hint */
struct vm_map_links* _holes; /* links all holes between entries */
} f_s; /* Union for free space data structures being used */
#define first_free f_s._first_free
#define holes_list f_s._holes
struct os_refcnt map_refcnt; /* Reference count */
unsigned int
/* boolean_t */ wait_for_space:1, /* Should callers wait for space? */
/* boolean_t */ wiring_required:1, /* All memory wired? */
/* boolean_t */ no_zero_fill:1, /* No zero fill absent pages */
/* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
/* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */
/* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
/* boolean_t */ holelistenabled:1,
/* boolean_t */ is_nested_map:1,
/* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
/* boolean_t */ jit_entry_exists:1,
/* boolean_t */ has_corpse_footprint:1,
/* boolean_t */ terminated:1,
/* boolean_t */ is_alien:1, /* for platform simulation, i.e. PLATFORM_IOS on OSX */
/* boolean_t */ cs_enforcement:1, /* code-signing enforcement */
/* boolean_t */ cs_debugged:1, /* code-signed but debugged */
/* boolean_t */ reserved_regions:1, /* has reserved regions. The map size that userspace sees should ignore these. */
/* boolean_t */ single_jit:1, /* only allow one JIT mapping */
/* reserved */ pad:14;
unsigned int timestamp; /* Version number */
};
#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map) ((map)->hdr.links.prev)
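/*
 * The links in the header make a map's entries a circular doubly-linked
 * list whose sentinel is the header itself, so a typical traversal looks
 * like the sketch below (the map lock must be held by the caller; the
 * function name is illustrative only).
 */
static inline vm_map_size_t
vm_map_entries_total_size_example(vm_map_t map)
{
	vm_map_entry_t entry;
	vm_map_size_t total = 0;

	for (entry = vm_map_first_entry(map);
	    entry != vm_map_to_entry(map);
	    entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}
	return total;
}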
/*
* Type: vm_map_version_t [exported; contents invisible]
*
* Description:
* Map versions may be used to quickly validate a previous
* lookup operation.
*
* Usage note:
* Because they are bulky objects, map versions are usually
* passed by reference.
*
* Implementation:
* Just a timestamp for the main map.
*/
typedef struct vm_map_version {
unsigned int main_timestamp;
} vm_map_version_t;
/*
* Type: vm_map_copy_t [exported; contents invisible]
*
* Description:
* A map copy object represents a region of virtual memory
* that has been copied from an address map but is still
* in transit.
*
* A map copy object may only be used by a single thread
* at a time.
*
* Implementation:
* There are three formats for map copy objects.
* The first is very similar to the main
* address map in structure, and as a result, some
* of the internal maintenance functions/macros can
* be used with either address maps or map copy objects.
*
* The map copy object contains a header links
* entry onto which the other entries that represent
* the region are chained.
*
* The second format is a single vm object. This was used
* primarily in the pageout path - but is not currently used
* except for placeholder copy objects (see vm_map_copy_copy()).
*
* The third format is a kernel buffer copy object - for data
* small enough that physical copies are the most efficient
* method. The copied data lives in a separate kernel buffer
* referenced by the 'kdata' pointer in the 'c_u' union below.
*/
struct vm_map_copy {
int type;
#define VM_MAP_COPY_ENTRY_LIST 1
#define VM_MAP_COPY_OBJECT 2
#define VM_MAP_COPY_KERNEL_BUFFER 3
vm_object_offset_t offset;
vm_map_size_t size;
union {
struct vm_map_header hdr; /* ENTRY_LIST */
vm_object_t object; /* OBJECT */
void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
} c_u;
};
#define cpy_hdr c_u.hdr
#define cpy_object c_u.object
#define cpy_kdata c_u.kdata
#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
/*
* Useful macros for entry list copy objects
*/
#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
((copy)->cpy_hdr.links.prev)
extern kern_return_t
vm_map_copy_adjust_to_target(
vm_map_copy_t copy_map,
vm_map_offset_t offset,
vm_map_size_t size,
vm_map_t target_map,
boolean_t copy,
vm_map_copy_t *target_copy_map_p,
vm_map_offset_t *overmap_start_p,
vm_map_offset_t *overmap_end_p,
vm_map_offset_t *trimmed_start_p);
/*
* Macros: vm_map_lock, etc. [internal use only]
* Description:
* Perform locking on the data portion of a map.
* When multiple maps are to be locked, order by map address.
* (See vm_map.c::vm_remap())
*/
#define vm_map_lock_init(map) \
((map)->timestamp = 0 , \
lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))
#define vm_map_lock(map) \
MACRO_BEGIN \
DTRACE_VM(vm_map_lock_w); \
lck_rw_lock_exclusive(&(map)->lock); \
MACRO_END
#define vm_map_unlock(map) \
MACRO_BEGIN \
DTRACE_VM(vm_map_unlock_w); \
(map)->timestamp++; \
lck_rw_done(&(map)->lock); \
MACRO_END
#define vm_map_lock_read(map) \
MACRO_BEGIN \
DTRACE_VM(vm_map_lock_r); \
lck_rw_lock_shared(&(map)->lock); \
MACRO_END
#define vm_map_unlock_read(map) \
MACRO_BEGIN \
DTRACE_VM(vm_map_unlock_r); \
lck_rw_done(&(map)->lock); \
MACRO_END
#define vm_map_lock_write_to_read(map) \
MACRO_BEGIN \
DTRACE_VM(vm_map_lock_downgrade); \
(map)->timestamp++; \
lck_rw_lock_exclusive_to_shared(&(map)->lock); \
MACRO_END
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);
__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);
__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);
int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);
#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */
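/*
 * Sketch of the read-to-write upgrade idiom built on the declarations
 * above: vm_map_lock_read_to_write() returns 0 with the lock now held
 * exclusively on success; otherwise the shared lock has been dropped and
 * the caller must take the lock again (and should assume the map changed
 * in the meantime). The function name is illustrative only.
 */
static inline void
vm_map_lock_upgrade_example(vm_map_t map)
{
	vm_map_lock_read(map);
	/* ... read-only inspection of the map ... */
	if (vm_map_lock_read_to_write(map)) {
		/* upgrade failed: the lock was dropped, take it exclusively */
		vm_map_lock(map);
		/* ... any earlier lookups must be redone here ... */
	}
	/* ... modify the map under the exclusive lock ... */
	vm_map_unlock(map);
}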
/*
* Exported procedures that operate on vm_map_t.
*/
/* Initialize the module */
extern void vm_map_init(void);
/* Allocate a range in the specified virtual address map and
* return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
vm_map_t map,
vm_map_address_t *address, /* OUT */
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_map_entry_t *o_entry); /* OUT */
/* flags for vm_map_find_space */
#define VM_MAP_FIND_LAST_FREE 0x01
extern void vm_map_clip_start(
vm_map_t map,
vm_map_entry_t entry,
vm_map_offset_t endaddr);
extern void vm_map_clip_end(
vm_map_t map,
vm_map_entry_t entry,
vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
vm_map_entry_t entry);
/* Look up the map entry containing, or immediately preceding, the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
vm_map_t map,
vm_map_address_t address,
vm_map_entry_t *entry); /* OUT */
extern void vm_map_copy_remap(
vm_map_t map,
vm_map_entry_t where,
vm_map_copy_t copy,
vm_map_offset_t adjustment,
vm_prot_t cur_prot,
vm_prot_t max_prot,
vm_inherit_t inheritance);
/* Find the VM object, offset, and protection for a given virtual address
* in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
vm_map_t *var_map, /* IN/OUT */
vm_map_address_t vaddr,
vm_prot_t fault_type,
int object_lock_type,
vm_map_version_t *out_version, /* OUT */
vm_object_t *object, /* OUT */
vm_object_offset_t *offset, /* OUT */
vm_prot_t *out_prot, /* OUT */
boolean_t *wired, /* OUT */
vm_object_fault_info_t fault_info, /* OUT */
vm_map_t *real_map, /* OUT */
bool *contended); /* OUT */
/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
vm_map_t map,
vm_map_version_t *version); /* REF */
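/*
 * Sketch of how vm_map_version_t and vm_map_verify() fit together (compare
 * the fault path): record the map's timestamp under the lock, drop the lock
 * for slow work, then re-take it and check that the map was not modified in
 * between. The function name is illustrative only.
 */
static inline void
vm_map_version_usage_example(vm_map_t map)
{
	vm_map_version_t version;

	vm_map_lock_read(map);
	version.main_timestamp = map->timestamp;  /* what vm_map_lookup_locked() records */
	vm_map_unlock_read(map);

	/* ... potentially blocking work without the map lock ... */

	vm_map_lock_read(map);
	if (!vm_map_verify(map, &version)) {
		/* the map changed while unlocked: any cached lookup result is stale */
	}
	vm_map_unlock_read(map);
}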
extern vm_map_entry_t vm_map_entry_insert(
vm_map_t map,
vm_map_entry_t insp_entry,
vm_map_offset_t start,
vm_map_offset_t end,
vm_object_t object,
vm_object_offset_t offset,
vm_map_kernel_flags_t vmk_flags,
boolean_t needs_copy,
boolean_t is_shared,
boolean_t in_transition,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_behavior_t behavior,
vm_inherit_t inheritance,
unsigned short wired_count,
boolean_t no_cache,
boolean_t permanent,
boolean_t no_copy_on_read,
unsigned int superpage_size,
boolean_t clear_map_aligned,
boolean_t is_submap,
boolean_t used_for_jit,
int alias,
boolean_t translated_allow_execute);
/*
* Functions implemented as macros
*/
#define vm_map_min(map) ((map)->min_offset)
/* Lowest valid address in
* a map */
#define vm_map_max(map) ((map)->max_offset)
/* Highest valid address */
#define vm_map_pmap(map) ((map)->pmap)
/* Physical map associated
* with this address map */
/* Gain a reference to an existing map */
extern void vm_map_reference(
vm_map_t map);
/*
* Submap object. Must be used to create memory to be put
* in a submap by vm_map_submap.
*/
extern vm_object_t vm_submap_object;
/*
* Wait and wakeup macros for in_transition map entries.
*/
#define vm_map_entry_wait(map, interruptible) \
((map)->timestamp++ , \
lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
(event_t)&(map)->hdr, interruptible))
#define vm_map_entry_wakeup(map) \
thread_wakeup((event_t)(&(map)->hdr))
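/*
 * Sketch of the in_transition protocol these macros support: a thread that
 * finds an entry being changed elsewhere marks it needs_wakeup and sleeps
 * on the map; the thread completing the change calls vm_map_entry_wakeup().
 * The function name is illustrative; real callers restart their lookup
 * after waking because the entry may have been clipped or freed.
 */
static inline void
vm_map_entry_wait_example(vm_map_t map, vm_map_entry_t entry, int interruptible)
{
	if (entry->in_transition) {
		/* ask whoever owns the transition to wake us when it completes */
		entry->needs_wakeup = TRUE;
		/* drops the map lock for the sleep and re-takes it exclusively */
		vm_map_entry_wait(map, interruptible);
	}
}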
/* simplify map entries */
extern void vm_map_simplify_entry(
vm_map_t map,
vm_map_entry_t this_entry);
extern void vm_map_simplify(
vm_map_t map,
vm_map_offset_t start);
/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
vm_map_copy_t copy);
/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_map_copy_t *copy_result); /* OUT */
extern kern_return_t vm_map_random_address_for_size(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t size);
/* Enter a mapping */
extern kern_return_t vm_map_enter(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_object_t object,
vm_object_offset_t offset,
boolean_t needs_copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_inherit_t inheritance);
#if __arm64__
extern kern_return_t vm_map_enter_fourk(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_object_t object,
vm_object_offset_t offset,
boolean_t needs_copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_inherit_t inheritance);
#endif /* __arm64__ */
/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
vm_map_t map,
vm_map_address_t *addr,
vm_map_size_t size,
int flags);
extern kern_return_t vm_map_remap(
vm_map_t target_map,
vm_map_offset_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_map_t src_map,
vm_map_offset_t memory_address,
boolean_t copy,
vm_prot_t *cur_protection,
vm_prot_t *max_protection,
vm_inherit_t inheritance);
/*
* Read and write from a kernel buffer to a specified map.
*/
extern kern_return_t vm_map_write_user(
vm_map_t map,
void *src_p,
vm_map_offset_t dst_addr,
vm_size_t size);
extern kern_return_t vm_map_read_user(
vm_map_t map,
vm_map_offset_t src_addr,
void *dst_p,
vm_size_t size);
/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
ledger_t ledger,
vm_map_t old_map,
int options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT 0x00000004
/* Change inheritance */
extern kern_return_t vm_map_inherit(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_inherit_t new_inheritance);
/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_machine_attribute_t attribute,
vm_machine_attribute_val_t* value); /* IN/OUT */
extern kern_return_t vm_map_msync(
vm_map_t map,
vm_map_address_t address,
vm_map_size_t size,
vm_sync_t sync_flags);
/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_behavior_t new_behavior);
extern kern_return_t vm_map_region(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t *size,
vm_region_flavor_t flavor,
vm_region_info_t info,
mach_msg_type_number_t *count,
mach_port_t *object_name);
extern kern_return_t vm_map_region_recurse_64(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t *size,
natural_t *nesting_depth,
vm_region_submap_info_64_t info,
mach_msg_type_number_t *count);