use crate::externals::MemoryCreator;
use crate::trampoline::{MemoryCreatorProxy, StoreInstanceHandle};
use anyhow::{bail, Result};
use std::cell::RefCell;
use std::cmp;
use std::convert::TryFrom;
use std::fmt;
use std::path::Path;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use wasmparser::{OperatorValidatorConfig, ValidatingParserConfig};
use wasmtime_environ::settings::{self, Configurable};
use wasmtime_environ::{CacheConfig, Tunables};
use wasmtime_jit::{native, CompilationStrategy, Compiler};
use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
use wasmtime_runtime::{
debug_builtins, InstanceHandle, RuntimeMemoryCreator, SignalHandler, VMInterrupts,
};
// Runtime Environment
// Configuration
/// Global configuration options used to create an [`Engine`] and customize its
/// behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`]
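///
/// ## Example
///
/// A brief sketch of the builder-style usage (the particular options are
/// illustrative only, and this assumes `Config` and `Engine` are re-exported
/// from the crate root as in the other examples in this file):
///
/// ```
/// # use wasmtime::*;
/// let mut config = Config::new();
/// config.debug_info(true).wasm_simd(true);
/// let engine = Engine::new(&config);
/// ```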
#[derive(Clone)]
pub struct Config {
pub(crate) flags: settings::Builder,
pub(crate) validating_config: ValidatingParserConfig,
pub(crate) tunables: Tunables,
pub(crate) strategy: CompilationStrategy,
pub(crate) cache_config: CacheConfig,
pub(crate) profiler: Arc<dyn ProfilingAgent>,
pub(crate) memory_creator: Option<MemoryCreatorProxy>,
pub(crate) max_wasm_stack: usize,
}
impl Config {
/// Creates a new configuration object with the default configuration
/// specified.
pub fn new() -> Config {
let mut tunables = Tunables::default();
if cfg!(windows) {
// For now, use a smaller footprint on Windows so that we don't
// outstrip the paging file.
tunables.static_memory_bound = cmp::min(tunables.static_memory_bound, 0x100);
tunables.static_memory_offset_guard_size =
cmp::min(tunables.static_memory_offset_guard_size, 0x10000);
}
let mut flags = settings::builder();
// There are two possible traps for division, and this way
// we get the proper one if code traps.
flags
.enable("avoid_div_traps")
.expect("should be valid flag");
// Invert cranelift's default-on verification to instead default off.
flags
.set("enable_verifier", "false")
.expect("should be valid flag");
// Turn on cranelift speed optimizations by default
flags
.set("opt_level", "speed")
.expect("should be valid flag");
// We don't use probestack as a stack limit mechanism
flags
.set("enable_probestack", "false")
.expect("should be valid flag");
Config {
tunables,
validating_config: ValidatingParserConfig {
operator_config: OperatorValidatorConfig {
enable_threads: false,
enable_reference_types: false,
enable_bulk_memory: false,
enable_simd: false,
enable_multi_value: false,
},
},
flags,
strategy: CompilationStrategy::Auto,
cache_config: CacheConfig::new_cache_disabled(),
profiler: Arc::new(NullProfilerAgent),
memory_creator: None,
max_wasm_stack: 1 << 20,
}
}
/// Configures whether DWARF debug information will be emitted during
/// compilation.
///
/// By default this option is `false`.
pub fn debug_info(&mut self, enable: bool) -> &mut Self {
self.tunables.debug_info = enable;
self
}
/// Configures whether functions and loops will be interruptable via the
/// [`Store::interrupt_handle`] method.
///
/// For more information see the documentation on
/// [`Store::interrupt_handle`].
///
/// By default this option is `false`.
pub fn interruptable(&mut self, enable: bool) -> &mut Self {
self.tunables.interruptable = enable;
self
}
/// Configures the maximum amount of native stack space available to
/// executing WebAssembly code.
///
/// WebAssembly code currently executes on the native call stack for its own
/// call frames. WebAssembly, however, also has well-defined semantics on
/// stack overflow. This is intended to be a knob which can help configure
/// how much native stack space a wasm module is allowed to consume. Note
/// that the number here is not super-precise, but rather wasm will take at
/// most "pretty close to this much" stack space.
///
/// If a wasm call (or series of nested wasm calls) takes more stack space
/// than the `size` specified then a stack overflow trap will be raised.
///
/// By default this option is 1 MB.
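///
/// ## Example
///
/// A minimal sketch capping the wasm stack at 512 KiB (the value is purely
/// illustrative):
///
/// ```
/// # use wasmtime::*;
/// let mut config = Config::new();
/// config.max_wasm_stack(512 * 1024);
/// ```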
pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
self.max_wasm_stack = size;
self
}
/// Configures whether the WebAssembly threads proposal will be enabled for
/// compilation.
///
/// The [WebAssembly threads proposal][threads] is not currently fully
/// standardized and is undergoing development. Additionally the support in
/// wasmtime itself is still being worked on. Support for this feature can
/// be enabled through this method for appropriate wasm modules.
///
/// This feature gates items such as shared memories and atomic
/// instructions. Note that enabling the threads feature will
/// also enable the bulk memory feature.
///
/// This is `false` by default.
///
/// > **Note**: Wasmtime does not implement everything for the wasm threads
/// > spec at this time, so bugs, panics, and possibly segfaults should be
/// > expected. This should not be enabled in a production setting right
/// > now.
///
/// [threads]: https://github.com/webassembly/threads
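///
/// ## Example
///
/// A sketch of opting in; note that, as described above, this also turns on
/// the bulk memory feature:
///
/// ```
/// # use wasmtime::*;
/// let mut config = Config::new();
/// config.wasm_threads(true);
/// ```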
pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
self.validating_config.operator_config.enable_threads = enable;
// The threads proposal depends on the bulk memory proposal
if enable {
self.wasm_bulk_memory(true);
}
self
}
/// Configures whether the WebAssembly reference types proposal will be
/// enabled for compilation.
///
/// The [WebAssembly reference types proposal][proposal] is not currently
/// fully standardized and is undergoing development. Additionally the
/// support in wasmtime itself is still being worked on. Support for this
/// feature can be enabled through this method for appropriate wasm
/// modules.
///
/// This feature gates items such as the `anyref` type and multiple tables
/// being in a module. Note that enabling the reference types feature will
/// also enable the bulk memory feature.
///
/// This is `false` by default.
///
/// > **Note**: Wasmtime does not implement everything for the reference
/// > types proposal spec at this time, so bugs, panics, and possibly
/// > segfaults should be expected. This should not be enabled in a
/// > production setting right now.
///
/// [proposal]: https://github.com/webassembly/reference-types
pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
self.validating_config
.operator_config
.enable_reference_types = enable;
// The reference types proposal depends on the bulk memory proposal
if enable {
self.wasm_bulk_memory(true);
}
self
}
/// Configures whether the WebAssembly SIMD proposal will be
/// enabled for compilation.
///
/// The [WebAssembly SIMD proposal][proposal] is not currently
/// fully standardized and is undergoing development. Additionally the
/// support in wasmtime itself is still being worked on. Support for this
/// feature can be enabled through this method for appropriate wasm
/// modules.
///
/// This feature gates items such as the `v128` type and all of its
/// operators being in a module.
///
/// This is `false` by default.
///
/// > **Note**: Wasmtime does not implement everything for the wasm simd
/// > spec at this time, so bugs, panics, and possibly segfaults should be
/// > expected. This should not be enabled in a production setting right
/// > now.
///
/// [proposal]: https://github.com/webassembly/simd
pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
self.validating_config.operator_config.enable_simd = enable;
let val = if enable { "true" } else { "false" };
self.flags
.set("enable_simd", val)
.expect("should be valid flag");
self
}
/// Configures whether the WebAssembly bulk memory operations proposal will
/// be enabled for compilation.
///
/// The [WebAssembly bulk memory operations proposal][proposal] is not
/// currently fully standardized and is undergoing development.
/// Additionally the support in wasmtime itself is still being worked on.
/// Support for this feature can be enabled through this method for
/// appropriate wasm modules.
///
/// This feature gates items such as the `memory.copy` instruction, passive
/// data/table segments, etc, being in a module.
///
/// This is `false` by default.
///
/// [proposal]: https://github.com/webassembly/bulk-memory-operations
pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
self.validating_config.operator_config.enable_bulk_memory = enable;
self
}
/// Configures whether the WebAssembly multi-value proposal will
/// be enabled for compilation.
///
/// The [WebAssembly multi-value proposal][proposal] is not
/// currently fully standardized and is undergoing development.
/// Additionally the support in wasmtime itself is still being worked on.
/// Support for this feature can be enabled through this method for
/// appropriate wasm modules.
///
/// This feature gates functions and blocks returning multiple values in a
/// module, for example.
///
/// This is `false` by default.
///
/// [proposal]: https://github.com/webassembly/multi-value
pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
self.validating_config.operator_config.enable_multi_value = enable;
self
}
/// Configures which compilation strategy will be used for wasm modules.
///
/// This method can be used to configure which compiler is used for wasm
/// modules, and for more documentation consult the [`Strategy`] enumeration
/// and its documentation.
///
/// The default value for this is `Strategy::Auto`.
///
/// # Errors
///
/// Some compilation strategies require compile-time options of `wasmtime`
/// itself to be set, but if they're not set and the strategy is specified
/// here then an error will be returned.
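///
/// ## Example
///
/// A sketch explicitly selecting Cranelift (which is always compiled in, so
/// this particular call should not error):
///
/// ```
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// let mut config = Config::new();
/// config.strategy(Strategy::Cranelift)?;
/// # Ok(())
/// # }
/// ```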
pub fn strategy(&mut self, strategy: Strategy) -> Result<&mut Self> {
self.strategy = match strategy {
Strategy::Auto => CompilationStrategy::Auto,
Strategy::Cranelift => CompilationStrategy::Cranelift,
#[cfg(feature = "lightbeam")]
Strategy::Lightbeam => CompilationStrategy::Lightbeam,
#[cfg(not(feature = "lightbeam"))]
Strategy::Lightbeam => {
anyhow::bail!("lightbeam compilation strategy wasn't enabled at compile time");
}
};
Ok(self)
}
/// Creates a default profiler based on the profiling strategy chosen.
///
/// Profiler creation calls the type's default initializer where the purpose is
/// really just to put in place the type used for profiling.
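///
/// ## Example
///
/// A sketch that explicitly selects the no-op profiler (the default):
///
/// ```
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// let mut config = Config::new();
/// config.profiler(ProfilingStrategy::None)?;
/// # Ok(())
/// # }
/// ```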
pub fn profiler(&mut self, profile: ProfilingStrategy) -> Result<&mut Self> {
self.profiler = match profile {
ProfilingStrategy::JitDump => Arc::new(JitDumpAgent::new()?) as Arc<dyn ProfilingAgent>,
ProfilingStrategy::VTune => Arc::new(VTuneAgent::new()?) as Arc<dyn ProfilingAgent>,
ProfilingStrategy::None => Arc::new(NullProfilerAgent),
};
Ok(self)
}
/// Configures whether the debug verifier of Cranelift is enabled or not.
///
/// When Cranelift is used as a code generation backend this will configure
/// it to have the `enable_verifier` flag which will enable a number of debug
/// checks inside of Cranelift. This is largely only useful for the
/// developers of wasmtime itself.
///
/// The default value for this is `false`
pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
let val = if enable { "true" } else { "false" };
self.flags
.set("enable_verifier", val)
.expect("should be valid flag");
self
}
/// Configures the Cranelift code generator optimization level.
///
/// When the Cranelift code generator is used you can configure the
/// optimization level used for generated code in a few various ways. For
/// more information see the documentation of [`OptLevel`].
///
/// The default value for this is `OptLevel::None`.
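///
/// ## Example
///
/// A sketch selecting the most aggressive optimization level:
///
/// ```
/// # use wasmtime::*;
/// let mut config = Config::new();
/// config.cranelift_opt_level(OptLevel::SpeedAndSize);
/// ```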
pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
let val = match level {
OptLevel::None => "none",
OptLevel::Speed => "speed",
OptLevel::SpeedAndSize => "speed_and_size",
};
self.flags
.set("opt_level", val)
.expect("should be valid flag");
self
}
/// Configures whether Cranelift should perform a NaN-canonicalization pass.
///
/// When Cranelift is used as a code generation backend this will configure
/// it to replace NaNs with a single canonical value. This is useful for users
/// requiring entirely deterministic WebAssembly computation.
/// This is not required by the WebAssembly spec, so it is not enabled by default.
///
/// The default value for this is `false`
pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
let val = if enable { "true" } else { "false" };
self.flags
.set("enable_nan_canonicalization", val)
.expect("should be valid flag");
self
}
/// Loads cache configuration specified at `path`.
///
/// This method will read the file specified by `path` on the filesystem and
/// attempt to load cache configuration from it. This method can also fail
/// due to I/O errors, misconfiguration, syntax errors, etc. For expected
/// syntax in the configuration file see the [documentation online][docs].
///
/// By default cache configuration is not enabled or loaded.
///
/// # Errors
///
/// This method can fail due to any error that happens when loading the file
/// pointed to by `path` and attempting to load the cache configuration.
///
/// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
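///
/// ## Example
///
/// A sketch of loading a configuration file; the path here is hypothetical,
/// so the example is not executed:
///
/// ```no_run
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// let mut config = Config::new();
/// config.cache_config_load("my-cache-config.toml")?;
/// # Ok(())
/// # }
/// ```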
pub fn cache_config_load(&mut self, path: impl AsRef<Path>) -> Result<&mut Self> {
self.cache_config = wasmtime_environ::CacheConfig::from_file(Some(path.as_ref()))?;
Ok(self)
}
/// Loads cache configuration from the system default path.
///
/// This method is the same as [`Config::cache_config_load`] except that it
/// does not take a path argument and instead loads the default
/// configuration present on the system. This is located, for example, on
/// Unix at `$HOME/.config/wasmtime/config.toml` and is typically created
/// with the `wasmtime config new` command.
///
/// By default cache configuration is not enabled or loaded.
///
/// # Errors
///
/// This method can fail due to any error that happens when loading the
/// default system configuration. Note that it is not an error if the
/// default config file does not exist, in which case the default settings
/// for an enabled cache are applied.
///
/// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
pub fn cache_config_load_default(&mut self) -> Result<&mut Self> {
self.cache_config = wasmtime_environ::CacheConfig::from_file(None)?;
Ok(self)
}
/// Sets a custom memory creator
pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
self.memory_creator = Some(MemoryCreatorProxy { mem_creator });
self
}
/// Configures the maximum size, in bytes, where a linear memory is
/// considered static, above which it'll be considered dynamic.
///
/// This function configures the threshold for wasm memories whether they're
/// implemented as a dynamically relocatable chunk of memory or a statically
/// located chunk of memory. The `max_size` parameter here is the size, in
/// bytes, where if the maximum size of a linear memory is below `max_size`
/// then it will be statically allocated with enough space to never have to
/// move. If the maximum size of a linear memory is larger than `max_size`
/// then wasm memory will be dynamically located and may move in memory
/// through growth operations.
///
/// Specifying a `max_size` of 0 means that all memories will be dynamic and
/// may be relocated through `memory.grow`. Also note that if any wasm
/// memory's maximum size is below `max_size` then it will still reserve
/// `max_size` bytes in the virtual memory space.
///
/// ## Static vs Dynamic Memory
///
/// Linear memories represent contiguous arrays of bytes, but they can also
/// be grown through the API and wasm instructions. When memory is grown if
/// space hasn't been preallocated then growth may involve relocating the
/// base pointer in memory. Memories in Wasmtime are classified in two
/// different ways:
///
/// * **static** - these memories preallocate all the space they'll ever
/// need, meaning that the base pointer of these memories is never
/// moved. Static memories may take more virtual memory space because of
/// pre-reserving space for memories.
///
/// * **dynamic** - these memories are not preallocated and may move during
/// growth operations. Dynamic memories consume less virtual memory space
/// because they don't need to preallocate space for future growth.
///
/// Static memories can be optimized better in JIT code because once the
/// base address is loaded in a function it's known that we never need to
/// reload it because it never changes, `memory.grow` is generally a pretty
/// fast operation because the wasm memory is never relocated, and under
/// some conditions bounds checks can be elided on memory accesses.
///
/// Dynamic memories can't be quite as heavily optimized because the base
/// address may need to be reloaded more often, they may require relocating
/// lots of data on `memory.grow`, and dynamic memories require
/// unconditional bounds checks on all memory accesses.
///
/// ## Should you use static or dynamic memory?
///
/// In general you probably don't need to change the value of this property.
/// The defaults here are optimized for each target platform to consume a
/// reasonable amount of physical memory while also generating speedy
/// machine code.
///
/// One of the main reasons you may want to configure this today is if your
/// environment can't reserve virtual memory space for each wasm linear
/// memory. On 64-bit platforms wasm memories require a 6GB reservation by
/// default, and system limits may prevent this in some scenarios. In this
/// case you may wish to force memories to be allocated dynamically meaning
/// that the virtual memory footprint of creating a wasm memory should be
/// exactly what's used by the wasm itself.
///
/// For 32-bit memories a static memory must contain at least 4GB of
/// reserved address space plus a guard page to elide any bounds checks at
/// all. Smaller static memories will use similar bounds checks as dynamic
/// memories.
///
/// ## Default
///
/// The default value for this property depends on the host platform. For
/// 64-bit platforms there's lots of address space available, so the default
/// configured here is 4GB. WebAssembly linear memories currently max out at
/// 4GB which means that on 64-bit platforms Wasmtime by default always uses
/// a static memory. This, coupled with a sufficiently sized guard region,
/// should produce the fastest JIT code on 64-bit platforms, but does
/// require a large address space reservation for each wasm memory.
///
/// For 32-bit platforms this value defaults to 1GB. This means that wasm
/// memories whose maximum size is less than 1GB will be allocated
/// statically, otherwise they'll be considered dynamic.
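///
/// ## Example
///
/// A sketch forcing every linear memory to be dynamically allocated, which
/// (as noted above) is what a `max_size` of 0 requests:
///
/// ```
/// # use wasmtime::*;
/// let mut config = Config::new();
/// config.static_memory_maximum_size(0);
/// ```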
pub fn static_memory_maximum_size(&mut self, max_size: u64) -> &mut Self {
let max_pages = max_size / u64::from(wasmtime_environ::WASM_PAGE_SIZE);
self.tunables.static_memory_bound = u32::try_from(max_pages).unwrap_or(u32::max_value());
self
}
/// Configures the size, in bytes, of the guard region used at the end of a
/// static memory's address space reservation.
///
/// All WebAssembly loads/stores are bounds-checked and generate a trap if
/// they're out-of-bounds. Loads and stores are often very performance
/// critical, so we want the bounds check to be as fast as possible!
/// Accelerating these memory accesses is the motivation for a guard after a
/// memory allocation.
///
/// Memories (both static and dynamic) can be configured with a guard at the
/// end of them which consists of unmapped virtual memory. This unmapped
/// memory will trigger a memory access violation (e.g. segfault) if
/// accessed. This allows JIT code to elide bounds checks if it can prove
/// that an access, if out of bounds, would hit the guard region. This means
/// that having such a guard of unmapped memory can remove the need for
/// bounds checks in JIT code.
///
/// For the difference between static and dynamic memories, see the
/// [`Config::static_memory_maximum_size`].
///
/// ## How big should the guard be?
///
/// In general, like with configuring `static_memory_maximum_size`, you
/// probably don't want to change this value from the defaults. Otherwise,
/// though, the size of the guard region affects the number of bounds checks
/// needed for generated wasm code. More specifically, loads/stores with
/// immediate offsets will generate bounds checks based on how big the guard
/// page is.
///
/// For 32-bit memories a 4GB static memory is required to even start
/// removing bounds checks. A 4GB guard size will guarantee that the module
/// has zero bounds checks for memory accesses. A 2GB guard size will
/// eliminate all bounds checks with an immediate offset less than 2GB. A
/// guard size of zero means that all memory accesses will still have bounds
/// checks.
///
/// ## Default
///
/// The default value for this property is 2GB on 64-bit platforms. This
/// allows eliminating almost all bounds checks on loads/stores with an
/// immediate offset of less than 2GB. On 32-bit platforms this defaults to
/// 64KB.
///
/// ## Static vs Dynamic Guard Size
///
/// Note that for now the static memory guard size must be at least as large
/// as the dynamic memory guard size, so configuring this property to be
/// smaller than the dynamic memory guard size will have no effect.
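///
/// ## Example
///
/// A sketch requesting a 2 GiB guard region (the value is illustrative and
/// is rounded up to the host page size):
///
/// ```
/// # use wasmtime::*;
/// let mut config = Config::new();
/// config.static_memory_guard_size(2 << 30);
/// ```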
pub fn static_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
let guard_size = round_up_to_pages(guard_size);
let guard_size = cmp::max(guard_size, self.tunables.dynamic_memory_offset_guard_size);
self.tunables.static_memory_offset_guard_size = guard_size;
self
}
/// Configures the size, in bytes, of the guard region used at the end of a
/// dynamic memory's address space reservation.
///
/// For the difference between static and dynamic memories, see the
/// [`Config::static_memory_maximum_size`]
///
/// For more information about what a guard is, see the documentation on
/// [`Config::static_memory_guard_size`].
///
/// Note that the size of the guard region for dynamic memories is not super
/// critical for performance. Making it reasonably-sized can improve
/// generated code slightly, but for maximum performance you'll want to lean
/// towards static memories rather than dynamic anyway.
///
/// Also note that the dynamic memory guard size must be smaller than the
/// static memory guard size, so if a large dynamic memory guard is
/// specified then the static memory guard size will also be automatically
/// increased.
///
/// ## Default
///
/// This value defaults to 64KB.
pub fn dynamic_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
let guard_size = round_up_to_pages(guard_size);
self.tunables.dynamic_memory_offset_guard_size = guard_size;
self.tunables.static_memory_offset_guard_size =
cmp::max(guard_size, self.tunables.static_memory_offset_guard_size);
self
}
}
fn round_up_to_pages(val: u64) -> u64 {
let page_size = region::page::size() as u64;
debug_assert!(page_size.is_power_of_two());
val.checked_add(page_size - 1)
.map(|val| val & !(page_size - 1))
.unwrap_or(u64::max_value() / page_size + 1)
}
impl Default for Config {
fn default() -> Config {
Config::new()
}
}
impl fmt::Debug for Config {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let features = &self.validating_config.operator_config;
f.debug_struct("Config")
.field("debug_info", &self.tunables.debug_info)
.field("strategy", &self.strategy)
.field("wasm_threads", &features.enable_threads)
.field("wasm_reference_types", &features.enable_reference_types)
.field("wasm_bulk_memory", &features.enable_bulk_memory)
.field("wasm_simd", &features.enable_simd)
.field("wasm_multi_value", &features.enable_multi_value)
.field(
"flags",
&settings::Flags::new(self.flags.clone()).to_string(),
)
.finish()
}
}
/// Possible Compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Strategy {
/// An indicator that the compilation strategy should be automatically
/// selected.
///
/// This is generally what you want for most projects and indicates that the
/// `wasmtime` crate itself should make the decision about what the best
/// code generator for a wasm module is.
///
/// Currently this always defaults to Cranelift, but the default value will
/// change over time.
Auto,
/// Currently the default backend, Cranelift aims to be a reasonably fast
/// code generator which generates high quality machine code.
Cranelift,
/// A single-pass code generator that is faster than Cranelift but doesn't
/// produce as high-quality code.
///
/// To successfully pass this argument to [`Config::strategy`] the
/// `lightbeam` feature of this crate must be enabled.
Lightbeam,
}
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum OptLevel {
/// No optimizations performed, minimizes compilation time by disabling most
/// optimizations.
None,
/// Generates the fastest possible code, but may take longer.
Speed,
/// Similar to `speed`, but also performs transformations aimed at reducing
/// code size.
SpeedAndSize,
}
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy)]
pub enum ProfilingStrategy {
/// No profiler support.
None,
/// Collect profiling info for "jitdump" file format, used with `perf` on
/// Linux.
JitDump,
/// Collect profiling info using the "ittapi", used with `VTune` on Linux.
VTune,
}
// Engine
/// An `Engine` is a global context for compilation and management of wasm
/// modules.
///
/// An engine can be safely shared across threads and is a cheap cloneable
/// handle to the actual engine. The engine itself will be deallocated once all
/// references to it have gone away.
///
/// Engines store global configuration preferences such as compilation settings,
/// enabled features, etc. You'll likely only need at most one of these for a
/// program.
///
/// ## Engines and `Clone`
///
/// Using `clone` on an `Engine` is a cheap operation. It will not create an
/// entirely new engine, but rather just a new reference to the existing engine.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Engines and `Default`
///
/// You can create an engine with default configuration settings using
/// `Engine::default()`. Be sure to consult the documentation of [`Config`] for
/// default settings.
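///
/// ## Example
///
/// A brief sketch of creating and cheaply cloning an engine:
///
/// ```
/// # use wasmtime::*;
/// let engine = Engine::default();
/// // `clone` only creates a new reference to the same underlying engine.
/// let _engine2 = engine.clone();
/// ```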
#[derive(Default, Clone)]
pub struct Engine {
config: Arc<Config>,
}
impl Engine {
/// Creates a new [`Engine`] with the specified compilation and
/// configuration settings.
pub fn new(config: &Config) -> Engine {
debug_builtins::ensure_exported();
Engine {
config: Arc::new(config.clone()),
}
}
/// Returns the configuration settings that this engine is using.
pub fn config(&self) -> &Config {
&self.config
}
}
// Store
/// A `Store` is a shared cache of information between WebAssembly modules.
///
/// Each `Module` is compiled into a `Store` and a `Store` is associated with an
/// [`Engine`]. You'll use a `Store` to attach to a number of global items in
/// the production of various items for wasm modules.
///
/// # Stores and `Clone`
///
/// Using `clone` on a `Store` is a cheap operation. It will not create an
/// entirely new store, but rather just a new reference to the existing object.
/// In other words it's a shallow copy, not a deep copy.
///
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`] for more information).
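///
/// ## Example
///
/// A brief sketch; `Store::default()` spins up a fresh default [`Engine`]
/// internally:
///
/// ```
/// # use wasmtime::*;
/// let store = Store::default();
/// // The store carries a handle to the `Engine` it was created with.
/// let _engine: &Engine = store.engine();
/// ```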
#[derive(Clone)]
pub struct Store {
inner: Rc<StoreInner>,
}
pub(crate) struct StoreInner {
engine: Engine,
compiler: RefCell<Compiler>,
instances: RefCell<Vec<InstanceHandle>>,
signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
}
impl Store {
/// Creates a new store to be associated with the given [`Engine`].
pub fn new(engine: &Engine) -> Store {
// Ensure that wasmtime_runtime's signal handlers are configured. Note
// that at the `Store` level it means we should perform this
// once-per-thread. Platforms like Unix, however, only require this
// once-per-program. In any case this is safe to call many times and
// each one that's not relevant just won't do anything.
wasmtime_runtime::init_traps();
let isa = native::builder().finish(settings::Flags::new(engine.config.flags.clone()));
let compiler = Compiler::new(
isa,
engine.config.strategy,
engine.config.cache_config.clone(),
engine.config.tunables.clone(),
);
Store {
inner: Rc::new(StoreInner {
engine: engine.clone(),
compiler: RefCell::new(compiler),
instances: RefCell::new(Vec::new()),
signal_handler: RefCell::new(None),
}),
}
}
pub(crate) fn from_inner(inner: Rc<StoreInner>) -> Store {
Store { inner }
}
/// Returns the [`Engine`] that this store is associated with.
pub fn engine(&self) -> &Engine {
&self.inner.engine
}
/// Returns an optional reference to a [`RuntimeMemoryCreator`]
pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
self.engine().config.memory_creator.as_ref().map(|x| x as _)
}
pub(crate) fn compiler(&self) -> std::cell::Ref<'_, Compiler> {
self.inner.compiler.borrow()
}
pub(crate) fn compiler_mut(&self) -> std::cell::RefMut<'_, Compiler> {
self.inner.compiler.borrow_mut()
}
pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
self.inner.instances.borrow_mut().push(handle.clone());
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn existing_instance_handle(&self, handle: InstanceHandle) -> StoreInstanceHandle {
debug_assert!(self
.inner
.instances
.borrow()
.iter()
.any(|i| i.vmctx_ptr() == handle.vmctx_ptr()));
StoreInstanceHandle {
store: self.clone(),
handle,
}
}
pub(crate) fn weak(&self) -> Weak<StoreInner> {
Rc::downgrade(&self.inner)
}
pub(crate) fn signal_handler(&self) -> std::cell::Ref<'_, Option<Box<SignalHandler<'static>>>> {
self.inner.signal_handler.borrow()
}
pub(crate) fn signal_handler_mut(
&self,
) -> std::cell::RefMut<'_, Option<Box<SignalHandler<'static>>>> {
self.inner.signal_handler.borrow_mut()
}
/// Returns whether the stores `a` and `b` refer to the same underlying
/// `Store`.
///
/// Because the `Store` type is reference counted multiple clones may point
/// to the same underlying storage, and this method can be used to determine
/// whether two stores are indeed the same.
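///
/// ## Example
///
/// A small sketch contrasting a clone with an independently created store:
///
/// ```
/// # use wasmtime::*;
/// let engine = Engine::default();
/// let store = Store::new(&engine);
/// assert!(Store::same(&store, &store.clone()));
/// assert!(!Store::same(&store, &Store::new(&engine)));
/// ```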
pub fn same(a: &Store, b: &Store) -> bool {
Rc::ptr_eq(&a.inner, &b.inner)
}
/// Creates an [`InterruptHandle`] which can be used to interrupt the
/// execution of instances within this `Store`.
///
/// An [`InterruptHandle`] handle is a mechanism of ensuring that guest code
/// doesn't execute for too long. For example it's used to prevent wasm
/// programs from executing indefinitely in infinite loops or recursive call
/// chains.
///
/// The [`InterruptHandle`] type is sendable to other threads so you can
/// interact with it even while the thread with this `Store` is executing
/// wasm code.
///
/// There's one method on an interrupt handle:
/// [`InterruptHandle::interrupt`]. This method is used to generate an
/// interrupt and cause wasm code to exit "soon".
///
/// ## When are interrupts delivered?
///
/// The term "interrupt" here refers to one of two different behaviors that
/// are interrupted in wasm:
///
/// * The head of every loop in wasm has a check to see if it's interrupted.
/// * The prologue of every function has a check to see if it's interrupted.
///
/// This interrupt mechanism makes no attempt to signal interrupts to
/// native code. For example if a host function is blocked, then sending
/// an interrupt will not interrupt that operation.
///
/// Interrupts are consumed as soon as possible when wasm itself starts
/// executing. This means that if you interrupt wasm code then it basically
/// guarantees that the next time wasm is executing on the target thread it
/// will return quickly (either normally if it were already in the process
/// of returning or with a trap from the interrupt). Once an interrupt
/// trap is generated then an interrupt is consumed, and further execution
/// will not be interrupted (unless another interrupt is set).
///
/// When implementing interrupts you'll want to ensure that the delivery of
/// interrupts into wasm code is also handled in your host imports and
/// functionality. Host functions need to either execute for bounded amounts
/// of time or you'll need to arrange for them to be interrupted as well.
///
/// ## Return Value
///
/// This function returns a `Result` since interrupts are not always
/// enabled. Interrupts are enabled via the [`Config::interruptable`]
/// method, and if this store's [`Config`] hasn't been configured to enable
/// interrupts then an error is returned.
///
/// ## Examples
///
/// ```
/// # use anyhow::Result;
/// # use wasmtime::*;
/// # fn main() -> Result<()> {
/// // Enable interruptable code via `Config` and then create an interrupt
/// // handle which we'll use later to interrupt running code.
/// let engine = Engine::new(Config::new().interruptable(true));
/// let store = Store::new(&engine);
/// let interrupt_handle = store.interrupt_handle()?;
///
/// // Compile and instantiate a small example with an infinite loop.
/// let module = Module::new(&store, r#"
/// (func (export "run") (loop br 0))
/// "#)?;
/// let instance = Instance::new(&module, &[])?;
/// let run = instance
/// .get_func("run")
/// .ok_or(anyhow::format_err!("failed to find `run` function export"))?
/// .get0::<()>()?;
///
/// // Spin up a thread to send us an interrupt in a second
/// std::thread::spawn(move || {
/// std::thread::sleep(std::time::Duration::from_secs(1));
/// interrupt_handle.interrupt();
/// });
///
/// let trap = run().unwrap_err();
/// assert!(trap.message().contains("wasm trap: interrupt"));
/// # Ok(())
/// # }
/// ```
pub fn interrupt_handle(&self) -> Result<InterruptHandle> {
if self.engine().config.tunables.interruptable {
Ok(InterruptHandle {
interrupts: self.compiler().interrupts().clone(),
})
} else {
bail!("interrupts aren't enabled for this `Store`")
}
}
}
impl Default for Store {
fn default() -> Store {
Store::new(&Engine::default())
}
}
impl Drop for StoreInner {
fn drop(&mut self) {
for instance in self.instances.get_mut().iter() {
unsafe {
instance.dealloc();
}
}
}
}
/// A threadsafe handle used to interrupt instances executing within a
/// particular `Store`.
///
/// This structure is created by the [`Store::interrupt_handle`] method.
pub struct InterruptHandle {
interrupts: Arc<VMInterrupts>,
}
impl InterruptHandle {
/// Flags that execution within this handle's original [`Store`] should be
/// interrupted.
///
/// This will not immediately interrupt execution of wasm modules, but
/// rather it will interrupt wasm execution of loop headers and wasm
/// execution of function entries. For more information see
/// [`Store::interrupt_handle`].
pub fn interrupt(&self) {
self.interrupts.interrupt()
}
}
fn _assert_send_sync() {
fn _assert<T: Send + Sync>() {}
_assert::<Engine>();
_assert::<Config>();
_assert::<InterruptHandle>();
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Module;
use tempfile::TempDir;
#[test]
fn cache_accounts_for_opt_level() -> Result<()> {
let td = TempDir::new()?;
let config_path = td.path().join("config.toml");
std::fs::write(
&config_path,
&format!(
"
[cache]
enabled = true
directory = '{}'
",
td.path().join("cache").display()
),
)?;
let mut cfg = Config::new();
cfg.cranelift_opt_level(OptLevel::None)
.cache_config_load(&config_path)?;
let store = Store::new(&Engine::new(&cfg));
Module::new(&store, "(module (func))")?;
assert_eq!(store.engine().config.cache_config.cache_hits(), 0);
assert_eq!(store.engine().config.cache_config.cache_misses(), 1);