//! This module defines various operations and types that are implemented in
//! one way for the serial compiler, and in another way for the parallel compiler.
//!
//! Operations
//! ----------
//! The parallel versions of operations use Rayon to execute code in parallel,
//! while the serial versions degenerate straightforwardly to serial execution.
//! The operations include `join`, `parallel`, `par_iter`, and `par_for_each_in`.
//!
//! Types
//! -----
//! The parallel versions of types provide various kinds of synchronization,
//! while the serial compiler versions do not.
//!
//! The following table shows how the types are implemented internally. Except
//! where noted otherwise, the type in column one is defined as a
//! newtype around the type from column two or three.
//!
//! | Type                    | Serial version      | Parallel version                |
//! | ----------------------- | ------------------- | ------------------------------- |
//! | `Lrc<T>`                | `rc::Rc<T>`         | `sync::Arc<T>`                  |
//! | `Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
//! |                         |                     |                                 |
//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
//! |                         |                     |                                 |
//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>`         |
//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
//! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
//! |                         |                     |                                 |
//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
//!
//! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
//! of a `RefCell`. This is appropriate when interior mutability is not
//! required.
//!
//! [^2]: `MTLockRef` is a typedef.
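//!
//! A short usage sketch (illustrative only): in the serial compiler the two
//! closures below simply run in sequence, while the parallel compiler may run
//! them on different threads.
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::{join, Lock};
//!
//! let counter = Lock::new(0);
//! join(|| *counter.lock() += 1, || *counter.lock() += 1);
//! assert_eq!(counter.into_inner(), 2);
//! ```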

use crate::owned_slice::OwnedSlice;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};

mod vec;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub unsafe auto trait Send {}
        pub unsafe auto trait Sync {}

        unsafe impl<T> Send for T {}
        unsafe impl<T> Sync for T {}

        use std::ops::Add;

        /// This is a single-threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It has explicit ordering arguments and is only intended for use with
        /// the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc., type
        /// aliases, as it's not intended to be used separately.
        #[derive(Debug, Default)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }

            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl Atomic<bool> {
            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
                // Store the OR-ed value, but return the previous one, matching
                // the semantics of `AtomicBool::fetch_or`.
                let old = self.0.get();
                self.0.set(old | val);
                old
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            #[inline]
            pub fn compare_exchange(&self,
                                    current: T,
                                    new: T,
                                    _: Ordering,
                                    _: Ordering)
                                    -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            #[inline]
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;
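
        // A minimal sanity-check sketch (not in the upstream file): the serial
        // `Atomic` mirrors the `std::sync::atomic` API, with the `Ordering`
        // arguments ignored since only one thread exists.
        #[cfg(test)]
        mod serial_atomic_tests {
            use super::{AtomicUsize, Ordering};

            #[test]
            fn mirrors_native_atomics() {
                let a = AtomicUsize::new(1);
                // `compare_exchange` succeeds and returns the previous value
                // when `current` matches...
                assert_eq!(a.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst), Ok(1));
                // ...and fails with the actual value when it does not.
                assert_eq!(a.compare_exchange(1, 3, Ordering::SeqCst, Ordering::SeqCst), Err(2));
                assert_eq!(a.fetch_add(10, Ordering::SeqCst), 2);
                assert_eq!(a.load(Ordering::SeqCst), 12);
            }
        }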

        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
            where A: FnOnce() -> RA,
                  B: FnOnce() -> RB
        {
            (oper_a(), oper_b())
        }

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:tt),*) => {
                // We catch panics here, ensuring that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }
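
        // A small illustrative test (not in the upstream file): the serial
        // `parallel!` runs its blocks in order on the current thread; if one
        // panics, the rest still run and the first panic is resumed at the end.
        #[cfg(test)]
        mod serial_parallel_macro_tests {
            #[test]
            fn all_blocks_run_in_order() {
                let mut log = Vec::new();
                parallel!({ log.push(1) }, { log.push(2) });
                assert_eq!(log, [1, 2]);
            }
        }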

        pub use Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
            t.into_iter()
        }

        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
            // We catch panics here, ensuring that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }
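
        // A small illustrative test (not in the upstream file): the serial
        // `par_for_each_in` is plain sequential iteration, so side effects
        // are observed in order.
        #[cfg(test)]
        mod serial_par_for_each_tests {
            use super::par_for_each_in;

            #[test]
            fn visits_every_item_in_order() {
                let mut seen = Vec::new();
                par_for_each_in(1..=3, |i| seen.push(i));
                assert_eq!(seen, [1, 2, 3]);
            }
        }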

        pub type MetadataRef = OwnedSlice;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        pub use std::cell::OnceCell;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        use std::cell::Cell;

        #[derive(Debug)]
        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the `f` closure computes the
            /// value this worker local should take for each thread in the thread pool.
            #[inline]
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))
            }

            /// Returns the worker-local value for each thread.
            #[inline]
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]
            }
        }

        impl<T> Deref for WorkerLocal<T> {
            type Target = T;

            #[inline(always)]
            fn deref(&self) -> &T {
                &self.0
            }
        }

        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            #[inline(always)]
            pub fn lock(&self) -> &T {
                &self.0
            }

            #[inline(always)]
            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            #[inline]
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::OnceLock as OnceCell;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTLockRef<'a, T> = &'a MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            #[inline(always)]
            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            #[inline(always)]
            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std::thread;
        pub use rayon::{join, scope};

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:tt [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(
                        s.spawn(|_| $blocks);
                    )*
                    $fblock;
                })
            };
            ($fblock:tt, $($blocks:tt),*) => {
                // Reverse the order of the later blocks since Rayon executes them in reverse order
                // when using a single thread. This ensures the execution order matches that
                // of a single-threaded rustc.
                parallel!(impl $fblock [] [$($blocks),*]);
            };
        }
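
        // Expansion sketch (illustrative): `parallel!(a, b, c)` unfolds to
        // roughly the following, with the later blocks spawned in reverse so
        // that a single-threaded Rayon pool runs them in the written order:
        //
        //     ::rustc_data_structures::sync::scope(|s| {
        //         s.spawn(|_| c);
        //         s.spawn(|_| b);
        //         a;
        //     });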

        pub use rayon_core::WorkerLocal;

        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
            t.into_par_iter()
        }

        pub fn par_for_each_in<T: IntoParallelIterator>(
            t: T,
            for_each: impl Fn(T::Item) + Sync + Send,
        ) {
            // Catch panics from every item so that all iterations execute, then
            // resume the first recorded panic. This matches the serial behavior.
            let ps: Vec<_> =
                t.into_par_iter().map(|i| catch_unwind(AssertUnwindSafe(|| for_each(i)))).collect();
            ps.into_iter().for_each(|p| {
                if let Err(panic) = p {
                    resume_unwind(panic)
                }
            });
        }

        pub type MetadataRef = OwnedSlice;

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;
    }
}

pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it panics if there is already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
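
// A small illustrative test (not in the upstream file) of `insert_same`:
// re-inserting an equal value is a no-op, while a conflicting value panics.
#[cfg(test)]
mod insert_same_tests {
    use super::HashMapExt;
    use std::collections::HashMap;

    #[test]
    fn equal_reinsert_is_allowed() {
        let mut map = HashMap::new();
        map.insert_same("key", 1);
        map.insert_same("key", 1);
        assert_eq!(map["key"], 1);
    }

    #[test]
    #[should_panic]
    fn conflicting_reinsert_panics() {
        let mut map = HashMap::new();
        map.insert_same("key", 1);
        map.insert_same("key", 2);
    }
}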

#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}

impl<T: Default> Default for Lock<T> {
    #[inline]
    fn default() -> Self {
        Lock::new(T::default())
    }
}
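
// A minimal sanity-check sketch (not in the upstream file): `Lock` exposes
// the same `lock`/`with_lock` API whether it wraps a `RefCell` (serial) or a
// `parking_lot::Mutex` (parallel).
#[cfg(test)]
mod lock_tests {
    use super::Lock;

    #[test]
    fn lock_round_trip() {
        let lock = Lock::new(0);
        *lock.lock() += 41;
        lock.with_lock(|v| *v += 1);
        assert_eq!(lock.into_inner(), 42);
    }
}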

#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        ReadGuard::leak(self.read())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        let guard = self.read();
        // Forget the read guard so the lock is never released, which keeps the
        // reference to the contents valid for the lifetime of the `RwLock`.
        let ret = unsafe { &*(&*guard as *const T) };
        std::mem::forget(guard);
        ret
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}
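
// A minimal illustrative test (not in the upstream file): read and write
// access behave like `RefCell` borrows in the serial build and like an
// rwlock in the parallel build.
#[cfg(test)]
mod rwlock_tests {
    use super::RwLock;

    #[test]
    fn read_then_write() {
        let lock = RwLock::new(String::from("a"));
        assert_eq!(*lock.read(), "a");
        lock.write().push('b');
        assert_eq!(lock.into_inner(), "ab");
    }
}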

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
#[derive(Debug)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}
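
// A minimal illustrative test (not in the upstream file): accessing a
// `OneThread` from the thread that created it succeeds; in the parallel
// compiler, access from any other thread would panic in `check`.
#[cfg(test)]
mod one_thread_tests {
    use super::OneThread;

    #[test]
    fn same_thread_access_is_allowed() {
        let value = OneThread::new(vec![1, 2, 3]);
        assert_eq!(value.len(), 3); // `Deref` runs the ownership check.
        assert_eq!(OneThread::into_inner(value), vec![1, 2, 3]);
    }
}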