]>
Commit | Line | Data |
---|---|---|
064997fb FG |
//! The context or environment in which the language server functions. In our
//! server implementation this is known as the `WorldState`.
//!
//! Each tick provides an immutable snapshot of the state as `WorldSnapshot`.

6 | use std::{sync::Arc, time::Instant}; | |
7 | ||
8 | use crossbeam_channel::{unbounded, Receiver, Sender}; | |
9 | use flycheck::FlycheckHandle; | |
10 | use ide::{Analysis, AnalysisHost, Cancellable, Change, FileId}; | |
11 | use ide_db::base_db::{CrateId, FileLoader, SourceDatabase}; | |
12 | use lsp_types::{SemanticTokens, Url}; | |
13 | use parking_lot::{Mutex, RwLock}; | |
14 | use proc_macro_api::ProcMacroServer; | |
15 | use project_model::{CargoWorkspace, ProjectWorkspace, Target, WorkspaceBuildScripts}; | |
16 | use rustc_hash::FxHashMap; | |
f2b60f7d | 17 | use stdx::hash::NoHashHashMap; |
064997fb FG |
18 | use vfs::AnchoredPathBuf; |
19 | ||
20 | use crate::{ | |
21 | config::Config, | |
22 | diagnostics::{CheckFixes, DiagnosticCollection}, | |
23 | from_proto, | |
24 | line_index::{LineEndings, LineIndex}, | |
25 | lsp_ext, | |
26 | main_loop::Task, | |
27 | mem_docs::MemDocs, | |
28 | op_queue::OpQueue, | |
29 | reload::{self, SourceRootConfig}, | |
30 | task_pool::TaskPool, | |
31 | to_proto::url_from_abs_path, | |
32 | Result, | |
33 | }; | |
34 | ||
// Enforces drop order: struct fields are dropped in declaration order, so the
// worker `handle` (and whatever thread/watcher it owns) is torn down before
// the `receiver` it reports to.
pub(crate) struct Handle<H, C> {
    pub(crate) handle: H,
    pub(crate) receiver: C,
}

/// Callback invoked when a response to one of our outgoing requests arrives.
pub(crate) type ReqHandler = fn(&mut GlobalState, lsp_server::Response);
/// Bookkeeping for in-flight requests in both directions; outgoing entries
/// carry the method name and the instant the request was issued (used for
/// duration logging when the response completes).
pub(crate) type ReqQueue = lsp_server::ReqQueue<(String, Instant), ReqHandler>;
43 | ||
/// `GlobalState` is the primary mutable state of the language server
///
/// The most interesting components are `vfs`, which stores a consistent
/// snapshot of the file systems, and `analysis_host`, which stores our
/// incremental salsa database.
///
/// Note that this struct has more than one impl in various modules!
pub(crate) struct GlobalState {
    // Outbound channel to the LSP client; all messages leave through here.
    sender: Sender<lsp_server::Message>,
    req_queue: ReqQueue,
    pub(crate) task_pool: Handle<TaskPool<Task>, Receiver<Task>>,
    // File-system loader/watcher plus the channel it reports on.
    pub(crate) loader: Handle<Box<dyn vfs::loader::Handle>, Receiver<vfs::loader::Message>>,
    pub(crate) config: Arc<Config>,
    pub(crate) analysis_host: AnalysisHost,
    pub(crate) diagnostics: DiagnosticCollection,
    // Documents currently opened (and owned) by the client.
    pub(crate) mem_docs: MemDocs,
    // Cached semantic tokens per document URL, used for token deltas.
    pub(crate) semantic_tokens_cache: Arc<Mutex<FxHashMap<Url, SemanticTokens>>>,
    pub(crate) shutdown_requested: bool,
    pub(crate) proc_macro_changed: bool,
    pub(crate) last_reported_status: Option<lsp_ext::ServerStatusParams>,
    pub(crate) source_root_config: SourceRootConfig,
    pub(crate) proc_macro_clients: Vec<Result<ProcMacroServer, String>>,

    // One flycheck (`cargo check`) handle per workspace; `Arc<[_]>` so that
    // snapshots can share them with an O(1) clone.
    pub(crate) flycheck: Arc<[FlycheckHandle]>,
    pub(crate) flycheck_sender: Sender<flycheck::Message>,
    pub(crate) flycheck_receiver: Receiver<flycheck::Message>,

    // The VFS plus the per-file line-ending table, guarded by one lock so the
    // two always stay consistent with each other.
    pub(crate) vfs: Arc<RwLock<(vfs::Vfs, NoHashHashMap<FileId, LineEndings>)>>,
    pub(crate) vfs_config_version: u32,
    pub(crate) vfs_progress_config_version: u32,
    pub(crate) vfs_progress_n_total: usize,
    pub(crate) vfs_progress_n_done: usize,

    /// `workspaces` field stores the data we actually use, while the `OpQueue`
    /// stores the result of the last fetch.
    ///
    /// If the fetch (partially) fails, we do not update the current value.
    ///
    /// The handling of build data is subtle. We fetch workspace in two phases:
    ///
    /// *First*, we run `cargo metadata`, which gives us fast results for
    /// initial analysis.
    ///
    /// *Second*, we run `cargo check` which runs build scripts and compiles
    /// proc macros.
    ///
    /// We need both for the precise analysis, but we want rust-analyzer to be
    /// at least partially available just after the first phase. That's because
    /// first phase is much faster, and is much less likely to fail.
    ///
    /// This creates a complication -- by the time the second phase completes,
    /// the results of the first phase could be invalid. That is, while we run
    /// `cargo check`, the user edits `Cargo.toml`, we notice this, and the new
    /// `cargo metadata` completes before `cargo check`.
    ///
    /// An additional complication is that we want to avoid needless work. When
    /// the user just adds comments or whitespace to Cargo.toml, we do not want
    /// to invalidate any salsa caches.
    pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
    pub(crate) fetch_workspaces_queue: OpQueue<Option<Vec<anyhow::Result<ProjectWorkspace>>>>,
    pub(crate) fetch_build_data_queue:
        OpQueue<(Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)>,

    pub(crate) prime_caches_queue: OpQueue<()>,
}
109 | ||
/// An immutable snapshot of the world's state at a point in time.
pub(crate) struct GlobalStateSnapshot {
    pub(crate) config: Arc<Config>,
    pub(crate) analysis: Analysis,
    pub(crate) check_fixes: CheckFixes,
    // Private: handlers go through `url_file_version` instead of poking docs.
    mem_docs: MemDocs,
    pub(crate) semantic_tokens_cache: Arc<Mutex<FxHashMap<Url, SemanticTokens>>>,
    vfs: Arc<RwLock<(vfs::Vfs, NoHashHashMap<FileId, LineEndings>)>>,
    pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
    // True once at least one round of build-script/proc-macro data was fetched
    // (see `GlobalState::snapshot`, which derives this from the build-data queue).
    pub(crate) proc_macros_loaded: bool,
    pub(crate) flycheck: Arc<[FlycheckHandle]>,
}

// NOTE(review): marker impl asserting the snapshot may cross a panic boundary
// (presumably `catch_unwind` on the handler task pool — confirm at use sites).
impl std::panic::UnwindSafe for GlobalStateSnapshot {}
124 | ||
impl GlobalState {
    /// Builds the server state: spawns the VFS watcher and the main-loop task
    /// pool, wires up the flycheck channel, and applies the initial `config`.
    pub(crate) fn new(sender: Sender<lsp_server::Message>, config: Config) -> GlobalState {
        let loader = {
            let (sender, receiver) = unbounded::<vfs::loader::Message>();
            // The notify-based watcher forwards every loader message into our
            // channel; `unwrap` here means a closed receiver is a hard bug.
            let handle: vfs_notify::NotifyHandle =
                vfs::loader::Handle::spawn(Box::new(move |msg| sender.send(msg).unwrap()));
            let handle = Box::new(handle) as Box<dyn vfs::loader::Handle>;
            Handle { handle, receiver }
        };

        let task_pool = {
            let (sender, receiver) = unbounded();
            let handle = TaskPool::new_with_threads(sender, config.main_loop_num_threads());
            Handle { handle, receiver }
        };

        let analysis_host = AnalysisHost::new(config.lru_capacity());
        let (flycheck_sender, flycheck_receiver) = unbounded();
        let mut this = GlobalState {
            sender,
            req_queue: ReqQueue::default(),
            task_pool,
            loader,
            config: Arc::new(config.clone()),
            analysis_host,
            diagnostics: Default::default(),
            mem_docs: MemDocs::default(),
            semantic_tokens_cache: Arc::new(Default::default()),
            shutdown_requested: false,
            proc_macro_changed: false,
            last_reported_status: None,
            source_root_config: SourceRootConfig::default(),
            proc_macro_clients: vec![],

            flycheck: Arc::new([]),
            flycheck_sender,
            flycheck_receiver,

            vfs: Arc::new(RwLock::new((vfs::Vfs::default(), NoHashHashMap::default()))),
            vfs_config_version: 0,
            vfs_progress_config_version: 0,
            vfs_progress_n_total: 0,
            vfs_progress_n_done: 0,

            workspaces: Arc::new(Vec::new()),
            fetch_workspaces_queue: OpQueue::default(),
            prime_caches_queue: OpQueue::default(),

            fetch_build_data_queue: OpQueue::default(),
        };
        // Apply any required database inputs from the config.
        this.update_configuration(config);
        this
    }

    /// Drains pending VFS changes into the salsa database.
    ///
    /// Returns `false` when there was nothing to do. Otherwise normalizes file
    /// texts (recording line endings), updates source roots on create/delete,
    /// requests a workspace re-fetch when a project-defining file changed, and
    /// recomputes `proc_macro_changed`.
    pub(crate) fn process_changes(&mut self) -> bool {
        let _p = profile::span("GlobalState::process_changes");
        let mut workspace_structure_change = None;

        // Per-file collapsed change kind plus a "was just created" flag.
        let mut file_changes = FxHashMap::default();
        let (change, changed_files) = {
            let mut change = Change::new();
            // Hold the vfs write lock for the whole drain so the vfs and the
            // line-endings map stay mutually consistent.
            let (vfs, line_endings_map) = &mut *self.vfs.write();
            let mut changed_files = vfs.take_changes();
            if changed_files.is_empty() {
                return false;
            }

            // We need to fix up the changed events a bit. If we have a create or modify for a file
            // id that is followed by a delete we actually skip observing the file text from the
            // earlier event, to avoid problems later on.
            for changed_file in &changed_files {
                use vfs::ChangeKind::*;

                file_changes
                    .entry(changed_file.file_id)
                    .and_modify(|(change, just_created)| {
                        // None -> Delete => keep
                        // Create -> Delete => collapse
                        //
                        match (change, just_created, changed_file.change_kind) {
                            // latter `Delete` wins
                            (change, _, Delete) => *change = Delete,
                            // merge `Create` with `Create` or `Modify`
                            (Create, _, Create | Modify) => {}
                            // collapse identical `Modify`es
                            (Modify, _, Modify) => {}
                            // equivalent to `Modify`
                            (change @ Delete, just_created, Create) => {
                                *change = Modify;
                                *just_created = true;
                            }
                            // shouldn't occur, but collapse into `Create`
                            (change @ Delete, just_created, Modify) => {
                                *change = Create;
                                *just_created = true;
                            }
                            // shouldn't occur, but collapse into `Modify`
                            (Modify, _, Create) => {}
                        }
                    })
                    .or_insert((
                        changed_file.change_kind,
                        matches!(changed_file.change_kind, Create),
                    ));
            }

            // Re-append the collapsed events, dropping files that were created
            // and then deleted within this batch (net no-op).
            changed_files.extend(
                file_changes
                    .into_iter()
                    .filter(|(_, (change_kind, just_created))| {
                        !matches!((change_kind, just_created), (vfs::ChangeKind::Delete, true))
                    })
                    .map(|(file_id, (change_kind, _))| vfs::ChangedFile { file_id, change_kind }),
            );

            // A file was added or deleted
            let mut has_structure_changes = false;
            for file in &changed_files {
                if let Some(path) = vfs.file_path(file.file_id).as_path() {
                    let path = path.to_path_buf();
                    if reload::should_refresh_for_change(&path, file.change_kind) {
                        workspace_structure_change = Some(path);
                    }
                    if file.is_created_or_deleted() {
                        has_structure_changes = true;
                    }
                }

                // Clear native diagnostics when their file gets deleted
                if !file.exists() {
                    self.diagnostics.clear_native_for(file.file_id);
                }

                // Deleted or non-UTF-8 files become `None`, i.e. "no text".
                let text = if file.exists() {
                    let bytes = vfs.file_contents(file.file_id).to_vec();
                    String::from_utf8(bytes).ok().and_then(|text| {
                        let (text, line_endings) = LineEndings::normalize(text);
                        line_endings_map.insert(file.file_id, line_endings);
                        Some(Arc::new(text))
                    })
                } else {
                    None
                };
                change.change_file(file.file_id, text);
            }
            if has_structure_changes {
                let roots = self.source_root_config.partition(vfs);
                change.set_roots(roots);
            }
            (change, changed_files)
        };

        self.analysis_host.apply_change(change);

        {
            let raw_database = self.analysis_host.raw_database();
            // FIXME: ideally we should only trigger a workspace fetch for non-library changes
            // but something's going wrong with the source root business when we add a new local
            // crate see https://github.com/rust-lang/rust-analyzer/issues/13029
            if let Some(path) = workspace_structure_change {
                self.fetch_workspaces_queue
                    .request_op(format!("workspace vfs file change: {}", path.display()));
            }
            // A modified file belonging to a proc-macro crate means compiled
            // proc macros may be stale; created/deleted files are skipped.
            self.proc_macro_changed =
                changed_files.iter().filter(|file| !file.is_created_or_deleted()).any(|file| {
                    let crates = raw_database.relevant_crates(file.file_id);
                    let crate_graph = raw_database.crate_graph();

                    crates.iter().any(|&krate| crate_graph[krate].is_proc_macro)
                });
        }

        true
    }

    /// Produces an immutable snapshot of the current state; everything shared
    /// is behind an `Arc`, so this is cheap.
    pub(crate) fn snapshot(&self) -> GlobalStateSnapshot {
        GlobalStateSnapshot {
            config: Arc::clone(&self.config),
            workspaces: Arc::clone(&self.workspaces),
            analysis: self.analysis_host.analysis(),
            vfs: Arc::clone(&self.vfs),
            check_fixes: Arc::clone(&self.diagnostics.check_fixes),
            mem_docs: self.mem_docs.clone(),
            semantic_tokens_cache: Arc::clone(&self.semantic_tokens_cache),
            proc_macros_loaded: !self.fetch_build_data_queue.last_op_result().0.is_empty(),
            flycheck: self.flycheck.clone(),
        }
    }

    /// Sends an outgoing request to the client, registering `handler` to run
    /// when the matching response arrives (see `complete_request`).
    pub(crate) fn send_request<R: lsp_types::request::Request>(
        &mut self,
        params: R::Params,
        handler: ReqHandler,
    ) {
        let request = self.req_queue.outgoing.register(R::METHOD.to_string(), params, handler);
        self.send(request.into());
    }

    /// Dispatches a client response to the handler registered in
    /// `send_request`. Panics if no matching request is in flight.
    pub(crate) fn complete_request(&mut self, response: lsp_server::Response) {
        let handler = self
            .req_queue
            .outgoing
            .complete(response.id.clone())
            .expect("received response for unknown request");
        handler(self, response)
    }

    /// Sends a notification to the client.
    pub(crate) fn send_notification<N: lsp_types::notification::Notification>(
        &mut self,
        params: N::Params,
    ) {
        let not = lsp_server::Notification::new(N::METHOD.to_string(), params);
        self.send(not.into());
    }

    /// Records an incoming client request along with the instant it arrived,
    /// so `respond` can log how long handling took.
    pub(crate) fn register_request(
        &mut self,
        request: &lsp_server::Request,
        request_received: Instant,
    ) {
        self.req_queue
            .incoming
            .register(request.id.clone(), (request.method.clone(), request_received));
    }

    /// Completes an incoming request and sends the response; silently drops it
    /// if the request was already completed or cancelled.
    pub(crate) fn respond(&mut self, response: lsp_server::Response) {
        if let Some((method, start)) = self.req_queue.incoming.complete(response.id.clone()) {
            if let Some(err) = &response.error {
                // Surface server-side panics to the user instead of burying
                // them in the error response.
                if err.message.starts_with("server panicked") {
                    self.poke_rust_analyzer_developer(format!("{}, check the log", err.message))
                }
            }

            let duration = start.elapsed();
            tracing::debug!("handled {} - ({}) in {:0.2?}", method, response.id, duration);
            self.send(response.into());
        }
    }

    /// Cancels a pending incoming request, sending the cancellation response
    /// if it had not been answered yet.
    pub(crate) fn cancel(&mut self, request_id: lsp_server::RequestId) {
        if let Some(response) = self.req_queue.incoming.cancel(request_id) {
            self.send(response.into());
        }
    }

    /// Returns `true` if the given incoming request has already been answered
    /// (or cancelled).
    pub(crate) fn is_completed(&self, request: &lsp_server::Request) -> bool {
        self.req_queue.incoming.is_completed(&request.id)
    }

    // Panics if the transport channel is closed — at that point the main loop
    // cannot make progress anyway.
    fn send(&mut self, message: lsp_server::Message) {
        self.sender.send(message).unwrap()
    }
}
379 | ||
impl Drop for GlobalState {
    fn drop(&mut self) {
        // Ask salsa to cancel in-flight queries so background workers unwind
        // instead of keeping the database busy during teardown.
        self.analysis_host.request_cancellation();
    }
}
385 | ||
impl GlobalStateSnapshot {
    /// Resolves a client URL to a `FileId`; errors if the file is unknown.
    pub(crate) fn url_to_file_id(&self, url: &Url) -> Result<FileId> {
        url_to_file_id(&self.vfs.read().0, url)
    }

    /// Converts a `FileId` back into a client-facing URL.
    pub(crate) fn file_id_to_url(&self, id: FileId) -> Url {
        file_id_to_url(&self.vfs.read().0, id)
    }

    /// Builds a `LineIndex` for the file, combining the analysis line index
    /// with the recorded line endings and the configured position encoding.
    ///
    /// NOTE(review): indexing the line-endings map panics if the file was
    /// never normalized by `process_changes` — presumably an invariant here.
    pub(crate) fn file_line_index(&self, file_id: FileId) -> Cancellable<LineIndex> {
        let endings = self.vfs.read().1[&file_id];
        let index = self.analysis.file_line_index(file_id)?;
        let res = LineIndex { index, endings, encoding: self.config.position_encoding() };
        Ok(res)
    }

    /// Returns the client's document version for `url`, or `None` when the
    /// document is not open in the editor.
    pub(crate) fn url_file_version(&self, url: &Url) -> Option<i32> {
        let path = from_proto::vfs_path(url).ok()?;
        Some(self.mem_docs.get(&path)?.version)
    }

    /// Resolves a path anchored to another file into an absolute URL.
    pub(crate) fn anchored_path(&self, path: &AnchoredPathBuf) -> Url {
        let mut base = self.vfs.read().0.file_path(path.anchor);
        // Drop the anchor's final segment so we join relative to its directory.
        base.pop();
        let path = base.join(&path.path).unwrap();
        let path = path.as_path().unwrap();
        url_from_abs_path(path)
    }

    /// Looks up the `VfsPath` backing a `FileId`.
    pub(crate) fn file_id_to_file_path(&self, file_id: FileId) -> vfs::VfsPath {
        self.vfs.read().0.file_path(file_id)
    }

    /// Finds the cargo workspace and target whose root is the given crate's
    /// root file; `None` for non-cargo workspaces or unknown crates.
    pub(crate) fn cargo_target_for_crate_root(
        &self,
        crate_id: CrateId,
    ) -> Option<(&CargoWorkspace, Target)> {
        let file_id = self.analysis.crate_root(crate_id).ok()?;
        let path = self.vfs.read().0.file_path(file_id);
        let path = path.as_path()?;
        self.workspaces.iter().find_map(|ws| match ws {
            ProjectWorkspace::Cargo { cargo, .. } => {
                cargo.target_by_root(path).map(|it| (cargo, it))
            }
            ProjectWorkspace::Json { .. } => None,
            ProjectWorkspace::DetachedFiles { .. } => None,
        })
    }
}
435 | ||
436 | pub(crate) fn file_id_to_url(vfs: &vfs::Vfs, id: FileId) -> Url { | |
437 | let path = vfs.file_path(id); | |
438 | let path = path.as_path().unwrap(); | |
439 | url_from_abs_path(path) | |
440 | } | |
441 | ||
442 | pub(crate) fn url_to_file_id(vfs: &vfs::Vfs, url: &Url) -> Result<FileId> { | |
443 | let path = from_proto::vfs_path(url)?; | |
9c376795 | 444 | let res = vfs.file_id(&path).ok_or_else(|| format!("file not found: {path}"))?; |
064997fb FG |
445 | Ok(res) |
446 | } |