Skip to main content

Mountain/Binary/Main/
Entry.rs

1#![allow(unused_imports)]
2
3//! # Entry (Binary/Main)
4//!
5//! ## RESPONSIBILITIES
6//!
7//! Main application entry point that orchestrates the complete application
8//! lifecycle. This function coordinates:
9//! - Tokio runtime creation and management
10//! - CLI argument parsing
11//! - Application state initialization
12//! - Tauri application builder setup
13//! - Service initialization (Vine, Cocoon, Configuration)
14//! - Graceful shutdown handling
15//!
16//! ## ARCHITECTURAL ROLE
17//!
18//! The Entry module is the **primary entry point** in Mountain's architecture:
19//!
20//! ```text
21//! main.rs ──► Binary::Main::Entry::Fn()
22//!                                    │
23//!                                    ▼
24//! AppLifecycle ──► Service Initialization ──► Tauri App Run
25//!                                           │
26//!                                           ▼
27//!                                   Graceful Shutdown
28//! ```
29//!
30//! ## KEY COMPONENTS
31//!
32//! - **Fn()**: Main entry point exported as `Binary::Main::Fn()`
33//! - Tokio runtime management
34//! - Application state initialization via StateBuild
35//! - Tauri builder configuration via TauriBuild
36//! - Service orchestration (Vine, Cocoon, Configuration)
37//! - Event-driven lifecycle management
38//!
39//! ## ERROR HANDLING
40//!
41//! - Panics on fatal errors (Tokio runtime failure, Tauri build failure)
42//! - Logs errors for service initialization failures
43//! - Graceful degradation for non-critical service failures
44//!
45//! ## LOGGING
46//!
47//! Uses the TraceStep! macro for checkpoint logging at TRACE level.
48//! Additional logging at DEBUG, INFO, WARN, and ERROR levels throughout.
49//!
50//! ## PERFORMANCE CONSIDERATIONS
51//!
52//! - Tokio multi-threaded runtime for optimal performance
53//! - Asynchronous service initialization
54//! - Lazy initialization where possible
55//!
56//! ## TODO
57//! - [ ] Add crash recovery mechanism
58//! - [ ] Implement proper error dialog for startup failures
59//! - [ ] Add startup performance metrics
60
61use std::sync::{
62	Arc,
63	atomic::{AtomicBool, Ordering},
64};
65
66use tauri::{App, Manager, RunEvent, Wry};
67use Echo::Scheduler::{Scheduler::Scheduler, SchedulerBuilder::SchedulerBuilder};
68
69use crate::dev_log;
70use crate::{
71	// Crate root imports
72	ApplicationState::State::ApplicationState::ApplicationState,
73	Binary::Build::DnsCommands::{
74		StartupTime::init_dns_startup_time,
75		dns_get_forward_allowlist::dns_get_forward_allowlist,
76		dns_get_health_status::dns_get_health_status,
77		dns_get_server_info::dns_get_server_info,
78		dns_get_zone_info::dns_get_zone_info,
79		dns_health_check::dns_health_check,
80		dns_resolve::dns_resolve,
81		dns_test_resolution::dns_test_resolution,
82	},
83	// Binary submodule imports
84	Binary::Build::LocalhostPlugin::LocalhostPlugin as LocalhostPluginFn,
85	Binary::Build::LoggingPlugin::LoggingPlugin as LoggingPluginFn,
86	Binary::Build::Scheme::{self, DnsPort, init_service_registry, land_scheme_handler, register_land_service},
87	Binary::Build::ServiceRegistry::ServiceRegistry as ServiceRegistryFn,
88	Binary::Build::TauriBuild::TauriBuild as TauriBuildFn,
89	Binary::Build::WindowBuild::WindowBuild as WindowBuildFn,
90	Binary::Extension::ExtensionPopulate::ExtensionPopulate as ExtensionPopulateFn,
91	Binary::Extension::ScanPathConfigure::ScanPathConfigure as ScanPathConfigureFn,
92	Binary::Initialize::CliParse::Parse as CliParseFn,
93	Binary::Initialize::LogLevel::Resolve as ResolveLogLevel,
94	Binary::Initialize::PortSelector::BuildUrl as BuildPortUrl,
95	Binary::Initialize::PortSelector::Select as SelectPort,
96	Binary::Initialize::StateBuild::Build as BuildStateFn,
97	Binary::Register::AdvancedFeaturesRegister::AdvancedFeaturesRegister as AdvancedFeaturesRegisterFn,
98	Binary::Register::CommandRegister::CommandRegister as CommandRegisterFn,
99	Binary::Register::IPCServerRegister::IPCServerRegister as IPCServerRegisterFn,
100	Binary::Register::StatusReporterRegister::StatusReporterRegister as StatusReporterRegisterFn,
101	Binary::Register::WindSyncRegister::WindSyncRegister as WindSyncRegisterFn,
102	Binary::Service::CocoonStart::CocoonStart as CocoonStartFn,
103	Binary::Service::ConfigurationInitialize::ConfigurationInitialize as ConfigurationInitializeFn,
104	Binary::Service::VineStart::VineStart as VineStartFn,
105	Binary::Shutdown::RuntimeShutdown::RuntimeShutdown as RuntimeShutdownFn,
106	Binary::Shutdown::SchedulerShutdown::SchedulerShutdown as SchedulerShutdownFn,
107	Command,
108	Environment::MountainEnvironment::MountainEnvironment,
109	ProcessManagement::InitializationData,
110	RunTime::ApplicationRunTime::ApplicationRunTime,
111	Track,
112};
113use super::AppLifecycle::AppLifecycleSetup;
114
115// Note: Tauri commands are used with fully qualified paths in generate_handler
116// because the __cmd_* macros generated by #[tauri::command] are module-local.
117
118/// Logs a checkpoint message at TRACE level.
/// Emits a lifecycle checkpoint message at TRACE level.
///
/// Thin wrapper over `dev_log!` with the fixed `"lifecycle"` tag so boot
/// call sites stay terse and uniformly tagged.
macro_rules! TraceStep {
	($($arg:tt)*) => {{
		dev_log!("lifecycle", $($arg)*);
	}};
}
126
127/// The main function that orchestrates the application lifecycle.
128///
129/// This function:
130/// 1. Creates a Tokio runtime
131/// 2. Parses CLI arguments
132/// 3. Builds application state
133/// 4. Creates a scheduler
134/// 5. Selects a port for the local server
135/// 6. Resolves the log level
136/// 7. Sets up the Tauri builder
137/// 8. Configures the application lifecycle
138/// 9. Runs the Tauri application
139/// 10. Handles graceful shutdown
140pub fn Fn() {
141	// Initialize the native keyring store (Keychain on macOS) before any
142	// code path that calls SecretProvider. keyring-core 1.0 requires an
143	// explicit store set via set_default_store() before Entry::new() can
	// create or look up credentials. Omitting this causes "No default
145	// store has been set, so cannot search or create entries" on every
146	// secrets.get call, which in turn prevents extensions (e.g. Roo Code)
147	// from completing their webview initialisation.
148	//
149	// The `not_keyutils` parameter only matters on Linux - macOS ignores
150	// it and always routes to the native Keychain.
151	match keyring::use_native_store(false) {
152		Ok(()) => dev_log!("lifecycle", "[Boot] [Keyring] Native store initialized for secret management"),
153		Err(E) => {
154			dev_log!(
155				"lifecycle",
156				"warn: [Boot] [Keyring] Failed to initialize native store ({}); secret operations will fall back to \
157				 no-op",
158				E
159			)
160		},
161	}
162
163	// Open `Mountain.dev.log` up front. Forces `InitFileSink` to create
164	// the session log header on disk before any other code can panic, so
165	// an early crash still leaves a file with a timestamp + pid + tag
166	// context for post-mortem. Env vars are read from the shell here (the
167	// `.env.Land` load below may add MORE keys but never overrides
168	// Trace / Record because `set_var` only runs when a
169	// key is currently unset). Harmless to call: the inner `OnceLock`
170	// gates repeat invocations.
171	crate::IPC::DevLog::InitEager::Fn();
172
173	// -------------------------------------------------------------------------
174	// [Boot] [Env] Enhance the process environment with the user's
175	// interactive-shell PATH / NVM_DIR / HOMEBREW_PREFIX / JAVA_HOME / …
176	// before any child process is spawned. macOS GUI launches (Finder,
177	// Dock, Spotlight, `open <bundle>.app`) start the app with a minimal
178	// env where Homebrew, NVM, and similar are not on PATH; without this
179	// step the Cocoon node binary, language servers, and `git` calls
180	// from extensions all fall back to system defaults (or fail).
181	//
182	// Skip entirely when launched from a TTY (terminal already has the
183	// right env). On macOS, `std::io::stdin().is_terminal()` is the
184	// canonical check - waits for is-terminal 0.5; in the interim,
185	// probe `TERM_PROGRAM` env var which macOS Terminal.app and iTerm2
186	// both set. `TERM=xterm-256color` alone is unreliable (pipelines
187	// set it too). No-op when skip or the shell probe fails/times out.
188	// -------------------------------------------------------------------------
189	let IsTtyLaunch =
190		std::env::var("TERM_PROGRAM").is_ok() || std::env::var("TERM").map_or(false, |V| V != "dumb" && V != "unknown");
191
192	if !IsTtyLaunch {
193		crate::Environment::Utility::EnhanceShellEnvironment::Fn();
194	}
195
196	// -------------------------------------------------------------------------
197	// [Boot] [Env] Load .env.Land into process env so standalone binary
198	// invocations pick up Product*, Tier*, Network* vars without requiring
199	// the shell to pre-source the env file. Skip when launched from a TTY
200	// (terminal already has the right env).
201	// -------------------------------------------------------------------------
202	if !IsTtyLaunch {
203		{
204			fn LoadEnvFile(Path:&std::path::Path) -> bool {
205				let Ok(Content) = std::fs::read_to_string(Path) else {
206					return false;
207				};
208
209				for Line in Content.lines() {
210					let Trimmed = Line.trim();
211
212					if Trimmed.is_empty() || Trimmed.starts_with('#') {
213						continue;
214					}
215
216					if let Some((Key, Value)) = Trimmed.split_once('=') {
217						let CleanKey = Key.trim();
218
219						let CleanValue = Value.trim().trim_matches('"').trim_matches('\'');
220
221						if std::env::var_os(CleanKey).is_none() {
222							// SAFETY: set_var is called once per key during bootstrap
223							// before any threads read env (Tokio runtime starts later
224							// in this function).
225							unsafe { std::env::set_var(CleanKey, CleanValue) };
226						}
227					}
228				}
229
230				true
231			}
232
233			let mut Candidates:Vec<std::path::PathBuf> = Vec::new();
234
235			if let Ok(Cwd) = std::env::current_dir() {
236				Candidates.push(Cwd.join(".env.Land"));
237
238				if let Some(Parent) = Cwd.parent() {
239					Candidates.push(Parent.join(".env.Land"));
240				}
241
242				Candidates.push(Cwd.join(".env.Land.Sample"));
243
244				if let Some(Parent) = Cwd.parent() {
245					Candidates.push(Parent.join(".env.Land.Sample"));
246				}
247			}
248
249			// Repo-layout probe: Target/debug/<bin> → four hops up lands at Land/.
250			if let Ok(Exe) = std::env::current_exe() {
251				let Ancestors:Vec<&std::path::Path> = Exe.ancestors().collect();
252
253				for Candidate in Ancestors.iter().take(6) {
254					Candidates.push(Candidate.join(".env.Land"));
255
256					Candidates.push(Candidate.join(".env.Land.Sample"));
257				}
258			}
259
260			let mut Loaded = false;
261
262			for Candidate in Candidates {
263				if Candidate.exists() && LoadEnvFile(&Candidate) {
264					crate::dev_log!("lifecycle", "[Boot] [Env] Loaded env from {}", Candidate.display());
265
266					Loaded = true;
267
268					break;
269				}
270			}
271
272			if !Loaded {
273				crate::dev_log!(
274					"lifecycle",
275					"[Boot] [Env] No .env.Land / .env.Land.Sample found - using defaults"
276				);
277			}
278		}
279	}
280
281	// -------------------------------------------------------------------------
282	// [Boot] [Tier] Resolved tier banner (Plan A Wave 1.7 runtime banner)
283	// -------------------------------------------------------------------------
284	crate::LandFixTier::LogResolvedTiers();
285
286	// -------------------------------------------------------------------------
287	// [Boot] [Profile] Self-report (BATCH-13 step 6)
288	//
289	// Build.sh exports `Browser`/`Mountain`/`Electron`/`Bundle`/`Compiler`/
290	// `Profile` into the shell that invokes cargo. `build.rs` captures
291	// those into `cargo:rustc-env=LAND_*` so they're baked into the binary -
292	// runtime env lookups don't survive launching the binary from Finder /
293	// another shell. `option_env!` falls back to "unknown" when the build
294	// ran outside Build.sh (e.g. plain `cargo build`).
295	// -------------------------------------------------------------------------
296	{
297		let NamedProfile = option_env!("Profile").unwrap_or("unknown");
298
299		let Workbench = option_env!("Pack").unwrap_or("Unknown");
300
301		let Bundle = option_env!("Bundle").unwrap_or("");
302
303		let Compiler = option_env!("Compiler").unwrap_or("default");
304
305		dev_log!(
306			"lifecycle",
307			"[LandFix:Profile] Active profile={} workbench={} bundle={} compiler={}",
308			NamedProfile,
309			Workbench,
310			Bundle,
311			Compiler
312		);
313	}
314
315	// -------------------------------------------------------------------------
316	// [Boot] [Runtime] Tokio runtime creation
317	// -------------------------------------------------------------------------
318	TraceStep!("[Boot] [Runtime] Building Tokio runtime...");
319
320	let Runtime = tokio::runtime::Builder::new_multi_thread()
321		.enable_all()
322		.build()
323		.expect("FATAL: Cannot build Tokio runtime.");
324
325	TraceStep!("[Boot] [Runtime] Tokio runtime built.");
326
327	Runtime.block_on(async {
328		// ---------------------------------------------------------------------
329		// [Boot] [Telemetry] Hydrate runtime env from compile-baked
330		// Constants so spawned children (Cocoon Node, Sky webview)
331		// see the same telemetry config Mountain itself was built
332		// with - even when the user invokes the bare binary without
333		// sourcing `.env.Land.PostHog`. Idempotent + debug-only.
334		// Must run BEFORE PostHogPlugin::Initialize so the client
335		// reads the same effective env as the children.
336		// ---------------------------------------------------------------------
337		crate::Binary::Build::PostHogPlugin::HydrateRuntimeEnvironment::Fn();
338
339		// ---------------------------------------------------------------------
340		// [Boot] [PostHog] Initialize telemetry client first so any
341		// error captured during the rest of boot lands in the project.
342		// No-op in release builds or when Report=false.
343		// ---------------------------------------------------------------------
344		crate::Binary::Build::PostHogPlugin::Initialize::Fn().await;
345
346		// ---------------------------------------------------------------------
347		// [Boot] [Common::Telemetry] Initialize the shared dual-pipe
348		// stack so library crates linked into Mountain (Echo, Mist,
349		// Common) emit through the same client. The HydrateRuntime
350		// Environment step above populated the env so this picks up
351		// the same Authorize/Beam/Capture values Mountain's plugin
352		// already loaded. Idempotent.
353		// ---------------------------------------------------------------------
354		CommonLibrary::Telemetry::Initialize::Fn(CommonLibrary::Telemetry::Tier::Tier::Mountain).await;
355
356		// ---------------------------------------------------------------------
357		// [Boot] [Args] CLI parsing (using CliParse module)
358		// ---------------------------------------------------------------------
359		let _WorkspaceConfigurationPath = CliParseFn();
360		let _InitialFolders:Vec<String> = vec![];
361
362		// ---------------------------------------------------------------------
363		// [Boot] [State] ApplicationState (using StateBuild module)
364		// ---------------------------------------------------------------------
365		dev_log!("lifecycle", "[Boot] [State] Building ApplicationState...");
366
367		// Create application state directly (StateBuild::Build with default config)
368		let AppState = ApplicationState::default();
369
370		// -------------------------------------------------------------------
371		// [Boot] [Workspace] Seed initial workspace folders so every extension
372		// that calls `vscode.workspace.findFiles(...)` at activation has
373		// something to walk. Precedence: --folder flags → positional dirs →
374		// Open env → CWD fallback. See
375		// CliParse::ParseWorkspaceFolders.
376		// -------------------------------------------------------------------
377		{
378			let InitialFolderPaths = crate::Binary::Initialize::CliParse::ParseWorkspaceFolders();
379			if InitialFolderPaths.is_empty() {
380				dev_log!(
381					"lifecycle",
382					"[Boot] [Workspace] No initial folders resolved - editor will open in \"no folder\" mode."
383				);
384			} else {
385				use crate::ApplicationState::DTO::WorkspaceFolderStateDTO::WorkspaceFolderStateDTO;
386				let mut Folders:Vec<WorkspaceFolderStateDTO> = Vec::new();
387				for (Index, Path) in InitialFolderPaths.iter().enumerate() {
388					let Uri = match url::Url::from_directory_path(Path) {
389						Ok(U) => U,
390						Err(()) => {
391							dev_log!(
392								"lifecycle",
393								"warn: [Boot] [Workspace] Failed to build URL for {}; skipping",
394								Path.display()
395							);
396							continue;
397						},
398					};
399					let Name = Path
400						.file_name()
401						.and_then(|N| N.to_str())
402						.map(str::to_string)
403						.unwrap_or_else(|| Path.display().to_string());
404					match WorkspaceFolderStateDTO::New(Uri, Name, Index) {
405						Ok(Dto) => Folders.push(Dto),
406						Err(Error) => {
407							dev_log!(
408								"lifecycle",
409								"warn: [Boot] [Workspace] Failed to build folder DTO for {}: {}",
410								Path.display(),
411								Error
412							);
413						},
414					}
415				}
416				if !Folders.is_empty() {
417					// Seed state directly; Cocoon is not yet spawned at this
418					// point, so there is no sidecar to notify. The initial
419					// workspace makes it to Cocoon via `InitializeExtensionHost`'s
420					// `workspace` payload during its handshake instead.
421					AppState.Workspace.SetWorkspaceFolders(Folders);
422					dev_log!(
423						"lifecycle",
424						"[Boot] [Workspace] Seeded {} workspace folder(s).",
425						InitialFolderPaths.len()
426					);
427				}
428			}
429		}
430
431		dev_log!(
432			"lifecycle",
433			"[Boot] [State] ApplicationState created with {} workspace folders.",
434			AppState.Workspace.WorkspaceFolders.lock().map(|f| f.len()).unwrap_or(0)
435		);
436
437		// Create Arc for application state to be managed by Tauri
438		let AppStateArcForClosure = Arc::new(AppState.clone());
439
440		// ---------------------------------------------------------------------
441		// [Boot] [Runtime] Scheduler handles (using RuntimeBuild module)
442		// ---------------------------------------------------------------------
443		let Scheduler = Arc::new(SchedulerBuilder::Create().Build());
444		let SchedulerForClosure = Scheduler.clone();
445		TraceStep!("[Boot] [Echo] Scheduler handles prepared.");
446
447		// ---------------------------------------------------------------------
448		// [Boot] [Localhost] Port selection (using PortSelector module)
449		// ---------------------------------------------------------------------
450		let ServerPort = SelectPort();
451		let LocalhostUrl = BuildPortUrl(ServerPort);
452
453		// ---------------------------------------------------------------------
454		// [Boot] [Logging] Log level resolution (using LogLevel module)
455		// ---------------------------------------------------------------------
456		let log_level = ResolveLogLevel();
457
458		// ---------------------------------------------------------------------
459		// [Boot] [Tauri] Builder setup (using TauriBuild module)
460		// ---------------------------------------------------------------------
461		let Builder = TauriBuildFn();
462
463		Builder
464			.plugin(LoggingPluginFn(log_level))
465			.plugin(LocalhostPluginFn(ServerPort))
466			.manage(AppStateArcForClosure.clone())
467			.setup({
468				let LocalhostUrl = LocalhostUrl.clone();
469				let ServerPortForClosure = ServerPort;
470				move |app:&mut App| {
471					dev_log!("lifecycle", "[Lifecycle] [Setup] Setup hook started.");
472					dev_log!("lifecycle", "[Lifecycle] [Setup] LocalhostUrl={}", LocalhostUrl);
473
474					// ---------------------------------------------------------
475					// [Service Registry] Initialize service registry for land:// routing
476					// ---------------------------------------------------------
477					dev_log!(
478						"lifecycle",
479						"[Lifecycle] [Setup] Initializing ServiceRegistry for land:// scheme..."
480					);
481					let service_registry = ServiceRegistryFn::new();
482					init_service_registry(service_registry.clone());
483
484					// ---------------------------------------------------------
485					// [Service Registry] Register local HTTP services
486					// ---------------------------------------------------------
487					// Register the main code editor service
488					dev_log!(
489						"lifecycle",
490						"[Lifecycle] [Setup] Registering code.editor.land service on port {}",
491						ServerPortForClosure
492					);
493					register_land_service("code.editor.land", ServerPortForClosure);
494
495					// Register API editor service (same port for now, can be separate later)
496					register_land_service("api.editor.land", ServerPortForClosure);
497
498					// Register assets editor service (same port for now, can be separate later)
499					register_land_service("assets.editor.land", ServerPortForClosure);
500
501					// Make the registry available as managed state for Tauri commands
502					app.manage(service_registry);
503					dev_log!(
504						"lifecycle",
505						"[Lifecycle] [Setup] ServiceRegistry initialized and services registered."
506					);
507
508					// ---------------------------------------------------------
509					// [DNS Server] Start the Hickory DNS server
510					// ---------------------------------------------------------
511					// The DNS server must start BEFORE any webview loads to ensure
512					// that land:// protocol_resolution is available
513					dev_log!("lifecycle", "[Lifecycle] [Setup] Starting DNS server on preferred port 5380...");
514					let dns_port = Mist::start(5380).unwrap_or_else(|e| {
515						dev_log!(
516							"lifecycle",
517							"warn: [Lifecycle] [Setup] Failed to start DNS server on port 5380: {}",
518							e
519						);
520						// Fallback to random port if preferred port fails
521						Mist::start(0).unwrap_or_else(|e| {
522							dev_log!(
523								"lifecycle",
524								"error: [Lifecycle] [Setup] Completely failed to start DNS server: {}",
525								e
526							);
527							0 // Return 0 as error indicator
528						})
529					});
530
531					if dns_port == 0 {
532						dev_log!(
533							"lifecycle",
534							"warn: [Lifecycle] [Setup] DNS server failed to start, land:// protocol will not be \
535							 available"
536						);
537					} else {
538						dev_log!(
539							"lifecycle",
540							"[Lifecycle] [Setup] DNS server started successfully on port {}",
541							dns_port
542						);
543						// Initialize DNS startup time for tracking
544						crate::Binary::Build::DnsCommands::StartupTime::init_dns_startup_time();
545					}
546
547					// Register DnsPort as managed state for Tauri commands
548					app.manage(DnsPort(dns_port));
549
550					let AppHandle = app.handle().clone();
551					TraceStep!("[Lifecycle] [Setup] AppHandle acquired.");
552
553					// ---------------------------------------------------------
554					// Setup application lifecycle through AppLifecycle module
555					// ---------------------------------------------------------
556					let AppStateArcFromClosure = AppStateArcForClosure.clone();
557
558					if let Err(e) = AppLifecycleSetup(
559						app,
560						AppHandle.clone(),
561						LocalhostUrl.clone(),
562						SchedulerForClosure.clone(),
563						AppStateArcFromClosure,
564					) {
565						dev_log!("lifecycle", "error: [Lifecycle] [Setup] Failed to setup lifecycle: {}", e);
566					}
567
568					Ok(())
569				}
570			})
571			.register_asynchronous_uri_scheme_protocol("land", |_ctx, request, responder| {
572				// Implemented: delegate to synchronous scheme handler
573				let response = crate::Binary::Build::Scheme::land_scheme_handler(&request);
574				responder.respond(response);
575			})
576			.register_asynchronous_uri_scheme_protocol("vscode-file", |ctx, request, responder| {
577				// VS Code Electron workbench uses vscode-file:// to load assets.
578				// Maps to embedded frontend assets from Sky/Target.
579				let AppHandle = ctx.app_handle().clone();
580				std::thread::spawn(move || {
581					let response = crate::Binary::Build::Scheme::VscodeFileSchemeHandler(&AppHandle, &request);
582					responder.respond(response);
583				});
584			})
585			.register_asynchronous_uri_scheme_protocol("vscode-webview", |ctx, request, responder| {
586				// VS Code's `WebviewElement` wraps every extension webview in
587				// an iframe whose `src` is `vscode-webview://<authority>/index.html?...`.
588				// Without this handler the iframe stays blank and every
589				// extension that uses `webviewView` / `WebviewPanel` /
590				// `CustomEditor` (Roo Code, Claude, GitLens, custom editors)
591				// is dead. The handler serves the three-file `pre/`
592				// directory (`index.html`, `service-worker.js`, `fake.html`);
593				// extension HTML itself comes through later via the workbench's
594				// `swMessage` postMessage channel, not this scheme.
595				let AppHandle = ctx.app_handle().clone();
596				std::thread::spawn(move || {
597					let response = crate::Binary::Build::Scheme::VscodeWebviewSchemeHandler(&AppHandle, &request);
598					responder.respond(response);
599				});
600			})
601			.register_asynchronous_uri_scheme_protocol("vscode-webview-resource", |ctx, request, responder| {
602				// `vscode-webview-resource://<auth>/<path>` is the URI shape
603				// stock VS Code emits from `webview.asWebviewUri(...)`. The
604				// service worker inside `pre/index.html` would normally
605				// intercept these and proxy through the host. Land disables
606				// that SW (see Output `PatchWebviewIframeServiceWorker`)
607				// because WKWebView refuses SW registration on the
608				// `vscode-webview://` custom protocol; instead we register
609				// this scheme directly so any extension that hard-codes
610				// the URI form (or didn't go through Cocoon's `asWebviewUri`
611				// rewrite) still resolves. Strip the `<auth>` and forward
612				// the path to `VscodeFileSchemeHandler` by rewriting the
613				// URI to `vscode-file://vscode-app/<path>`.
614				let AppHandle = ctx.app_handle().clone();
615				std::thread::spawn(move || {
616					let Original = request.uri().to_string();
617					let RewrittenUri = match Original.strip_prefix("vscode-webview-resource://") {
618						Some(After) => {
619							let Rest = After.find('/').map(|I| &After[I..]).unwrap_or("/");
620							format!("vscode-file://vscode-app{}", Rest)
621						},
622						None => "vscode-file://vscode-app/".to_string(),
623					};
624					crate::dev_log!(
625						"scheme-assets",
626						"[LandFix:VscodeWebviewResource] {} -> {}",
627						Original,
628						RewrittenUri
629					);
630					let mut Builder = tauri::http::request::Request::builder().uri(&RewrittenUri);
631					for (Name, Value) in request.headers().iter() {
632						Builder = Builder.header(Name, Value);
633					}
634					let Forwarded = Builder
635						.method(request.method().clone())
636						.body(request.body().clone())
637						.unwrap_or_else(|_| request.clone());
638					let response = crate::Binary::Build::Scheme::VscodeFileSchemeHandler(&AppHandle, &Forwarded);
639					responder.respond(response);
640				});
641			})
642			.register_asynchronous_uri_scheme_protocol("vscode-resource", |ctx, request, responder| {
643				// Legacy stock-VS Code resource scheme. Same handling as
644				// `vscode-webview-resource` - rewrite to `vscode-file://`
645				// and dispatch through the existing file handler.
646				let AppHandle = ctx.app_handle().clone();
647				std::thread::spawn(move || {
648					let Original = request.uri().to_string();
649					let RewrittenUri = match Original.strip_prefix("vscode-resource://") {
650						Some(After) => {
651							let Rest = After.find('/').map(|I| &After[I..]).unwrap_or("/");
652							format!("vscode-file://vscode-app{}", Rest)
653						},
654						None => "vscode-file://vscode-app/".to_string(),
655					};
656					crate::dev_log!("scheme-assets", "[LandFix:VscodeResource] {} -> {}", Original, RewrittenUri);
657					let mut Builder = tauri::http::request::Request::builder().uri(&RewrittenUri);
658					for (Name, Value) in request.headers().iter() {
659						Builder = Builder.header(Name, Value);
660					}
661					let Forwarded = Builder
662						.method(request.method().clone())
663						.body(request.body().clone())
664						.unwrap_or_else(|_| request.clone());
665					let response = crate::Binary::Build::Scheme::VscodeFileSchemeHandler(&AppHandle, &Forwarded);
666					responder.respond(response);
667				});
668			})
669			.plugin(tauri_plugin_dialog::init())
670			.plugin(tauri_plugin_fs::init())
671			.invoke_handler(tauri::generate_handler![
672				crate::Binary::Tray::SwitchTrayIcon::SwitchTrayIcon,
673
674				crate::Binary::IPC::WorkbenchConfigurationCommand::MountainGetWorkbenchConfiguration,
675
676				Command::TreeView::GetTreeViewChildren::GetTreeViewChildren,
677
678				Command::LanguageFeature::MountainProvideHover::MountainProvideHover,
679
680				Command::LanguageFeature::MountainProvideCompletions::MountainProvideCompletions,
681
682				Command::LanguageFeature::MountainProvideDefinition::MountainProvideDefinition,
683
684				Command::LanguageFeature::MountainProvideReferences::MountainProvideReferences,
685
686				Command::SourceControlManagement::GetAllSourceControlManagementState::GetAllSourceControlManagementState,
687
688				Command::Keybinding::GetResolvedKeybinding::GetResolvedKeybinding,
689
690				Track::FrontendCommand::DispatchFrontendCommand::DispatchFrontendCommand,
691
692				Track::UIRequest::ResolveUIRequest::ResolveUIRequest,
693
694				Track::Webview::MountainWebviewPostMessageFromGuest::MountainWebviewPostMessageFromGuest,
695
696				crate::Binary::IPC::MessageReceiveCommand::MountainIPCReceiveMessage,
697
698				crate::Binary::IPC::StatusGetCommand::MountainIPCGetStatus,
699
700				crate::Binary::IPC::InvokeCommand::MountainIPCInvoke,
701
702				crate::Binary::IPC::WindConfigurationCommand::MountainGetWindDesktopConfiguration,
703
704				crate::Binary::IPC::ConfigurationUpdateCommand::MountainUpdateConfigurationFromWind,
705
706				crate::Binary::IPC::ConfigurationSyncCommand::MountainSynchronizeConfiguration,
707
708				crate::Binary::IPC::ConfigurationStatusCommand::MountainGetConfigurationStatus,
709
710				crate::Binary::IPC::IPCStatusCommand::MountainGetIPCStatus,
711
712				crate::Binary::IPC::IPCStatusHistoryCommand::MountainGetIPCStatusHistory,
713
714				crate::Binary::IPC::IPCStatusReportingStartCommand::MountainStartIPCStatusReporting,
715
716				crate::Binary::IPC::PerformanceStatsCommand::MountainGetPerformanceStats,
717
718				crate::Binary::IPC::CacheStatsCommand::MountainGetCacheStats,
719
720				crate::Binary::IPC::CollaborationSessionCommand::MountainCreateCollaborationSession,
721
722				crate::Binary::IPC::CollaborationSessionCommand::MountainGetCollaborationSessions,
723
724				crate::Binary::IPC::DocumentSyncCommand::MountainAddDocumentForSync,
725
726				crate::Binary::IPC::DocumentSyncCommand::MountainGetSyncStatus,
727
728				crate::Binary::IPC::UpdateSubscriptionCommand::MountainSubscribeToUpdates,
729
730				crate::Binary::IPC::ConfigurationDataCommand::GetConfigurationData,
731
732				crate::Binary::IPC::ConfigurationDataCommand::SaveConfigurationData,
733
734				crate::Binary::IPC::WorkspaceFolderCommand::MountainWorkspaceOpenFolder,
735
736				crate::Binary::IPC::WorkspaceFolderCommand::MountainWorkspaceListFolders,
737
738				crate::Binary::IPC::WorkspaceFolderCommand::MountainWorkspaceCloseAllFolders,
739
740				crate::Binary::Build::DnsCommands::dns_get_server_info::dns_get_server_info,
741
742				crate::Binary::Build::DnsCommands::dns_get_zone_info::dns_get_zone_info,
743
744				crate::Binary::Build::DnsCommands::dns_get_forward_allowlist::dns_get_forward_allowlist,
745
746				crate::Binary::Build::DnsCommands::dns_get_health_status::dns_get_health_status,
747
748				crate::Binary::Build::DnsCommands::dns_resolve::dns_resolve,
749
750				crate::Binary::Build::DnsCommands::dns_test_resolution::dns_test_resolution,
751
752				crate::Binary::Build::DnsCommands::dns_health_check::dns_health_check,
753
754				// Process commands (direct Tauri invoke from ProcessPolyfill)
755				crate::Binary::IPC::ProcessCommand::process_get_exec_path::process_get_exec_path,
756
757				crate::Binary::IPC::ProcessCommand::process_get_platform::process_get_platform,
758
759				crate::Binary::IPC::ProcessCommand::process_get_arch::process_get_arch,
760
761				crate::Binary::IPC::ProcessCommand::process_get_pid::process_get_pid,
762
763				crate::Binary::IPC::ProcessCommand::process_get_shell_env::process_get_shell_env,
764
765				crate::Binary::IPC::ProcessCommand::process_get_memory_info::process_get_memory_info,
766
767				// Health check commands (direct Tauri invoke from SharedProcessProxy)
768				crate::Binary::IPC::HealthCommand::cocoon_extension_host_health::cocoon_extension_host_health,
769
770				crate::Binary::IPC::HealthCommand::cocoon_search_service_health::cocoon_search_service_health,
771
772				crate::Binary::IPC::HealthCommand::cocoon_debug_service_health::cocoon_debug_service_health,
773
774				crate::Binary::IPC::HealthCommand::shared_process_service_health::shared_process_service_health,
775
776				crate::Binary::IPC::RenderDevLogCommand::RenderDevLog,
777
778				// LAND-PATCH B7-S6 P14.5: Vine notification broadcast
779				// subscription. `vine_subscribe_notifications` opens a
780				// Tauri Channel that drains the process-wide
781				// `Vine::Client` broadcast into the webview; Effect-TS
782				// `VineNotificationsLive` Layer wraps it as a
783				// `Stream<NotificationFrame>`. `vine_subscriber_count`
784				// is a diagnostic for verifying registrations didn't
785				// leak across reloads.
786				crate::Binary::IPC::VineSubscribeCommand::vine_subscribe_notifications,
787
788				crate::Binary::IPC::VineSubscribeCommand::vine_subscriber_count,
789			])
790			.build(tauri::generate_context!())
791			.expect("FATAL: Error while building Mountain Tauri application")
792			.run(move |app_handle:&tauri::AppHandle, event:tauri::RunEvent| {
793				// Debug-only: log selected lifecycle events
794				if cfg!(debug_assertions) {
795					match &event {
796						RunEvent::MainEventsCleared => {},
797						RunEvent::WindowEvent { .. } => {},
798						_ => dev_log!("lifecycle", "[Lifecycle] [RunEvent] {:?}", event),
799					}
800				}
801
802				if let RunEvent::ExitRequested { api, .. } = event {
803					// Shutdown runs once. The graceful path ends with
804					// `app_handle.exit(0)`, which Tauri re-delivers as a
805					// second `ExitRequested { code: Some(0) }`. On re-entry
806					// we must NOT `prevent_exit` or spawn the shutdown task
807					// again - Cocoon has already been SIGKILLed and the
808					// second pass would log spurious "tcp connect error"
809					// warnings trying to notify a dead sidecar.
810					static SHUTTING_DOWN:AtomicBool = AtomicBool::new(false);
811					if SHUTTING_DOWN.swap(true, Ordering::SeqCst) {
812						return;
813					}
814
815					dev_log!(
816						"lifecycle",
817						"warn: [Lifecycle] [Shutdown] Exit requested. Starting graceful shutdown..."
818					);
819					api.prevent_exit();
820
821					let SchedulerHandle = Scheduler.clone();
822					let app_handle_clone = app_handle.clone();
823
824					tokio::spawn(async move {
825						dev_log!("lifecycle", "[Lifecycle] [Shutdown] Shutting down ApplicationRunTime...");
826						let _ = RuntimeShutdownFn(&app_handle_clone).await;
827
828						dev_log!("lifecycle", "[Lifecycle] [Shutdown] Stopping Echo scheduler...");
829						let _ = SchedulerShutdownFn(SchedulerHandle).await;
830
831						dev_log!("lifecycle", "[Lifecycle] [Shutdown] Done. Exiting process.");
832						app_handle_clone.exit(0);
833					});
834				}
835			});
836
837		dev_log!("lifecycle", "[Lifecycle] [Exit] Mountain application has shut down.");
838	});
839}