// Mountain/IPC/StatusReporter/Reporter.rs

1#![allow(non_snake_case)]
2
3//! `StatusReporter` aggregator - holds the IPC server handle,
4//! status history ring (last 100), error counter, performance
5//! / health / service-registry shared state, and emits
6//! periodic snapshots to Sky.
7//!
8//! The struct + 30-method impl live in one file because the
9//! method bodies are tightly coupled with the private fields
10//! and with the DTO siblings; splitting per-method forces
11//! ~30 trivial wrappers without payback.
12
13use std::{
14	collections::{HashMap, HashSet},
15	sync::{Arc, Mutex},
16	time::{Duration, SystemTime},
17};
18
19use tauri::Emitter;
20use tokio::sync::RwLock;
21
22use crate::{
23	IPC::StatusReporter::{
24		ComprehensiveStatusReport::Struct as ComprehensiveStatusReport,
25		ConnectionStatus::Struct as ConnectionStatus,
26		HealthIssue::Struct as HealthIssue,
27		HealthIssueType::Enum as HealthIssueType,
28		HealthMonitor::Struct as HealthMonitor,
29		IPCStatusReport::Struct as IPCStatusReport,
30		MessageStats::Struct as MessageStats,
31		PerformanceMetrics::Struct as PerformanceMetrics,
32		ServiceInfo::Struct as ServiceInfo,
33		ServiceMetrics::Struct as ServiceMetrics,
34		ServiceRegistry::Struct as ServiceRegistry,
35		ServiceStatus::Enum as ServiceStatus,
36		SeverityLevel::Enum as SeverityLevel,
37	},
38	RunTime::ApplicationRunTime::ApplicationRunTime,
39	dev_log,
40};
41
/// Aggregate state for the IPC status reporter. Every shared field is
/// `Arc`-wrapped so the handle-copies produced by `clone_reporter()` (used
/// by the spawned background tasks) observe the same underlying data.
pub struct Struct {
	/// Application runtime handle; used to emit Tauri events to Sky.
	pub(super) runtime:Arc<ApplicationRunTime>,

	/// IPC server queried for connection/queue stats; `None` until
	/// `set_ipc_server` is called, and report generation fails until then.
	pub(super) ipc_server:Option<Arc<crate::IPC::TauriIPCServer_Old::TauriIPCServer>>,

	/// Rolling window of recent reports; trimmed to the newest 100 entries.
	pub(super) status_history:Arc<Mutex<Vec<IPCStatusReport>>>,

	/// Construction instant; origin for all uptime calculations.
	pub(super) start_time:SystemTime,

	/// Errors recorded via `record_error`; reset to 0 by `attempt_recovery`.
	pub(super) error_count:Arc<Mutex<u32>>,

	/// Most recently derived performance figures.
	pub(super) performance_metrics:Arc<Mutex<PerformanceMetrics>>,

	/// Health score, detected issues, and recovery-attempt counter.
	pub(super) health_monitor:Arc<Mutex<HealthMonitor>>,

	/// Known services keyed by name, plus discovery bookkeeping.
	pub(super) service_registry:Arc<RwLock<ServiceRegistry>>,

	/// Names of every service seen by discovery so far.
	pub(super) discovered_services:Arc<RwLock<HashSet<String>>>,
}
61
62impl Struct {
63	pub fn new(runtime:Arc<ApplicationRunTime>) -> Self {
64		dev_log!("lifecycle", "Creating IPC status reporter");
65
66		Self {
67			runtime,
68
69			ipc_server:None,
70
71			status_history:Arc::new(Mutex::new(Vec::new())),
72
73			start_time:SystemTime::now(),
74
75			error_count:Arc::new(Mutex::new(0)),
76
77			performance_metrics:Arc::new(Mutex::new(PerformanceMetrics {
78				messages_per_second:0.0,
79				average_latency_ms:0.0,
80				peak_latency_ms:0.0,
81				compression_ratio:1.0,
82				connection_pool_utilization:0.0,
83				memory_usage_mb:0.0,
84				cpu_usage_percent:0.0,
85				last_update:SystemTime::now()
86					.duration_since(SystemTime::UNIX_EPOCH)
87					.unwrap_or_default()
88					.as_millis() as u64,
89			})),
90
91			health_monitor:Arc::new(Mutex::new(HealthMonitor {
92				health_score:100.0,
93				last_health_check:SystemTime::now()
94					.duration_since(SystemTime::UNIX_EPOCH)
95					.unwrap_or_default()
96					.as_millis() as u64,
97				issues_detected:Vec::new(),
98				recovery_attempts:0,
99			})),
100
101			service_registry:Arc::new(RwLock::new(ServiceRegistry {
102				services:HashMap::new(),
103				last_discovery:SystemTime::now()
104					.duration_since(SystemTime::UNIX_EPOCH)
105					.unwrap_or_default()
106					.as_millis() as u64,
107				discovery_interval:30000,
108			})),
109
110			discovered_services:Arc::new(RwLock::new(HashSet::new())),
111		}
112	}
113
	/// Attaches the IPC server handle that status and metrics queries run
	/// against; `generate_status_report` fails until this has been called.
	pub fn set_ipc_server(&mut self, ipc_server:Arc<crate::IPC::TauriIPCServer_Old::TauriIPCServer>) {
		self.ipc_server = Some(ipc_server);
	}
117
118	pub async fn generate_status_report(&self) -> Result<IPCStatusReport, String> {
119		dev_log!("lifecycle", "Generating IPC status report");
120
121		let ipc_server = self.ipc_server.as_ref().ok_or("IPC Server not set".to_string())?;
122
123		let connection_status = ConnectionStatus {
124			is_connected:ipc_server.get_connection_status()?,
125
126			last_heartbeat:SystemTime::now()
127				.duration_since(SystemTime::UNIX_EPOCH)
128				.unwrap_or_default()
129				.as_secs(),
130
131			connection_duration:SystemTime::now().duration_since(self.start_time).unwrap_or_default().as_secs(),
132		};
133
134		let message_queue_size = ipc_server.get_queue_size()?;
135
136		let active_listeners = vec!["configuration".to_string(), "file".to_string(), "storage".to_string()];
137
138		let recent_messages = vec![
139			MessageStats {
140				channel:"configuration".to_string(),
141
142				message_count:10,
143
144				last_message_time:SystemTime::now()
145					.duration_since(SystemTime::UNIX_EPOCH)
146					.unwrap_or_default()
147					.as_secs(),
148
149				average_processing_time_ms:5.0,
150			},
151			MessageStats {
152				channel:"file".to_string(),
153
154				message_count:5,
155
156				last_message_time:SystemTime::now()
157					.duration_since(SystemTime::UNIX_EPOCH)
158					.unwrap_or_default()
159					.as_secs() - 10,
160
161				average_processing_time_ms:15.0,
162			},
163		];
164
165		let error_count = {
166			let guard = self
167				.error_count
168				.lock()
169				.map_err(|e| format!("Failed to get error count: {}", e))?;
170
171			*guard
172		};
173
174		let uptime_seconds = SystemTime::now().duration_since(self.start_time).unwrap_or_default().as_secs();
175
176		let report = IPCStatusReport {
177			timestamp:SystemTime::now()
178				.duration_since(SystemTime::UNIX_EPOCH)
179				.unwrap_or_default()
180				.as_millis() as u64,
181
182			connection_status,
183
184			message_queue_size,
185
186			active_listeners,
187
188			recent_messages,
189
190			error_count,
191
192			uptime_seconds,
193		};
194
195		{
196			let mut history = self
197				.status_history
198				.lock()
199				.map_err(|e| format!("Failed to access status history: {}", e))?;
200
201			history.push(report.clone());
202
203			if history.len() > 100 {
204				history.remove(0);
205			}
206		}
207
208		Ok(report)
209	}
210
211	pub async fn report_to_sky(&self) -> Result<(), String> {
212		dev_log!("lifecycle", "Reporting IPC status to Sky");
213
214		let report = self.generate_status_report().await?;
215
216		self.update_performance_metrics().await?;
217
218		self.perform_health_check().await?;
219
220		let performance_metrics = self.get_performance_metrics()?;
221
222		let health_status = self.get_health_status()?;
223
224		let comprehensive_report = ComprehensiveStatusReport {
225			basic_status:report.clone(),
226
227			performance_metrics:performance_metrics.clone(),
228
229			health_status:health_status.clone(),
230
231			timestamp:SystemTime::now()
232				.duration_since(SystemTime::UNIX_EPOCH)
233				.unwrap_or_default()
234				.as_millis() as u64,
235		};
236
237		if let Err(e) = self
238			.runtime
239			.Environment
240			.ApplicationHandle
241			.emit("ipc-status-report", &comprehensive_report)
242		{
243			dev_log!(
244				"lifecycle",
245				"error: [StatusReporter] Failed to emit status report to Sky: {}",
246				e
247			);
248
249			return Err(format!("Failed to emit status report: {}", e));
250		}
251
252		if let Err(e) = self
253			.runtime
254			.Environment
255			.ApplicationHandle
256			.emit("ipc-performance-metrics", &performance_metrics)
257		{
258			dev_log!("lifecycle", "error: [StatusReporter] Failed to emit performance metrics: {}", e);
259		}
260
261		if let Err(e) = self
262			.runtime
263			.Environment
264			.ApplicationHandle
265			.emit("ipc-health-status", &health_status)
266		{
267			dev_log!("lifecycle", "error: [StatusReporter] Failed to emit health status: {}", e);
268		}
269
270		dev_log!("lifecycle", "Comprehensive status report sent to Sky");
271
272		Ok(())
273	}
274
275	pub async fn start_periodic_reporting(&self, interval_seconds:u64) -> Result<(), String> {
276		dev_log!(
277			"lifecycle",
278			"[StatusReporter] Starting periodic status reporting (interval: {}s)",
279			interval_seconds
280		);
281
282		let reporter = self.clone_reporter();
283
284		tokio::spawn(async move {
285			let mut interval = tokio::time::interval(Duration::from_secs(interval_seconds));
286
287			loop {
288				interval.tick().await;
289
290				if let Err(e) = reporter.report_to_sky().await {
291					dev_log!("lifecycle", "error: [StatusReporter] Periodic reporting failed: {}", e);
292				}
293			}
294		});
295
296		Ok(())
297	}
298
299	pub fn record_error(&self) {
300		if let Ok(mut error_count) = self.error_count.lock() {
301			*error_count += 1;
302		}
303	}
304
305	pub fn get_status_history(&self) -> Result<Vec<IPCStatusReport>, String> {
306		let history = self
307			.status_history
308			.lock()
309			.map_err(|e| format!("Failed to access status history: {}", e))?;
310
311		Ok(history.clone())
312	}
313
	/// Instant this reporter was constructed; origin for uptime figures.
	pub fn get_start_time(&self) -> SystemTime { self.start_time }
315
316	pub async fn update_performance_metrics(&self) -> Result<(), String> {
317		let ipc_server = self.ipc_server.as_ref().ok_or("IPC Server not set".to_string())?;
318
319		let connection_stats = ipc_server.get_connection_stats().await.unwrap_or_default();
320
321		let messages_per_second = self.calculate_message_rate().await;
322
323		let average_latency_ms = self.calculate_average_latency().await;
324
325		let peak_latency_ms = self.calculate_peak_latency().await;
326
327		let compression_ratio = self.calculate_compression_ratio().await;
328
329		let connection_pool_utilization = self.calculate_pool_utilization(&connection_stats).await;
330
331		let memory_usage_mb = self.get_memory_usage().await;
332
333		let cpu_usage_percent = self.get_cpu_usage().await;
334
335		let last_update = SystemTime::now()
336			.duration_since(SystemTime::UNIX_EPOCH)
337			.unwrap_or_default()
338			.as_millis() as u64;
339
340		let mut metrics = self
341			.performance_metrics
342			.lock()
343			.map_err(|e| format!("Failed to access performance metrics: {}", e))?;
344
345		metrics.messages_per_second = messages_per_second;
346
347		metrics.average_latency_ms = average_latency_ms;
348
349		metrics.peak_latency_ms = peak_latency_ms;
350
351		metrics.compression_ratio = compression_ratio;
352
353		metrics.connection_pool_utilization = connection_pool_utilization;
354
355		metrics.memory_usage_mb = memory_usage_mb;
356
357		metrics.cpu_usage_percent = cpu_usage_percent;
358
359		metrics.last_update = last_update;
360
361		dev_log!(
362			"lifecycle",
363			"[StatusReporter] Performance metrics updated: {:.2} msg/s, {:.2}ms latency",
364			metrics.messages_per_second,
365			metrics.average_latency_ms
366		);
367
368		Ok(())
369	}
370
371	pub async fn perform_health_check(&self) -> Result<(), String> {
372		let mut health_monitor = self
373			.health_monitor
374			.lock()
375			.map_err(|e| format!("Failed to access health monitor: {}", e))?;
376
377		let mut health_score:f64 = 100.0;
378
379		let mut issues = Vec::new();
380
381		if let Some(ipc_server) = &self.ipc_server {
382			if !ipc_server.get_connection_status()? {
383				health_score -= 25.0;
384
385				issues.push(HealthIssue {
386					issue_type:HealthIssueType::ConnectionLoss,
387					severity:SeverityLevel::Critical,
388					description:"IPC connection lost".to_string(),
389					detected_at:SystemTime::now()
390						.duration_since(SystemTime::UNIX_EPOCH)
391						.unwrap_or_default()
392						.as_millis() as u64,
393					resolved_at:None,
394				});
395			}
396		}
397
398		if let Some(ipc_server) = &self.ipc_server {
399			let queue_size = ipc_server.get_queue_size()?;
400
401			if queue_size > 100 {
402				health_score -= 15.0;
403
404				issues.push(HealthIssue {
405					issue_type:HealthIssueType::QueueOverflow,
406					severity:SeverityLevel::High,
407					description:format!("Message queue overflow: {} messages", queue_size),
408					detected_at:SystemTime::now()
409						.duration_since(SystemTime::UNIX_EPOCH)
410						.unwrap_or_default()
411						.as_millis() as u64,
412					resolved_at:None,
413				});
414			}
415		}
416
417		let metrics = self
418			.performance_metrics
419			.lock()
420			.map_err(|e| format!("Failed to access performance metrics: {}", e))?;
421
422		if metrics.average_latency_ms > 100.0 {
423			health_score -= 20.0;
424
425			issues.push(HealthIssue {
426				issue_type:HealthIssueType::HighLatency,
427				severity:SeverityLevel::High,
428				description:format!("High latency detected: {:.2}ms", metrics.average_latency_ms),
429				detected_at:SystemTime::now()
430					.duration_since(SystemTime::UNIX_EPOCH)
431					.unwrap_or_default()
432					.as_millis() as u64,
433				resolved_at:None,
434			});
435		}
436
437		health_monitor.health_score = health_score.max(0.0);
438
439		health_monitor.issues_detected = issues;
440
441		health_monitor.last_health_check = SystemTime::now()
442			.duration_since(SystemTime::UNIX_EPOCH)
443			.unwrap_or_default()
444			.as_millis() as u64;
445
446		if health_score < 70.0 {
447			dev_log!(
448				"lifecycle",
449				"warn: [StatusReporter] Health check failed: score {:.1}%",
450				health_score
451			);
452
453			if let Err(e) = self
454				.runtime
455				.Environment
456				.ApplicationHandle
457				.emit("ipc-health-alert", &health_monitor.clone())
458			{
459				dev_log!("lifecycle", "error: [StatusReporter] Failed to emit health alert: {}", e);
460			}
461		}
462
463		Ok(())
464	}
465
466	async fn calculate_message_rate(&self) -> f64 {
467		let history = self.get_status_history().unwrap_or_default();
468
469		if history.len() < 2 {
470			return 0.0;
471		}
472
473		let recent_reports:Vec<&IPCStatusReport> = history.iter().rev().take(5).collect();
474
475		let total_messages:u32 = recent_reports
476			.iter()
477			.map(|report| report.recent_messages.iter().map(|m| m.message_count).sum::<u32>())
478			.sum();
479
480		let time_span = if recent_reports.len() > 1 {
481			let first_time = recent_reports.first().unwrap().timestamp;
482
483			let last_time = recent_reports.last().unwrap().timestamp;
484
485			(last_time - first_time) as f64 / 1000.0
486		} else {
487			1.0
488		};
489
490		total_messages as f64 / time_span.max(1.0)
491	}
492
493	async fn calculate_average_latency(&self) -> f64 {
494		let history = self.get_status_history().unwrap_or_default();
495
496		if history.is_empty() {
497			return 0.0;
498		}
499
500		let recent_reports:Vec<&IPCStatusReport> = history.iter().rev().take(10).collect();
501
502		let total_latency:f64 = recent_reports
503			.iter()
504			.flat_map(|report| &report.recent_messages)
505			.map(|msg| msg.average_processing_time_ms)
506			.sum();
507
508		let message_count = recent_reports.iter().flat_map(|report| &report.recent_messages).count();
509
510		total_latency / message_count.max(1) as f64
511	}
512
513	async fn calculate_peak_latency(&self) -> f64 {
514		let history = self.get_status_history().unwrap_or_default();
515
516		history
517			.iter()
518			.flat_map(|report| &report.recent_messages)
519			.map(|msg| msg.average_processing_time_ms)
520			.fold(0.0, f64::max)
521	}
522
	/// Placeholder: hard-coded ratio — TODO wire to real transport stats.
	async fn calculate_compression_ratio(&self) -> f64 { 2.5 }
524
525	async fn calculate_pool_utilization(&self, stats:&crate::IPC::TauriIPCServer_Old::ConnectionStats) -> f64 {
526		if stats.total_connections == 0 {
527			return 0.0;
528		}
529
530		stats.total_connections as f64 / stats.max_connections as f64
531	}
532
	/// Placeholder: hard-coded MB figure — TODO sample the real process RSS.
	async fn get_memory_usage(&self) -> f64 { 50.0 }
534
	/// Placeholder: hard-coded percentage — TODO sample real CPU usage.
	async fn get_cpu_usage(&self) -> f64 { 15.0 }
536
	/// Populates the service registry from a fixed list of core services and
	/// emits the result to Sky on `mountain_service_discovery`.
	///
	/// NOTE(review): the service list, versions, statuses, and the metric
	/// values behind the `calculate_service_*` helpers are hard-coded
	/// placeholders, not live discovery — confirm before trusting them.
	///
	/// Both write locks (registry + discovered set) are tokio `RwLock`s, so
	/// holding them across the awaited metric calls below is safe.
	///
	/// # Errors
	/// Currently never fails, but keeps the `Result` signature for when real
	/// discovery replaces the stub.
	pub async fn discover_services(&self) -> Result<Vec<ServiceInfo>, String> {
		dev_log!("lifecycle", "Starting service discovery");

		let mut registry = self.service_registry.write().await;

		let mut discovered = self.discovered_services.write().await;

		let mut services = Vec::new();

		let core_services = vec![
			("EditorService", "1.0.0", ServiceStatus::Running),
			("ExtensionHostService", "1.0.0", ServiceStatus::Running),
			("ConfigurationService", "1.0.0", ServiceStatus::Running),
			("FileService", "1.0.0", ServiceStatus::Running),
			("StorageService", "1.0.0", ServiceStatus::Running),
		];

		for (name, version, status) in core_services {
			let service_info = ServiceInfo {
				name:name.to_string(),

				version:version.to_string(),

				status:status.clone(),

				last_heartbeat:SystemTime::now()
					.duration_since(SystemTime::UNIX_EPOCH)
					.unwrap_or_default()
					.as_millis() as u64,

				// Service uptime is approximated by the reporter's uptime.
				uptime:SystemTime::now().duration_since(self.start_time).unwrap_or_default().as_secs(),

				dependencies:self.get_service_dependencies(name),

				metrics:ServiceMetrics {
					response_time:self.calculate_service_response_time(name).await,

					error_rate:self.calculate_service_error_rate(name).await,

					throughput:self.calculate_service_throughput(name).await,

					memory_usage:self.get_service_memory_usage(name).await,

					cpu_usage:self.get_service_cpu_usage(name).await,

					last_updated:SystemTime::now()
						.duration_since(SystemTime::UNIX_EPOCH)
						.unwrap_or_default()
						.as_millis() as u64,
				},

				// Ports are assigned sequentially from 50050 in list order —
				// `services.len()` is read BEFORE this service is pushed.
				endpoint:Some(format!("localhost:{}", 50050 + services.len() as u16)),

				port:Some(50050 + services.len() as u16),
			};

			registry.services.insert(name.to_string(), service_info.clone());

			discovered.insert(name.to_string());

			services.push(service_info);
		}

		registry.last_discovery = SystemTime::now()
			.duration_since(SystemTime::UNIX_EPOCH)
			.unwrap_or_default()
			.as_millis() as u64;

		dev_log!(
			"lifecycle",
			"[StatusReporter] Service discovery completed: {} services found",
			services.len()
		);

		// Emit failure is logged but not fatal; the caller still gets the list.
		if let Err(e) = self
			.runtime
			.Environment
			.ApplicationHandle
			.emit("mountain_service_discovery", &services)
		{
			dev_log!(
				"lifecycle",
				"error: [StatusReporter] Failed to emit service discovery event: {}",
				e
			);
		}

		Ok(services)
	}
626
627	fn get_service_dependencies(&self, service_name:&str) -> Vec<String> {
628		match service_name {
629			"ExtensionHostService" => vec!["ConfigurationService".to_string()],
630
631			"FileService" => vec!["StorageService".to_string()],
632
633			"StorageService" => vec!["ConfigurationService".to_string()],
634
635			_ => Vec::new(),
636		}
637	}
638
639	async fn calculate_service_response_time(&self, service_name:&str) -> f64 {
640		match service_name {
641			"EditorService" => 5.0,
642
643			"ExtensionHostService" => 15.0,
644
645			"ConfigurationService" => 2.0,
646
647			"FileService" => 8.0,
648
649			"StorageService" => 3.0,
650
651			_ => 10.0,
652		}
653	}
654
655	async fn calculate_service_error_rate(&self, service_name:&str) -> f64 {
656		match service_name {
657			"EditorService" => 0.1,
658
659			"ExtensionHostService" => 2.5,
660
661			"ConfigurationService" => 0.5,
662
663			"FileService" => 1.2,
664
665			"StorageService" => 0.8,
666
667			_ => 5.0,
668		}
669	}
670
671	async fn calculate_service_throughput(&self, service_name:&str) -> f64 {
672		match service_name {
673			"EditorService" => 1000.0,
674
675			"ExtensionHostService" => 500.0,
676
677			"ConfigurationService" => 2000.0,
678
679			"FileService" => 800.0,
680
681			"StorageService" => 1500.0,
682
683			_ => 100.0,
684		}
685	}
686
687	async fn get_service_memory_usage(&self, service_name:&str) -> f64 {
688		match service_name {
689			"EditorService" => 256.0,
690
691			"ExtensionHostService" => 512.0,
692
693			"ConfigurationService" => 128.0,
694
695			"FileService" => 192.0,
696
697			"StorageService" => 64.0,
698
699			_ => 100.0,
700		}
701	}
702
703	async fn get_service_cpu_usage(&self, service_name:&str) -> f64 {
704		match service_name {
705			"EditorService" => 15.0,
706
707			"ExtensionHostService" => 25.0,
708
709			"ConfigurationService" => 5.0,
710
711			"FileService" => 10.0,
712
713			"StorageService" => 8.0,
714
715			_ => 20.0,
716		}
717	}
718
719	pub async fn start_periodic_discovery(&self) -> Result<(), String> {
720		dev_log!("lifecycle", "Starting periodic service discovery");
721
722		let registry = self.service_registry.read().await;
723
724		let interval = registry.discovery_interval;
725
726		drop(registry);
727
728		let reporter = self.clone_reporter();
729
730		tokio::spawn(async move {
731			let mut interval = tokio::time::interval(Duration::from_millis(interval));
732
733			loop {
734				interval.tick().await;
735
736				if let Err(e) = reporter.discover_services().await {
737					dev_log!("lifecycle", "error: [StatusReporter] Periodic service discovery failed: {}", e);
738				}
739			}
740		});
741
742		Ok(())
743	}
744
745	pub async fn get_service_registry(&self) -> Result<ServiceRegistry, String> {
746		let registry = self.service_registry.read().await;
747
748		Ok(registry.clone())
749	}
750
751	pub async fn get_service_info(&self, service_name:&str) -> Result<Option<ServiceInfo>, String> {
752		let registry = self.service_registry.read().await;
753
754		Ok(registry.services.get(service_name).cloned())
755	}
756
757	pub async fn attempt_recovery(&self) -> Result<(), String> {
758		let mut health_monitor = self
759			.health_monitor
760			.lock()
761			.map_err(|e| format!("Failed to access health monitor: {}", e))?;
762
763		health_monitor.recovery_attempts += 1;
764
765		if let Some(ipc_server) = &self.ipc_server {
766			if let Err(e) = ipc_server.dispose() {
767				return Err(format!("Failed to dispose IPC server: {}", e));
768			}
769
770			if let Err(e) = ipc_server.initialize().await {
771				return Err(format!("Failed to reinitialize IPC server: {}", e));
772			}
773		}
774
775		if let Ok(mut error_count) = self.error_count.lock() {
776			*error_count = 0;
777		}
778
779		dev_log!(
780			"lifecycle",
781			"[StatusReporter] Recovery attempt {} completed",
782			health_monitor.recovery_attempts
783		);
784
785		Ok(())
786	}
787
788	pub fn get_performance_metrics(&self) -> Result<PerformanceMetrics, String> {
789		let metrics = self
790			.performance_metrics
791			.lock()
792			.map_err(|e| format!("Failed to access performance metrics: {}", e))?;
793
794		Ok(metrics.clone())
795	}
796
797	pub fn get_health_status(&self) -> Result<HealthMonitor, String> {
798		let health_monitor = self
799			.health_monitor
800			.lock()
801			.map_err(|e| format!("Failed to access health monitor: {}", e))?;
802
803		Ok(health_monitor.clone())
804	}
805
806	pub(super) fn clone_reporter(&self) -> Struct {
807		Struct {
808			runtime:self.runtime.clone(),
809
810			ipc_server:self.ipc_server.clone(),
811
812			status_history:self.status_history.clone(),
813
814			start_time:self.start_time,
815
816			error_count:self.error_count.clone(),
817
818			performance_metrics:self.performance_metrics.clone(),
819
820			health_monitor:self.health_monitor.clone(),
821
822			service_registry:self.service_registry.clone(),
823
824			discovered_services:self.discovered_services.clone(),
825		}
826	}
827}