1use astar_primitives::*;
22use cumulus_client_cli::CollatorOptions;
23use cumulus_client_consensus_aura::collators::slot_based::{
24 self as aura, Params as AuraParams, SlotBasedBlockImport, SlotBasedBlockImportHandle,
25};
26use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
27use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
28use cumulus_client_service::{
29 prepare_node_config, start_relay_chain_tasks, BuildNetworkParams, DARecoveryProfile,
30 StartRelayChainTasksParams,
31};
32use cumulus_primitives_core::{
33 relay_chain::{CollatorPair, ValidationCode},
34 ParaId,
35};
36use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain;
37use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult};
38use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node_with_rpc;
39use fc_consensus::FrontierBlockImport as TFrontierBlockImport;
40use fc_rpc_core::types::{FeeHistoryCache, FilterPool};
41use fc_storage::StorageOverrideHandler;
42use futures::StreamExt;
43use sc_client_api::BlockchainEvents;
44use sc_consensus::{import_queue::BasicQueue, ImportQueue};
45use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
46use sc_network::{config::NetworkBackendType, NetworkBackend, NetworkBlock};
47use sc_network_sync::SyncingService;
48use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
49use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
50use sp_api::{ApiExt, ProvideRuntimeApi};
51use sp_consensus_aura::{
52 sr25519::AuthorityId as AuraId, sr25519::AuthorityPair as AuraPair, AuraApi,
53};
54use sp_keystore::KeystorePtr;
55use sp_runtime::{traits::Block as BlockT, Percent};
56use std::{collections::BTreeMap, sync::Arc, time::Duration};
57use substrate_prometheus_endpoint::Registry;
58
59use super::shell_upgrade::*;
60
61use crate::{
62 evm_tracing_types::{EthApi as EthApiCmd, FrontierConfig},
63 rpc::tracing,
64};
65
/// Host functions exposed to the runtime when the node is built with
/// runtime benchmarks enabled (adds the benchmarking host functions).
#[cfg(feature = "runtime-benchmarks")]
pub type HostFunctions = (
    frame_benchmarking::benchmarking::HostFunctions,
    cumulus_client_service::ParachainHostFunctions,
    moonbeam_primitives_ext::moonbeam_ext::HostFunctions,
);

/// Host functions exposed to the runtime in a regular (non-benchmarking) build.
#[cfg(not(feature = "runtime-benchmarks"))]
pub type HostFunctions = (
    cumulus_client_service::ParachainHostFunctions,
    moonbeam_primitives_ext::moonbeam_ext::HostFunctions,
);

/// Wasm executor used by the parachain client.
pub type ParachainExecutor = WasmExecutor<HostFunctions>;

/// Full Substrate client over the crate's fake runtime API.
type FullClient =
    TFullClient<Block, crate::parachain::fake_runtime_api::RuntimeApi, ParachainExecutor>;

/// Frontier (EVM) block import wrapping the client.
type FrontierBlockImportType = TFrontierBlockImport<Block, Arc<FullClient>, FullClient>;

/// Slot-based Aura block import layered on top of the Frontier import.
type SlotBasedImport = SlotBasedBlockImport<Block, FrontierBlockImportType, FullClient>;

/// Parachain-aware block import wrapping the slot-based import; the outermost
/// layer of the import pipeline.
type ParachainBlockImport = TParachainBlockImport<Block, SlotBasedImport, TFullBackend<Block>>;
95
/// Assemble the client, backend, transaction pool, block-import pipeline and
/// import queue shared by every node role.
///
/// The block import pipeline is layered as
/// `FrontierBlockImport -> SlotBasedBlockImport -> ParachainBlockImport`.
/// Everything `start_node_impl` needs beyond the standard `PartialComponents`
/// fields is returned through the `other` tuple.
///
/// # Errors
/// Returns an [`sc_service::Error`] if any component (telemetry worker,
/// client parts, Frontier backend, ...) fails to initialize.
pub fn new_partial(
    config: &Configuration,
    evm_tracing_config: &FrontierConfig,
) -> Result<
    PartialComponents<
        FullClient,
        TFullBackend<Block>,
        (),
        sc_consensus::DefaultImportQueue<Block>,
        sc_transaction_pool::TransactionPoolHandle<Block, FullClient>,
        (
            ParachainBlockImport,
            SlotBasedBlockImportHandle<Block>,
            Option<Telemetry>,
            Option<TelemetryWorkerHandle>,
            Arc<fc_db::Backend<Block, FullClient>>,
        ),
    >,
    sc_service::Error,
> {
    // Instantiate telemetry only when endpoints were actually configured.
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;

    // Fixed heap allocation strategy when an explicit page count was given,
    // otherwise the default strategy.
    let heap_pages = config
        .executor
        .default_heap_pages
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
            extra_pages: h as _,
        });

    let executor = ParachainExecutor::builder()
        .with_execution_method(config.executor.wasm_method)
        .with_onchain_heap_alloc_strategy(heap_pages)
        .with_offchain_heap_alloc_strategy(heap_pages)
        .with_max_runtime_instances(config.executor.max_runtime_instances)
        .with_runtime_cache_size(config.executor.runtime_cache_size)
        .build();

    // NOTE(review): the trailing `true` flag presumably enables import
    // recording (per the `_record_import` constructor name) — confirm against
    // `sc_service::new_full_parts_record_import` docs.
    let (client, backend, keystore_container, task_manager) =
        sc_service::new_full_parts_record_import::<Block, _, _>(
            config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
            true,
        )?;
    let client = Arc::new(client);

    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());

    // Spawn the telemetry worker in the background; keep only the telemetry
    // handle around.
    let telemetry = telemetry.map(|(worker, telemetry)| {
        task_manager
            .spawn_handle()
            .spawn("telemetry", None, worker.run());
        telemetry
    });

    let transaction_pool = sc_transaction_pool::Builder::new(
        task_manager.spawn_essential_handle(),
        client.clone(),
        config.role.is_authority().into(),
    )
    .with_options(config.transaction_pool.clone())
    .with_prometheus(config.prometheus_registry())
    .build();

    // Frontier database holding the Substrate <-> Ethereum block mappings.
    let frontier_backend = Arc::new(crate::rpc::open_frontier_backend(
        client.clone(),
        config,
        evm_tracing_config,
    )?);

    // Layer the block imports: Frontier first, then the slot-based Aura
    // import, finally the parachain-aware import over the backend.
    let frontier_block_import = TFrontierBlockImport::new(client.clone(), client.clone());
    let (slot_based_block_import, slot_based_import_handle) =
        SlotBasedBlockImport::new(frontier_block_import, client.clone());
    let parachain_block_import: ParachainBlockImport =
        ParachainBlockImport::new(slot_based_block_import, backend.clone());

    let import_queue = build_import_queue(
        client.clone(),
        parachain_block_import.clone(),
        config,
        telemetry.as_ref().map(|telemetry| telemetry.handle()),
        &task_manager,
    );

    let params = PartialComponents {
        backend,
        client,
        import_queue,
        keystore_container,
        task_manager,
        transaction_pool: transaction_pool.into(),
        select_chain: (),
        other: (
            parachain_block_import,
            slot_based_import_handle,
            telemetry,
            telemetry_worker_handle,
            frontier_backend,
        ),
    };

    Ok(params)
}
212
213async fn build_relay_chain_interface(
214 polkadot_config: Configuration,
215 parachain_config: &Configuration,
216 telemetry_worker_handle: Option<TelemetryWorkerHandle>,
217 task_manager: &mut TaskManager,
218 collator_options: CollatorOptions,
219 hwbench: Option<sc_sysinfo::HwBench>,
220) -> RelayChainResult<(
221 Arc<(dyn RelayChainInterface + 'static)>,
222 Option<CollatorPair>,
223)> {
224 let result = if let cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) =
225 collator_options.relay_chain_mode
226 {
227 build_minimal_relay_chain_node_with_rpc(
228 polkadot_config,
229 parachain_config.prometheus_registry(),
230 task_manager,
231 rpc_target_urls,
232 )
233 .await
234 } else {
235 build_inprocess_relay_chain(
236 polkadot_config,
237 parachain_config,
238 telemetry_worker_handle,
239 task_manager,
240 hwbench,
241 )
242 };
243
244 result
246 .map(|(relay_chain_interface, collator_pair, _, _)| (relay_chain_interface, collator_pair))
247}
248
/// Extra, node-specific configuration passed alongside the standard parachain
/// and relay-chain configurations when starting a node.
#[derive(Clone)]
pub struct AdditionalConfig {
    /// EVM tracing (Frontier) configuration.
    pub evm_tracing_config: FrontierConfig,

    /// Whether to expose the Ethereum-compatible RPCs.
    pub enable_evm_rpc: bool,

    /// Block size limit handed to `ProposerFactory::set_default_block_size_limit`.
    pub proposer_block_size_limit: usize,

    /// Soft deadline percentage handed to `ProposerFactory::set_soft_deadline`.
    pub proposer_soft_deadline_percent: u8,

    /// Hardware benchmark results, when benchmarking ran at startup.
    pub hwbench: Option<sc_sysinfo::HwBench>,
}
267
268#[sc_tracing::logging::prefix_logs_with("Parachain")]
272async fn start_node_impl<N>(
273 parachain_config: Configuration,
274 polkadot_config: Configuration,
275 collator_options: CollatorOptions,
276 para_id: ParaId,
277 additional_config: AdditionalConfig,
278) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)>
279where
280 N: NetworkBackend<Block, <Block as BlockT>::Hash>,
281{
282 let parachain_config = prepare_node_config(parachain_config);
283
284 let PartialComponents {
285 client,
286 backend,
287 mut task_manager,
288 keystore_container,
289 select_chain: _,
290 import_queue,
291 transaction_pool,
292 other:
293 (
294 parachain_block_import,
295 block_import_handle,
296 mut telemetry,
297 telemetry_worker_handle,
298 frontier_backend,
299 ),
300 } = new_partial(¶chain_config, &additional_config.evm_tracing_config)?;
301
302 let prometheus_registry = parachain_config.prometheus_registry().cloned();
303 let net_config = sc_network::config::FullNetworkConfiguration::<_, _, N>::new(
304 ¶chain_config.network,
305 prometheus_registry.clone(),
306 );
307
308 let metrics = N::register_notification_metrics(
309 parachain_config
310 .prometheus_config
311 .as_ref()
312 .map(|cfg| &cfg.registry),
313 );
314
315 let (relay_chain_interface, collator_key) = build_relay_chain_interface(
316 polkadot_config,
317 ¶chain_config,
318 telemetry_worker_handle,
319 &mut task_manager,
320 collator_options.clone(),
321 additional_config.hwbench.clone(),
322 )
323 .await
324 .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
325
326 let is_authority = parachain_config.role.is_authority();
327 let import_queue_service = import_queue.service();
328 let (network, system_rpc_tx, tx_handler_controller, sync_service) =
329 cumulus_client_service::build_network(BuildNetworkParams {
330 parachain_config: ¶chain_config,
331 net_config,
332 para_id,
333 client: client.clone(),
334 transaction_pool: transaction_pool.clone(),
335 spawn_handle: task_manager.spawn_handle(),
336 import_queue,
337 relay_chain_interface: relay_chain_interface.clone(),
338 sybil_resistance_level: cumulus_client_service::CollatorSybilResistance::Resistant,
339 metrics,
340 })
341 .await?;
342
343 let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new()));
344 let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new()));
345 let storage_override = Arc::new(StorageOverrideHandler::new(client.clone()));
346
347 let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
352 fc_mapping_sync::EthereumBlockNotification<Block>,
353 > = Default::default();
354 let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);
355
356 let ethapi_cmd = additional_config.evm_tracing_config.ethapi.clone();
357 let tracing_requesters =
358 if ethapi_cmd.contains(&EthApiCmd::Debug) || ethapi_cmd.contains(&EthApiCmd::Trace) {
359 tracing::spawn_tracing_tasks(
360 &additional_config.evm_tracing_config,
361 prometheus_registry.clone(),
362 tracing::SpawnTasksParams {
363 task_manager: &task_manager,
364 client: client.clone(),
365 substrate_backend: backend.clone(),
366 frontier_backend: frontier_backend.clone(),
367 storage_override: storage_override.clone(),
368 },
369 )
370 } else {
371 tracing::RpcRequesters {
372 debug: None,
373 trace: None,
374 }
375 };
376
377 match frontier_backend.as_ref() {
380 fc_db::Backend::KeyValue(ref b) => {
381 task_manager.spawn_essential_handle().spawn(
382 "frontier-mapping-sync-worker",
383 Some("frontier"),
384 fc_mapping_sync::kv::MappingSyncWorker::new(
385 client.import_notification_stream(),
386 Duration::new(6, 0),
387 client.clone(),
388 backend.clone(),
389 storage_override.clone(),
390 b.clone(),
391 3,
392 0,
393 fc_mapping_sync::SyncStrategy::Parachain,
394 sync_service.clone(),
395 pubsub_notification_sinks.clone(),
396 )
397 .for_each(|()| futures::future::ready(())),
398 );
399 }
400 fc_db::Backend::Sql(ref b) => {
401 task_manager.spawn_essential_handle().spawn_blocking(
402 "frontier-mapping-sync-worker",
403 Some("frontier"),
404 fc_mapping_sync::sql::SyncWorker::run(
405 client.clone(),
406 backend.clone(),
407 b.clone(),
408 client.import_notification_stream(),
409 fc_mapping_sync::sql::SyncWorkerConfig {
410 read_notification_timeout: Duration::from_secs(10),
411 check_indexed_blocks_interval: Duration::from_secs(60),
412 },
413 fc_mapping_sync::SyncStrategy::Parachain,
414 sync_service.clone(),
415 pubsub_notification_sinks.clone(),
416 ),
417 );
418 }
419 }
420
421 const FILTER_RETAIN_THRESHOLD: u64 = 100;
424 task_manager.spawn_essential_handle().spawn(
425 "frontier-filter-pool",
426 Some("frontier"),
427 fc_rpc::EthTask::filter_pool_task(
428 client.clone(),
429 filter_pool.clone(),
430 FILTER_RETAIN_THRESHOLD,
431 ),
432 );
433
434 const FEE_HISTORY_LIMIT: u64 = 2048;
435 task_manager.spawn_essential_handle().spawn(
436 "frontier-fee-history",
437 Some("frontier"),
438 fc_rpc::EthTask::fee_history_task(
439 client.clone(),
440 storage_override.clone(),
441 fee_history_cache.clone(),
442 FEE_HISTORY_LIMIT,
443 ),
444 );
445
446 let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
447 task_manager.spawn_handle(),
448 storage_override.clone(),
449 50,
450 50,
451 prometheus_registry.clone(),
452 ));
453
454 let rpc_extensions_builder = {
455 let client = client.clone();
456 let network = network.clone();
457 let transaction_pool = transaction_pool.clone();
458 let rpc_config = crate::rpc::EvmTracingConfig {
459 tracing_requesters,
460 trace_filter_max_count: additional_config.evm_tracing_config.ethapi_trace_max_count,
461 enable_txpool: ethapi_cmd.contains(&EthApiCmd::TxPool),
462 };
463 let sync = sync_service.clone();
464 let pubsub_notification_sinks = pubsub_notification_sinks.clone();
465
466 Box::new(move |subscription| {
467 let deps = crate::rpc::FullDeps {
468 client: client.clone(),
469 pool: transaction_pool.clone(),
470 graph: transaction_pool.clone(),
471 network: network.clone(),
472 sync: sync.clone(),
473 is_authority,
474 frontier_backend: match *frontier_backend {
475 fc_db::Backend::KeyValue(ref b) => b.clone(),
476 fc_db::Backend::Sql(ref b) => b.clone(),
477 },
478 filter_pool: filter_pool.clone(),
479 fee_history_limit: FEE_HISTORY_LIMIT,
480 fee_history_cache: fee_history_cache.clone(),
481 block_data_cache: block_data_cache.clone(),
482 storage_override: storage_override.clone(),
483 enable_evm_rpc: additional_config.enable_evm_rpc,
484 command_sink: None,
485 };
486
487 crate::rpc::create_full(
488 deps,
489 subscription,
490 pubsub_notification_sinks.clone(),
491 rpc_config.clone(),
492 )
493 .map_err(Into::into)
494 })
495 };
496
497 sc_service::spawn_tasks(sc_service::SpawnTasksParams {
499 rpc_builder: rpc_extensions_builder,
500 client: client.clone(),
501 transaction_pool: transaction_pool.clone(),
502 task_manager: &mut task_manager,
503 config: parachain_config,
504 keystore: keystore_container.keystore(),
505 backend: backend.clone(),
506 network: network.clone(),
507 system_rpc_tx,
508 sync_service: sync_service.clone(),
509 tx_handler_controller,
510 telemetry: telemetry.as_mut(),
511 })?;
512
513 if let Some(hwbench) = additional_config.hwbench.clone() {
514 sc_sysinfo::print_hwbench(&hwbench);
515 if is_authority {
516 warn_if_slow_hardware(&hwbench);
517 }
518
519 if let Some(ref mut telemetry) = telemetry {
520 let telemetry_handle = telemetry.handle();
521 task_manager.spawn_handle().spawn(
522 "telemetry_hwbench",
523 None,
524 sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
525 );
526 }
527 }
528
529 let announce_block = {
530 let sync_service = sync_service.clone();
531 Arc::new(move |hash, data| sync_service.announce_block(hash, data))
532 };
533
534 let overseer_handle = relay_chain_interface
535 .overseer_handle()
536 .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
537
538 start_relay_chain_tasks(StartRelayChainTasksParams {
539 client: client.clone(),
540 announce_block: announce_block.clone(),
541 task_manager: &mut task_manager,
542 para_id,
543 relay_chain_interface: relay_chain_interface.clone(),
544 relay_chain_slot_duration: Duration::from_secs(6),
545 import_queue: import_queue_service,
546 recovery_handle: Box::new(overseer_handle.clone()),
547 sync_service: sync_service.clone(),
548 da_recovery_profile: if is_authority {
549 DARecoveryProfile::Collator
550 } else {
551 DARecoveryProfile::FullNode
552 },
553 prometheus_registry: prometheus_registry.as_ref(),
554 })?;
555
556 if is_authority {
557 start_aura_consensus(
558 client.clone(),
559 backend,
560 parachain_block_import,
561 block_import_handle,
562 prometheus_registry.as_ref(),
563 telemetry.map(|t| t.handle()),
564 &mut task_manager,
565 relay_chain_interface,
566 transaction_pool,
567 sync_service,
568 keystore_container.keystore(),
569 para_id,
570 collator_key.expect("Command line arguments do not allow this. qed"),
571 additional_config,
572 )?;
573 }
574
575 Ok((task_manager, client))
576}
577
578pub fn build_import_queue(
581 client: Arc<FullClient>,
582 block_import: ParachainBlockImport,
583 config: &Configuration,
584 telemetry_handle: Option<TelemetryHandle>,
585 task_manager: &TaskManager,
586) -> sc_consensus::DefaultImportQueue<Block> {
587 let verifier_client = client.clone();
588 let create_aura_inherent_data_providers = move |parent_hash, _| {
590 let cidp_client = verifier_client.clone();
591 async move {
592 let slot_duration =
593 cumulus_client_consensus_aura::slot_duration_at(&*cidp_client, parent_hash)?;
594 let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
595
596 let slot =
597 sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
598 *timestamp,
599 slot_duration,
600 );
601
602 Ok((slot, timestamp))
603 }
604 };
605
606 let create_relay_inherent_data_providers = move |_parent_hash: Hash, _| async move {
609 let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
610 Ok(timestamp)
611 };
612
613 let aura_verifier = Box::new(cumulus_client_consensus_aura::build_verifier::<
614 AuraPair,
615 _,
616 _,
617 _,
618 >(cumulus_client_consensus_aura::BuildVerifierParams {
619 client: client.clone(),
620 create_inherent_data_providers: create_aura_inherent_data_providers,
621 telemetry: telemetry_handle,
622 }));
623
624 let relay_chain_verifier = Box::new(RelayChainVerifier::new(
625 client.clone(),
626 create_relay_inherent_data_providers,
627 )) as Box<_>;
628
629 let verifier = Verifier {
630 client,
631 relay_chain_verifier,
632 aura_verifier,
633 };
634
635 let registry = config.prometheus_registry();
636 let spawner = task_manager.spawn_essential_handle();
637
638 BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)
639}
640
/// Spawn the slot-based Aura collation task.
///
/// Builds the block proposer and collator service, assembles [`AuraParams`],
/// and spawns a task that first waits (via [`wait_for_aura`]) for the runtime
/// to expose the Aura API before starting the collator. Only called when the
/// node runs as an authority.
fn start_aura_consensus(
    client: Arc<FullClient>,
    backend: Arc<TFullBackend<Block>>,
    block_import: ParachainBlockImport,
    block_import_handle: SlotBasedBlockImportHandle<Block>,
    prometheus_registry: Option<&Registry>,
    telemetry: Option<TelemetryHandle>,
    task_manager: &TaskManager,
    relay_chain_interface: Arc<dyn RelayChainInterface>,
    transaction_pool: Arc<sc_transaction_pool::TransactionPoolHandle<Block, FullClient>>,
    sync_oracle: Arc<SyncingService<Block>>,
    keystore: KeystorePtr,
    para_id: ParaId,
    collator_key: CollatorPair,
    additional_config: AdditionalConfig,
) -> Result<(), sc_service::Error> {
    // Block proposer with proof recording enabled.
    let mut proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
        task_manager.spawn_handle(),
        client.clone(),
        transaction_pool,
        prometheus_registry,
        telemetry,
    );

    // Apply the operator-configured proposer limits.
    proposer_factory.set_default_block_size_limit(additional_config.proposer_block_size_limit);
    proposer_factory.set_soft_deadline(Percent::from_percent(
        additional_config.proposer_soft_deadline_percent,
    ));

    let announce_block = {
        let sync_service = sync_oracle.clone();
        Arc::new(move |hash, data| sync_service.announce_block(hash, data))
    };

    let collator_service = cumulus_client_collator::service::CollatorService::new(
        client.clone(),
        Arc::new(task_manager.spawn_handle()),
        announce_block,
        client.clone(),
    );

    let params = AuraParams {
        // No extra inherent data providers beyond what the collator supplies.
        create_inherent_data_providers: move |_, ()| async move { Ok(()) },
        block_import,
        para_client: client.clone(),
        para_backend: backend,
        relay_client: relay_chain_interface.clone(),
        // Hash of the runtime code at a given block, if available.
        code_hash_provider: {
            let client = client.clone();
            move |block_hash| {
                client
                    .code_at(block_hash)
                    .ok()
                    .map(|c| ValidationCode::from(c).hash())
            }
        },
        keystore,
        collator_key,
        para_id,
        // NOTE(review): authoring is offset 1s into the slot and assumes a 6s
        // relay-chain slot — confirm these match the target relay chain.
        slot_offset: Duration::from_secs(1),
        relay_chain_slot_duration: Duration::from_secs(6),
        proposer: cumulus_client_consensus_proposer::Proposer::new(proposer_factory),
        collator_service,
        authoring_duration: Duration::from_millis(2000),
        reinitialize: false,
        block_import_handle,
        spawner: task_manager.spawn_handle(),
        max_pov_percentage: None, export_pov: None,
    };

    let fut = async move {
        // Aura may only become available via a runtime upgrade after genesis:
        // hold off until the runtime actually exposes the Aura API.
        wait_for_aura(client).await;
        aura::run::<Block, AuraPair, _, _, _, _, _, _, _, _, _>(params);
    };

    task_manager.spawn_handle().spawn("aura", None, fut);
    Ok(())
}
723
724async fn wait_for_aura(client: Arc<FullClient>) {
728 let finalized_hash = client.chain_info().finalized_hash;
729 if client
730 .runtime_api()
731 .has_api::<dyn AuraApi<Block, AuraId>>(finalized_hash)
732 .unwrap_or_default()
733 {
734 return;
735 };
736
737 let mut stream = client.finality_notification_stream();
738 while let Some(notification) = stream.next().await {
739 if client
740 .runtime_api()
741 .has_api::<dyn AuraApi<Block, AuraId>>(notification.hash)
742 .unwrap_or_default()
743 {
744 return;
745 }
746 }
747}
748
749fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) {
751 if let Err(err) =
754 frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench, false)
755 {
756 log::warn!(
757 "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority' find out more at:\n\
758 https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
759 err
760 );
761 }
762}
763
764pub async fn start_node(
766 parachain_config: Configuration,
767 polkadot_config: Configuration,
768 collator_options: CollatorOptions,
769 para_id: ParaId,
770 additional_config: AdditionalConfig,
771) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)> {
772 match parachain_config.network.network_backend {
773 NetworkBackendType::Libp2p => {
774 start_node_impl::<sc_network::NetworkWorker<_, _>>(
775 parachain_config,
776 polkadot_config,
777 collator_options,
778 para_id,
779 additional_config,
780 )
781 .await
782 }
783 NetworkBackendType::Litep2p => {
784 start_node_impl::<sc_network::Litep2pNetworkBackend>(
785 parachain_config,
786 polkadot_config,
787 collator_options,
788 para_id,
789 additional_config,
790 )
791 .await
792 }
793 }
794}