• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

getdozer / dozer / 5888798292

17 Aug 2023 08:51AM UTC coverage: 76.025% (-1.4%) from 77.415%
5888798292

push

github

web-flow
feat: implement graph on live ui (#1847)

* feat: implement progress

* feat: implement enable progress flag

* feat: implement progress in live

* chore: fix clippy

* chore: always use telemetry metrics

* fix: Only run build once

---------

Co-authored-by: sagar <sagar@getdozer.io>
Co-authored-by: chubei <914745487@qq.com>

536 of 536 new or added lines in 21 files covered. (100.0%)

46101 of 60639 relevant lines covered (76.03%)

40410.07 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

64.63
/dozer-cli/src/simple/orchestrator.rs
1
use super::executor::{run_dag_executor, Executor};
2
use crate::errors::OrchestrationError;
3
use crate::pipeline::PipelineBuilder;
4
use crate::shutdown::ShutdownReceiver;
5
use crate::simple::build;
6
use crate::simple::helper::validate_config;
7
use crate::utils::{
8
    get_api_security_config, get_app_grpc_config, get_cache_manager_options, get_executor_options,
9
    get_grpc_config, get_log_options, get_rest_config,
10
};
11

12
use crate::{flatten_join_handle, join_handle_map_err};
13
use dozer_api::auth::{Access, Authorizer};
14
use dozer_api::grpc::internal::internal_pipeline_server::start_internal_pipeline_server;
15
use dozer_api::{grpc, rest, CacheEndpoint};
16
use dozer_cache::cache::LmdbRwCacheManager;
17
use dozer_cache::dozer_log::home_dir::HomeDir;
18
use dozer_core::app::AppPipeline;
19
use dozer_core::dag_schemas::DagSchemas;
20

21
use crate::console_helper::get_colored_text;
22
use crate::console_helper::GREEN;
23
use crate::console_helper::PURPLE;
24
use crate::console_helper::RED;
25
use dozer_core::errors::ExecutionError;
26
use dozer_ingestion::connectors::{get_connector, SourceSchema, TableInfo};
27
use dozer_sql::pipeline::builder::statement_to_pipeline;
28
use dozer_sql::pipeline::errors::PipelineError;
29
use dozer_types::crossbeam::channel::{self, Sender};
30
use dozer_types::indicatif::{MultiProgress, ProgressDrawTarget};
31
use dozer_types::log::info;
32
use dozer_types::models::config::Config;
33
use dozer_types::tracing::error;
34
use futures::stream::FuturesUnordered;
35
use futures::{FutureExt, StreamExt, TryFutureExt};
36
use metrics::{describe_counter, describe_histogram};
37
use std::collections::HashMap;
38
use std::fs;
39
use std::path::PathBuf;
40

41
use std::sync::Arc;
42
use std::thread;
43
use tokio::runtime::Runtime;
44
use tokio::sync::broadcast;
45

46
/// Top-level orchestrator for the "simple" deployment mode: owns the
/// configuration, the shared tokio runtime and the progress-bar registry,
/// and exposes the build / run-apps / run-api entry points.
///
/// `Clone` is cheap-ish: `runtime` is an `Arc`, `multi_pb` is internally
/// shared by `indicatif`; `config` is a full copy.
#[derive(Clone)]
pub struct SimpleOrchestrator {
    /// Full application configuration (connections, sources, endpoints, …).
    pub config: Config,
    /// Shared tokio runtime all async work is driven on via `block_on`.
    pub runtime: Arc<Runtime>,
    /// Registry that groups all progress bars so they render together.
    pub multi_pb: MultiProgress,
}
52

53
impl SimpleOrchestrator {
54
    pub fn new(config: Config, runtime: Arc<Runtime>, enable_progress: bool) -> Self {
6✔
55
        let progress_draw_target = if enable_progress && atty::is(atty::Stream::Stderr) {
6✔
56
            ProgressDrawTarget::stderr()
×
57
        } else {
×
58
            ProgressDrawTarget::hidden()
6✔
59
        };
×
60

61
        Self {
6✔
62
            config,
6✔
63
            runtime,
6✔
64
            multi_pb: MultiProgress::with_draw_target(progress_draw_target),
6✔
65
        }
6✔
66
    }
6✔
67

68
    /// Starts the REST and gRPC API servers plus one cache-building task per
    /// configured endpoint, then blocks the current thread on the runtime
    /// until all of them finish or the first one fails.
    ///
    /// Returns the first `OrchestrationError` produced by any task.
    pub fn run_api(&mut self, shutdown: ShutdownReceiver) -> Result<(), OrchestrationError> {
        // Register metric metadata up front so descriptions are available to
        // whatever metrics recorder is installed.
        describe_histogram!(
            dozer_api::API_LATENCY_HISTOGRAM_NAME,
            "The api processing latency in seconds"
        );
        describe_counter!(
            dozer_api::API_REQUEST_COUNTER_NAME,
            "Number of requests processed by the api"
        );
        self.runtime.block_on(async {
            // Collects every spawned task; polled at the bottom so the first
            // failure aborts the whole `run_api` call.
            let mut futures = FuturesUnordered::new();

            // Open `RoCacheEndpoint`s. Streaming operations if necessary.
            let flags = self.config.flags.clone().unwrap_or_default();
            // The operations broadcast channel only exists when the `dynamic`
            // flag is on; sender side feeds the caches, receiver side feeds
            // the gRPC server.
            let (operations_sender, operations_receiver) = if flags.dynamic {
                let (sender, receiver) = broadcast::channel(16);
                (Some(sender), Some(receiver))
            } else {
                (None, None)
            };

            let internal_grpc_config = get_app_grpc_config(&self.config);
            // Address of the internal app/pipeline gRPC server the cache
            // endpoints connect back to.
            let app_server_addr = format!(
                "http://{}:{}",
                internal_grpc_config.host, internal_grpc_config.port
            );
            let cache_manager = Arc::new(
                LmdbRwCacheManager::new(get_cache_manager_options(&self.config))
                    .map_err(OrchestrationError::CacheInitFailed)?,
            );
            let mut cache_endpoints = vec![];
            for endpoint in &self.config.endpoints {
                // `handle` is the background cache-building task; the
                // endpoint itself is shared with the API servers below.
                let (cache_endpoint, handle) = CacheEndpoint::new(
                    app_server_addr.clone(),
                    &*cache_manager,
                    endpoint.clone(),
                    Box::pin(shutdown.create_shutdown_future()),
                    operations_sender.clone(),
                    Some(self.multi_pb.clone()),
                )
                .await?;
                let cache_name = endpoint.name.clone();
                // Map cache-task failures to a dedicated error: a full LMDB
                // map gets its own variant so callers can distinguish it.
                futures.push(flatten_join_handle(join_handle_map_err(handle, move |e| {
                    if e.is_map_full() {
                        OrchestrationError::CacheFull(cache_name)
                    } else {
                        OrchestrationError::CacheBuildFailed(cache_name, e)
                    }
                })));
                cache_endpoints.push(Arc::new(cache_endpoint));
            }

            // Initialize API Server
            let rest_config = get_rest_config(&self.config);
            let rest_handle = if rest_config.enabled {
                let security = get_api_security_config(&self.config).cloned();
                let cache_endpoints_for_rest = cache_endpoints.clone();
                let shutdown_for_rest = shutdown.create_shutdown_future();
                let api_server = rest::ApiServer::new(rest_config, security);
                let api_server = api_server
                    .run(cache_endpoints_for_rest, shutdown_for_rest)
                    .map_err(OrchestrationError::ApiInitFailed)?;
                tokio::spawn(api_server.map_err(OrchestrationError::RestServeFailed))
            } else {
                // Disabled server: spawn a no-op task so the join logic below
                // stays uniform.
                tokio::spawn(async move { Ok::<(), OrchestrationError>(()) })
            };

            // Initialize gRPC Server
            let grpc_config = get_grpc_config(&self.config);
            let grpc_handle = if grpc_config.enabled {
                let api_security = get_api_security_config(&self.config).cloned();
                let grpc_server = grpc::ApiServer::new(grpc_config, api_security, flags);
                let shutdown = shutdown.create_shutdown_future();
                // `run` performs async setup (hence the first `.await`); the
                // returned future is the actual serve loop, spawned below.
                let grpc_server = grpc_server
                    .run(cache_endpoints, shutdown, operations_receiver)
                    .await
                    .map_err(OrchestrationError::ApiInitFailed)?;
                tokio::spawn(async move {
                    grpc_server
                        .await
                        .map_err(OrchestrationError::GrpcServeFailed)
                })
            } else {
                tokio::spawn(async move { Ok::<(), OrchestrationError>(()) })
            };

            futures.push(flatten_join_handle(rest_handle));
            futures.push(flatten_join_handle(grpc_handle));

            // Drain all tasks; `?` propagates the first failure immediately.
            while let Some(result) = futures.next().await {
                result?;
            }

            Ok::<(), OrchestrationError>(())
        })?;

        Ok(())
    }
166

167
    /// Builds and runs the pipeline (DAG executor) together with the internal
    /// pipeline gRPC server, blocking until either finishes or fails.
    ///
    /// If `api_notifier` is provided, a single `true` is sent once the
    /// internal server is up, signalling that the API side may start
    /// (see `run_all`). Panics if the notifier's receiver was dropped.
    pub fn run_apps(
        &mut self,
        shutdown: ShutdownReceiver,
        api_notifier: Option<Sender<bool>>,
    ) -> Result<(), OrchestrationError> {
        let home_dir = HomeDir::new(self.config.home_dir.clone(), self.config.cache_dir.clone());
        let executor = self.runtime.block_on(Executor::new(
            &home_dir,
            &self.config.connections,
            &self.config.sources,
            self.config.sql.as_deref(),
            &self.config.endpoints,
            get_log_options(&self.config),
            self.multi_pb.clone(),
        ))?;
        let dag_executor = self.runtime.block_on(
            executor.create_dag_executor(&self.runtime, get_executor_options(&self.config)),
        )?;

        // Start the internal server before notifying the API side so that
        // cache endpoints can connect as soon as they come up.
        let app_grpc_config = get_app_grpc_config(&self.config);
        let internal_server_future = self
            .runtime
            .block_on(start_internal_pipeline_server(
                executor.endpoint_and_logs().to_vec(),
                &app_grpc_config,
                shutdown.create_shutdown_future(),
            ))
            .map_err(OrchestrationError::InternalServerFailed)?;

        if let Some(api_notifier) = api_notifier {
            api_notifier
                .send(true)
                .expect("Failed to notify API server");
        }

        // The DAG executor is synchronous; run it on a blocking thread so it
        // doesn't starve the async runtime.
        let running = shutdown.get_running_flag();
        let pipeline_future = self
            .runtime
            .spawn_blocking(|| run_dag_executor(dag_executor, running));

        let mut futures = FuturesUnordered::new();
        futures.push(
            internal_server_future
                .map_err(OrchestrationError::GrpcServeFailed)
                .boxed(),
        );
        futures.push(flatten_join_handle(pipeline_future).boxed());

        // Wait for both tasks; the first error short-circuits.
        self.runtime.block_on(async move {
            while let Some(result) = futures.next().await {
                result?;
            }
            Ok(())
        })
    }
222

223
    #[allow(clippy::type_complexity)]
×
224
    pub fn list_connectors(
×
225
        &self,
×
226
    ) -> Result<HashMap<String, (Vec<TableInfo>, Vec<SourceSchema>)>, OrchestrationError> {
×
227
        self.runtime.block_on(async {
×
228
            let mut schema_map = HashMap::new();
×
229
            for connection in &self.config.connections {
×
230
                let connector = get_connector(connection.clone())?;
×
231
                let schema_tuples = connector.list_all_schemas().await?;
×
232
                schema_map.insert(connection.name.clone(), schema_tuples);
×
233
            }
234

×
235
            Ok(schema_map)
×
236
        })
×
237
    }
×
238

239
    pub fn generate_token(&self) -> Result<String, OrchestrationError> {
×
240
        if let Some(api_config) = &self.config.api {
×
241
            if let Some(api_security) = &api_config.api_security {
×
242
                match api_security {
×
243
                    dozer_types::models::api_security::ApiSecurity::Jwt(secret) => {
×
244
                        let auth = Authorizer::new(secret, None, None);
×
245
                        let token = auth
×
246
                            .generate_token(Access::All, None)
×
247
                            .map_err(OrchestrationError::GenerateTokenFailed)?;
×
248
                        return Ok(token);
×
249
                    }
250
                }
×
251
            }
×
252
        }
×
253
        Err(OrchestrationError::MissingSecurityConfig)
×
254
    }
×
255

×
256
    pub fn build(&mut self, force: bool) -> Result<(), OrchestrationError> {
6✔
257
        let home_dir = HomeDir::new(self.config.home_dir.clone(), self.config.cache_dir.clone());
6✔
258

6✔
259
        info!(
6✔
260
            "Initiating app: {}",
×
261
            get_colored_text(&self.config.app_name, PURPLE)
×
262
        );
×
263
        if force {
6✔
264
            self.clean()?;
×
265
        }
6✔
266
        validate_config(&self.config)?;
6✔
267

268
        // Calculate schemas.
×
269
        let endpoint_and_logs = self
6✔
270
            .config
6✔
271
            .endpoints
6✔
272
            .iter()
6✔
273
            // We're not really going to run the pipeline, so we don't create logs.
6✔
274
            .map(|endpoint| (endpoint.clone(), None))
6✔
275
            .collect();
6✔
276
        let builder = PipelineBuilder::new(
6✔
277
            &self.config.connections,
6✔
278
            &self.config.sources,
6✔
279
            self.config.sql.as_deref(),
6✔
280
            endpoint_and_logs,
6✔
281
            self.multi_pb.clone(),
6✔
282
        );
6✔
283
        let dag = self.runtime.block_on(builder.build(&self.runtime))?;
6✔
284
        // Populate schemas.
×
285
        let dag_schemas = DagSchemas::new(dag)?;
6✔
286

287
        // Get current contract.
×
288
        let enable_token = self
6✔
289
            .config
6✔
290
            .api
6✔
291
            .as_ref()
6✔
292
            .map(|api| api.api_security.is_some())
6✔
293
            .unwrap_or(false);
6✔
294
        let enable_on_event = self
6✔
295
            .config
6✔
296
            .flags
6✔
297
            .as_ref()
6✔
298
            .map(|flags| flags.push_events)
6✔
299
            .unwrap_or(false);
6✔
300
        let contract = build::Contract::new(
6✔
301
            dag_schemas,
6✔
302
            &self.config.endpoints,
6✔
303
            enable_token,
6✔
304
            enable_on_event,
6✔
305
        )?;
6✔
306

307
        // Run build
×
308
        let storage_config = get_log_options(&self.config).storage_config;
6✔
309
        self.runtime
6✔
310
            .block_on(build::build(&home_dir, &contract, &storage_config))?;
6✔
311

×
312
        Ok(())
6✔
313
    }
6✔
314

315
    // Cleaning the entire folder as there will be inconsistencies
316
    // between pipeline, cache and generated proto files.
×
317
    pub fn clean(&mut self) -> Result<(), OrchestrationError> {
×
318
        let cache_dir = PathBuf::from(self.config.cache_dir.clone());
×
319
        if cache_dir.exists() {
×
320
            fs::remove_dir_all(&cache_dir)
×
321
                .map_err(|e| ExecutionError::FileSystemError(cache_dir, e))?;
×
322
        };
×
323

×
324
        let home_dir = PathBuf::from(self.config.home_dir.clone());
×
325
        if home_dir.exists() {
×
326
            fs::remove_dir_all(&home_dir)
×
327
                .map_err(|e| ExecutionError::FileSystemError(home_dir, e))?;
×
328
        };
×
329

×
330
        Ok(())
×
331
    }
×
332

×
333
    /// Builds the app, then runs the pipeline (on a dedicated thread) and the
    /// API servers (on this thread) until both shut down.
    ///
    /// The pipeline thread signals readiness over an unbounded channel before
    /// the API side starts, so caches exist before the servers serve them.
    /// Propagates the first error from either side; re-raises pipeline-thread
    /// panics on this thread.
    pub fn run_all(&mut self, shutdown: ShutdownReceiver) -> Result<(), OrchestrationError> {
        let shutdown_api = shutdown.clone();

        // Separate clones so pipeline and API sides own independent state.
        let mut dozer_api = self.clone();

        let (tx, rx) = channel::unbounded::<bool>();

        self.build(false)?;

        let mut dozer_pipeline = self.clone();
        let pipeline_thread = thread::spawn(move || dozer_pipeline.run_apps(shutdown, Some(tx)));

        // Wait for pipeline to initialize caches before starting api server
        if rx.recv().is_err() {
            // This means the pipeline thread returned before sending a message. Either an error happened or it panicked.
            return match pipeline_thread.join() {
                Ok(Err(e)) => Err(e),
                // The thread exiting cleanly without ever notifying us is an
                // invariant violation, not a recoverable state.
                Ok(Ok(())) => panic!("An error must have happened"),
                Err(e) => {
                    // Re-raise the pipeline thread's panic payload here.
                    std::panic::panic_any(e);
                }
            };
        }

        dozer_api.run_api(shutdown_api)?;

        // wait for pipeline thread to shutdown gracefully
        pipeline_thread.join().unwrap()
    }
362
}
363

×
364
pub fn validate_sql(sql: String) -> Result<(), PipelineError> {
×
365
    statement_to_pipeline(&sql, &mut AppPipeline::new(), None).map_or_else(
×
366
        |e| {
×
367
            error!(
×
368
                "[sql][{}] Transforms validation error: {}",
×
369
                get_colored_text("X", RED),
×
370
                e
×
371
            );
×
372
            Err(e)
×
373
        },
×
374
        |_| {
×
375
            info!(
×
376
                "[sql][{}]  Transforms validation completed",
×
377
                get_colored_text("✓", GREEN)
×
378
            );
×
379
            Ok(())
×
380
        },
×
381
    )
×
382
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc