
getdozer / dozer / build 5921125008 (push, github, web-flow)

21 Aug 2023 02:02AM UTC coverage: 74.902% (-1.2%) from 76.06%
Wait for connectors to stop on shutdown (#1865)

* Wait for connectors to stop on shutdown
* Fix shutdown of object store connector
* Propagate errors in object store connector
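
The change described by these bullets follows a common async-shutdown pattern: keep the join handles of the spawned connector tasks, signal shutdown, then await every handle so errors surface instead of being dropped. Below is a minimal generic sketch of that pattern, assuming a tokio watch channel as the shutdown signal and an illustrative error type; it is not the connector code touched by #1865.

use tokio::{sync::watch, task::JoinHandle};

/// Wait for the shutdown signal, then wait for every connector task to
/// finish, propagating the first error instead of discarding it.
/// (Generic sketch; the error type and channel choice are assumptions.)
async fn wait_for_connectors(
    mut shutdown: watch::Receiver<bool>,
    handles: Vec<JoinHandle<Result<(), std::io::Error>>>,
) -> Result<(), std::io::Error> {
    // Block until the shutdown flag flips to true (or the sender is dropped).
    while !*shutdown.borrow() {
        if shutdown.changed().await.is_err() {
            break;
        }
    }
    // Waiting here is the point of the change: connectors get to stop
    // cleanly, and their errors are propagated to the caller.
    for handle in handles {
        handle.await.expect("connector task panicked")?;
    }
    Ok(())
}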

338 of 338 new or added lines in 14 files covered. (100.0%)
46077 of 61516 relevant lines covered (74.9%)
39792.39 hits per line

Source file: /dozer-cli/src/simple/orchestrator.rs (61.56% of lines covered)

use super::executor::{run_dag_executor, Executor};
use crate::errors::OrchestrationError;
use crate::pipeline::PipelineBuilder;
use crate::shutdown::ShutdownReceiver;
use crate::simple::build;
use crate::simple::helper::validate_config;
use crate::utils::{
    get_api_security_config, get_app_grpc_config, get_cache_manager_options, get_executor_options,
    get_grpc_config, get_log_options, get_rest_config,
};

use crate::{flatten_join_handle, join_handle_map_err};
use dozer_api::auth::{Access, Authorizer};
use dozer_api::grpc::internal::internal_pipeline_server::start_internal_pipeline_server;
use dozer_api::{grpc, rest, CacheEndpoint};
use dozer_cache::cache::LmdbRwCacheManager;
use dozer_cache::dozer_log::home_dir::HomeDir;
use dozer_core::app::AppPipeline;
use dozer_core::dag_schemas::DagSchemas;

use crate::console_helper::get_colored_text;
use crate::console_helper::GREEN;
use crate::console_helper::PURPLE;
use crate::console_helper::RED;
use dozer_core::errors::ExecutionError;
use dozer_ingestion::connectors::{get_connector, SourceSchema, TableInfo};
use dozer_sql::pipeline::builder::statement_to_pipeline;
use dozer_sql::pipeline::errors::PipelineError;
use dozer_types::crossbeam::channel::{self, Sender};
use dozer_types::indicatif::{MultiProgress, ProgressDrawTarget};
use dozer_types::log::info;
use dozer_types::models::config::Config;
use dozer_types::tracing::error;
use futures::stream::FuturesUnordered;
use futures::{FutureExt, StreamExt, TryFutureExt};
use metrics::{describe_counter, describe_histogram};
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;

use std::sync::Arc;
use std::thread;
use tokio::runtime::Runtime;
use tokio::sync::broadcast;

#[derive(Clone)]
pub struct SimpleOrchestrator {
    pub config: Config,
    pub runtime: Arc<Runtime>,
    pub multi_pb: MultiProgress,
}

impl SimpleOrchestrator {
    pub fn new(config: Config, runtime: Arc<Runtime>, enable_progress: bool) -> Self {
        let progress_draw_target = if enable_progress && atty::is(atty::Stream::Stderr) {
            ProgressDrawTarget::stderr()
        } else {
            ProgressDrawTarget::hidden()
        };

        Self {
            config,
            runtime,
            multi_pb: MultiProgress::with_draw_target(progress_draw_target),
        }
    }

    pub fn run_api(&mut self, shutdown: ShutdownReceiver) -> Result<(), OrchestrationError> {
        describe_histogram!(
            dozer_api::API_LATENCY_HISTOGRAM_NAME,
            "The api processing latency in seconds"
        );
        describe_counter!(
            dozer_api::API_REQUEST_COUNTER_NAME,
            "Number of requests processed by the api"
        );
        self.runtime.block_on(async {
            let mut futures = FuturesUnordered::new();

            // Open `RoCacheEndpoint`s. Streaming operations if necessary.
            let flags = self.config.flags.clone().unwrap_or_default();
            let (operations_sender, operations_receiver) = if flags.dynamic {
                let (sender, receiver) = broadcast::channel(16);
                (Some(sender), Some(receiver))
            } else {
                (None, None)
            };

            let internal_grpc_config = get_app_grpc_config(&self.config);
            let app_server_addr = format!(
                "http://{}:{}",
                internal_grpc_config.host, internal_grpc_config.port
            );
            let cache_manager = Arc::new(
                LmdbRwCacheManager::new(get_cache_manager_options(&self.config))
                    .map_err(OrchestrationError::CacheInitFailed)?,
            );
            let mut cache_endpoints = vec![];
            for endpoint in &self.config.endpoints {
                let (cache_endpoint, handle) = CacheEndpoint::new(
                    app_server_addr.clone(),
                    &*cache_manager,
                    endpoint.clone(),
                    Box::pin(shutdown.create_shutdown_future()),
                    operations_sender.clone(),
                    Some(self.multi_pb.clone()),
                )
                .await?;
                let cache_name = endpoint.name.clone();
                futures.push(flatten_join_handle(join_handle_map_err(handle, move |e| {
                    if e.is_map_full() {
                        OrchestrationError::CacheFull(cache_name)
                    } else {
                        OrchestrationError::CacheBuildFailed(cache_name, e)
                    }
                })));
                cache_endpoints.push(Arc::new(cache_endpoint));
            }

            // Initialize API Server
            let rest_config = get_rest_config(&self.config);
            let rest_handle = if rest_config.enabled {
                let security = get_api_security_config(&self.config).cloned();
                let cache_endpoints_for_rest = cache_endpoints.clone();
                let shutdown_for_rest = shutdown.create_shutdown_future();
                let api_server = rest::ApiServer::new(rest_config, security);
                let api_server = api_server
                    .run(cache_endpoints_for_rest, shutdown_for_rest)
                    .map_err(OrchestrationError::ApiInitFailed)?;
                tokio::spawn(api_server.map_err(OrchestrationError::RestServeFailed))
            } else {
                tokio::spawn(async move { Ok::<(), OrchestrationError>(()) })
            };

            // Initialize gRPC Server
            let grpc_config = get_grpc_config(&self.config);
            let grpc_handle = if grpc_config.enabled {
                let api_security = get_api_security_config(&self.config).cloned();
                let grpc_server = grpc::ApiServer::new(grpc_config, api_security, flags);
                let shutdown = shutdown.create_shutdown_future();
                let grpc_server = grpc_server
                    .run(cache_endpoints, shutdown, operations_receiver)
                    .await
                    .map_err(OrchestrationError::ApiInitFailed)?;
                tokio::spawn(async move {
                    grpc_server
                        .await
                        .map_err(OrchestrationError::GrpcServeFailed)
                })
            } else {
                tokio::spawn(async move { Ok::<(), OrchestrationError>(()) })
            };

            futures.push(flatten_join_handle(rest_handle));
            futures.push(flatten_join_handle(grpc_handle));

            while let Some(result) = futures.next().await {
                result?;
            }

            Ok::<(), OrchestrationError>(())
        })?;

        Ok(())
    }

    pub fn run_apps(
        &mut self,
        shutdown: ShutdownReceiver,
        api_notifier: Option<Sender<bool>>,
    ) -> Result<(), OrchestrationError> {
        let home_dir = HomeDir::new(self.config.home_dir.clone(), self.config.cache_dir.clone());
        let executor = self.runtime.block_on(Executor::new(
            &home_dir,
            &self.config.connections,
            &self.config.sources,
            self.config.sql.as_deref(),
            &self.config.endpoints,
            get_log_options(&self.config),
            self.multi_pb.clone(),
        ))?;
        let dag_executor = self.runtime.block_on(executor.create_dag_executor(
            &self.runtime,
            get_executor_options(&self.config),
            shutdown.clone(),
        ))?;

        let app_grpc_config = get_app_grpc_config(&self.config);
        let internal_server_future = self
            .runtime
            .block_on(start_internal_pipeline_server(
                executor.endpoint_and_logs().to_vec(),
                &app_grpc_config,
                shutdown.create_shutdown_future(),
            ))
            .map_err(OrchestrationError::InternalServerFailed)?;

        if let Some(api_notifier) = api_notifier {
            api_notifier
                .send(true)
                .expect("Failed to notify API server");
        }

        let pipeline_future = self
            .runtime
            .spawn_blocking(|| run_dag_executor(dag_executor, shutdown));

        let mut futures = FuturesUnordered::new();
        futures.push(
            internal_server_future
                .map_err(OrchestrationError::GrpcServeFailed)
                .boxed(),
        );
        futures.push(flatten_join_handle(pipeline_future).boxed());

        self.runtime.block_on(async move {
            while let Some(result) = futures.next().await {
                result?;
            }
            Ok(())
        })
    }

    #[allow(clippy::type_complexity)]
    pub fn list_connectors(
        &self,
    ) -> Result<HashMap<String, (Vec<TableInfo>, Vec<SourceSchema>)>, OrchestrationError> {
        self.runtime.block_on(async {
            let mut schema_map = HashMap::new();
            for connection in &self.config.connections {
                let connector = get_connector(connection.clone())?;
                let schema_tuples = connector.list_all_schemas().await?;
                schema_map.insert(connection.name.clone(), schema_tuples);
            }

            Ok(schema_map)
        })
    }

    pub fn generate_token(&self) -> Result<String, OrchestrationError> {
        if let Some(api_config) = &self.config.api {
            if let Some(api_security) = &api_config.api_security {
                match api_security {
                    dozer_types::models::api_security::ApiSecurity::Jwt(secret) => {
                        let auth = Authorizer::new(secret, None, None);
                        let token = auth
                            .generate_token(Access::All, None)
                            .map_err(OrchestrationError::GenerateTokenFailed)?;
                        return Ok(token);
                    }
                }
            }
        }
        Err(OrchestrationError::MissingSecurityConfig)
    }

    pub fn build(
        &mut self,
        force: bool,
        shutdown: ShutdownReceiver,
    ) -> Result<(), OrchestrationError> {
        let home_dir = HomeDir::new(self.config.home_dir.clone(), self.config.cache_dir.clone());

        info!(
            "Initiating app: {}",
            get_colored_text(&self.config.app_name, PURPLE)
        );
        if force {
            self.clean()?;
        }
        validate_config(&self.config)?;

        // Calculate schemas.
        let endpoint_and_logs = self
            .config
            .endpoints
            .iter()
            // We're not really going to run the pipeline, so we don't create logs.
            .map(|endpoint| (endpoint.clone(), None))
            .collect();
        let builder = PipelineBuilder::new(
            &self.config.connections,
            &self.config.sources,
            self.config.sql.as_deref(),
            endpoint_and_logs,
            self.multi_pb.clone(),
        );
        let dag = self
            .runtime
            .block_on(builder.build(&self.runtime, shutdown))?;
        // Populate schemas.
        let dag_schemas = DagSchemas::new(dag)?;

        // Get current contract.
        let enable_token = self
            .config
            .api
            .as_ref()
            .map(|api| api.api_security.is_some())
            .unwrap_or(false);
        let enable_on_event = self
            .config
            .flags
            .as_ref()
            .map(|flags| flags.push_events)
            .unwrap_or(false);
        let contract = build::Contract::new(
            dag_schemas,
            &self.config.endpoints,
            enable_token,
            enable_on_event,
        )?;

        // Run build
        let storage_config = get_log_options(&self.config).storage_config;
        self.runtime
            .block_on(build::build(&home_dir, &contract, &storage_config))?;

        Ok(())
    }

    // Cleaning the entire folder as there will be inconsistencies
    // between pipeline, cache and generated proto files.
    pub fn clean(&mut self) -> Result<(), OrchestrationError> {
        let cache_dir = PathBuf::from(self.config.cache_dir.clone());
        if cache_dir.exists() {
            fs::remove_dir_all(&cache_dir)
                .map_err(|e| ExecutionError::FileSystemError(cache_dir, e))?;
        };

        let home_dir = PathBuf::from(self.config.home_dir.clone());
        if home_dir.exists() {
            fs::remove_dir_all(&home_dir)
                .map_err(|e| ExecutionError::FileSystemError(home_dir, e))?;
        };

        Ok(())
    }

    pub fn run_all(&mut self, shutdown: ShutdownReceiver) -> Result<(), OrchestrationError> {
        let mut dozer_api = self.clone();

        let (tx, rx) = channel::unbounded::<bool>();

        self.build(false, shutdown.clone())?;

        let mut dozer_pipeline = self.clone();
        let pipeline_shutdown = shutdown.clone();
        let pipeline_thread =
            thread::spawn(move || dozer_pipeline.run_apps(pipeline_shutdown, Some(tx)));

        // Wait for pipeline to initialize caches before starting api server
        if rx.recv().is_err() {
            // This means the pipeline thread returned before sending a message. Either an error happened or it panicked.
            return match pipeline_thread.join() {
                Ok(Err(e)) => Err(e),
                Ok(Ok(())) => panic!("An error must have happened"),
                Err(e) => {
                    std::panic::panic_any(e);
                }
            };
        }

        dozer_api.run_api(shutdown)?;

        // wait for pipeline thread to shutdown gracefully
        pipeline_thread.join().unwrap()
    }
}

pub fn validate_sql(sql: String) -> Result<(), PipelineError> {
    statement_to_pipeline(&sql, &mut AppPipeline::new(), None).map_or_else(
        |e| {
            error!(
                "[sql][{}] Transforms validation error: {}",
                get_colored_text("X", RED),
                e
            );
            Err(e)
        },
        |_| {
            info!(
                "[sql][{}]  Transforms validation completed",
                get_colored_text("✓", GREEN)
            );
            Ok(())
        },
    )
}
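
For orientation, here is a minimal sketch of how a binary might drive the SimpleOrchestrator defined above. The SimpleOrchestrator::new and run_all calls mirror the signatures in this file; load_config and shutdown_channel are hypothetical stand-ins for however the dozer CLI actually builds a Config and a ShutdownReceiver.

use std::sync::Arc;
use tokio::runtime::Runtime;

fn main() -> Result<(), OrchestrationError> {
    // Multi-threaded runtime shared by the orchestrator's blocking entry points.
    let runtime = Arc::new(Runtime::new().expect("failed to build tokio runtime"));

    // `load_config` is a hypothetical helper returning a dozer_types Config.
    let config: Config = load_config()?;

    // `shutdown_channel` is a hypothetical stand-in for the crate's shutdown
    // module; it must yield the ShutdownReceiver that run_all expects.
    let (_shutdown_sender, shutdown) = shutdown_channel(&runtime);

    // Builds the app, runs the pipeline on a background thread, then serves
    // the REST/gRPC APIs until shutdown (see run_all above).
    let mut orchestrator = SimpleOrchestrator::new(config, runtime, true);
    orchestrator.run_all(shutdown)
}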