• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

geo-engine / geoengine / 3931034422

pending completion
3931034422

Pull #714

github

GitHub
Merge 5f93dc588 into a5761b055
Pull Request #714: use nextest in test

86519 of 98458 relevant lines covered (87.87%)

78874.78 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

94.3
/services/src/pro/contexts/postgres.rs
1
use crate::datasets::add_from_directory::add_providers_from_directory;
2
use crate::error::{self, Result};
3
use crate::layers::add_from_directory::{
4
    add_layer_collections_from_directory, add_layers_from_directory, UNSORTED_COLLECTION_ID,
5
};
6
use crate::layers::storage::INTERNAL_LAYER_DB_ROOT_COLLECTION_ID;
7
use crate::pro::datasets::{add_datasets_from_directory, PostgresDatasetDb, Role};
8
use crate::pro::layers::{PostgresLayerDb, PostgresLayerProviderDb};
9
use crate::pro::projects::ProjectPermission;
10
use crate::pro::quota::{initialize_quota_tracking, QuotaTrackingFactory};
11
use crate::pro::users::{OidcRequestDb, UserDb, UserId, UserSession};
12
use crate::pro::util::config::Oidc;
13
use crate::pro::workflows::postgres_workflow_registry::PostgresWorkflowRegistry;
14
use crate::projects::ProjectId;
15
use crate::tasks::{SimpleTaskManager, SimpleTaskManagerContext};
16
use crate::{contexts::Context, pro::users::PostgresUserDb};
17
use crate::{contexts::QueryContextImpl, pro::projects::PostgresProjectDb};
18
use async_trait::async_trait;
19
use bb8_postgres::{
20
    bb8::Pool,
21
    bb8::PooledConnection,
22
    tokio_postgres::{error::SqlState, tls::MakeTlsConnect, tls::TlsConnect, Config, Socket},
23
    PostgresConnectionManager,
24
};
25
use geoengine_datatypes::raster::TilingSpecification;
26
use geoengine_datatypes::util::Identifier;
27
use geoengine_operators::engine::{ChunkByteSize, QueryContextExtensions};
28
use geoengine_operators::pro::meta::quota::ComputationContext;
29
use geoengine_operators::util::create_rayon_thread_pool;
30
use log::{debug, warn};
31
use rayon::ThreadPool;
32
use snafu::ResultExt;
33
use std::path::PathBuf;
34
use std::sync::Arc;
35

36
use super::{ExecutionContextImpl, ProContext};
37

38
// TODO: do not report postgres error details to user

/// A context with references to Postgres backends of the dbs. Automatically migrates schema on instantiation
#[derive(Clone)]
pub struct PostgresContext<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    /// User and session storage backed by Postgres.
    user_db: Arc<PostgresUserDb<Tls>>,
    /// Project storage backed by Postgres.
    project_db: Arc<PostgresProjectDb<Tls>>,
    /// Workflow registry backed by Postgres.
    workflow_registry: Arc<PostgresWorkflowRegistry<Tls>>,
    /// Dataset storage backed by Postgres.
    dataset_db: Arc<PostgresDatasetDb<Tls>>,
    /// Layer storage backed by Postgres.
    layer_db: Arc<PostgresLayerDb<Tls>>,
    /// Layer-provider storage backed by Postgres.
    layer_provider_db: Arc<PostgresLayerProviderDb<Tls>>,
    /// Rayon thread pool shared by query and execution contexts.
    thread_pool: Arc<ThreadPool>,
    /// Tiling specification handed to execution contexts.
    exe_ctx_tiling_spec: TilingSpecification,
    /// Chunk byte size handed to query contexts.
    query_ctx_chunk_size: ChunkByteSize,
    /// In-memory task manager; does not persist across restarts.
    task_manager: Arc<SimpleTaskManager>,
    /// `None` when OIDC is not configured (see the two constructors).
    oidc_request_db: Arc<Option<OidcRequestDb>>,
    /// Factory for per-session quota tracking used by `query_context`.
    quota: QuotaTrackingFactory,
}
62

63
impl<Tls> PostgresContext<Tls>
64
where
65
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
66
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
67
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
68
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
69
{
70
    pub async fn new_with_context_spec(
18✔
71
        config: Config,
18✔
72
        tls: Tls,
18✔
73
        exe_ctx_tiling_spec: TilingSpecification,
18✔
74
        query_ctx_chunk_size: ChunkByteSize,
18✔
75
    ) -> Result<Self> {
18✔
76
        let pg_mgr = PostgresConnectionManager::new(config, tls);
18✔
77

78
        let pool = Pool::builder().build(pg_mgr).await?;
18✔
79

80
        Self::update_schema(pool.get().await?).await?;
36✔
81

82
        let user_db = Arc::new(PostgresUserDb::new(pool.clone()));
18✔
83
        let quota = initialize_quota_tracking(user_db.clone());
18✔
84

18✔
85
        Ok(Self {
18✔
86
            user_db,
18✔
87
            project_db: Arc::new(PostgresProjectDb::new(pool.clone())),
18✔
88
            workflow_registry: Arc::new(PostgresWorkflowRegistry::new(pool.clone())),
18✔
89
            dataset_db: Arc::new(PostgresDatasetDb::new(pool.clone())),
18✔
90
            layer_db: Arc::new(PostgresLayerDb::new(pool.clone())),
18✔
91
            layer_provider_db: Arc::new(PostgresLayerProviderDb::new(pool.clone())),
18✔
92
            task_manager: Arc::new(SimpleTaskManager::default()),
18✔
93
            thread_pool: create_rayon_thread_pool(0),
18✔
94
            exe_ctx_tiling_spec,
18✔
95
            query_ctx_chunk_size,
18✔
96
            oidc_request_db: Arc::new(None),
18✔
97
            quota,
18✔
98
        })
18✔
99
    }
18✔
100

101
    // TODO: check if the datasets exist already and don't output warnings when skipping them
102
    #[allow(clippy::too_many_arguments)]
103
    pub async fn new_with_data(
×
104
        config: Config,
×
105
        tls: Tls,
×
106
        dataset_defs_path: PathBuf,
×
107
        provider_defs_path: PathBuf,
×
108
        layer_defs_path: PathBuf,
×
109
        layer_collection_defs_path: PathBuf,
×
110
        exe_ctx_tiling_spec: TilingSpecification,
×
111
        query_ctx_chunk_size: ChunkByteSize,
×
112
        oidc_config: Oidc,
×
113
    ) -> Result<Self> {
×
114
        let pg_mgr = PostgresConnectionManager::new(config, tls);
×
115

116
        let pool = Pool::builder().build(pg_mgr).await?;
×
117

118
        Self::update_schema(pool.get().await?).await?;
×
119

120
        let workflow_db = PostgresWorkflowRegistry::new(pool.clone());
×
121
        let mut layer_db = PostgresLayerDb::new(pool.clone());
×
122

×
123
        add_layers_from_directory(&mut layer_db, layer_defs_path).await;
×
124
        add_layer_collections_from_directory(&mut layer_db, layer_collection_defs_path).await;
×
125

126
        let mut dataset_db = PostgresDatasetDb::new(pool.clone());
×
127

×
128
        add_datasets_from_directory(&mut dataset_db, dataset_defs_path).await;
×
129

130
        let mut layer_provider_db = PostgresLayerProviderDb::new(pool.clone());
×
131

×
132
        add_providers_from_directory(
×
133
            &mut layer_provider_db,
×
134
            provider_defs_path.clone(),
×
135
            &[provider_defs_path.join("pro")],
×
136
        )
×
137
        .await;
×
138

139
        let user_db = Arc::new(PostgresUserDb::new(pool.clone()));
×
140
        let quota = initialize_quota_tracking(user_db.clone());
×
141

×
142
        Ok(Self {
×
143
            user_db,
×
144
            project_db: Arc::new(PostgresProjectDb::new(pool.clone())),
×
145
            workflow_registry: Arc::new(workflow_db),
×
146
            dataset_db: Arc::new(dataset_db),
×
147
            layer_db: Arc::new(layer_db),
×
148
            layer_provider_db: Arc::new(PostgresLayerProviderDb::new(pool.clone())),
×
149
            task_manager: Arc::new(SimpleTaskManager::default()),
×
150
            thread_pool: create_rayon_thread_pool(0),
×
151
            exe_ctx_tiling_spec,
×
152
            query_ctx_chunk_size,
×
153
            oidc_request_db: Arc::new(OidcRequestDb::try_from(oidc_config).ok()),
×
154
            quota,
×
155
        })
×
156
    }
×
157

158
    async fn schema_version(
18✔
159
        conn: &PooledConnection<'_, PostgresConnectionManager<Tls>>,
18✔
160
    ) -> Result<i32> {
18✔
161
        let stmt = match conn.prepare("SELECT version from version").await {
18✔
162
            Ok(stmt) => stmt,
1✔
163
            Err(e) => {
17✔
164
                if let Some(code) = e.code() {
17✔
165
                    if *code == SqlState::UNDEFINED_TABLE {
17✔
166
                        warn!("UserDB: Uninitialized schema");
×
167
                        return Ok(0);
17✔
168
                    }
×
169
                }
×
170
                return Err(error::Error::TokioPostgres { source: e });
×
171
            }
172
        };
173

174
        let row = conn.query_one(&stmt, &[]).await?;
1✔
175

176
        Ok(row.get(0))
1✔
177
    }
18✔
178

179
    /// Bring the database schema up to the latest version, applying each
    /// pending migration in sequence until the current version has no
    /// successor. Starts from the version reported by `schema_version`
    /// (0 for an uninitialized schema).
    #[allow(clippy::too_many_lines)]
    async fn update_schema(
        conn: PooledConnection<'_, PostgresConnectionManager<Tls>>,
    ) -> Result<()> {
        let mut version = Self::schema_version(&conn).await?;

        // Apply migrations one version at a time; each arm migrates
        // `version` -> `version + 1`.
        loop {
            match version {
                // Initial schema: creates all tables/types and seeds the
                // built-in roles, the system user, and the root/unsorted
                // layer collections.
                0 => {
                    conn.batch_execute(
                        &format!(r#"
                        CREATE TABLE version (
                            version INT
                        );
                        INSERT INTO version VALUES (1);

                        CREATE TABLE roles (
                            id UUID PRIMARY KEY,
                            name text NOT NULL
                        );

                        INSERT INTO roles (id, name) VALUES
                            ('{system_role_id}', 'system'),
                            ('{user_role_id}', 'user'),
                            ('{anonymous_role_id}', 'anonymous');

                        CREATE TABLE users (
                            id UUID PRIMARY KEY REFERENCES roles(id),
                            email character varying (256) UNIQUE,
                            password_hash character varying (256),
                            real_name character varying (256),
                            active boolean NOT NULL,
                            quota_used bigint NOT NULL DEFAULT 0,
                            CONSTRAINT users_anonymous_ck CHECK (
                               (email IS NULL AND password_hash IS NULL AND real_name IS NULL) OR 
                               (email IS NOT NULL AND password_hash IS NOT NULL AND 
                                real_name IS NOT NULL) 
                            ),
                            CONSTRAINT users_quota_used_ck CHECK (quota_used >= 0)
                        );

                        INSERT INTO users (
                            id, 
                            email,
                            password_hash,
                            real_name,
                            active)
                        VALUES (
                            '{system_role_id}', 
                            'system@geoengine.io',
                            '',
                            'system',
                            true
                        );

                        -- relation between users and roles
                        -- all users have a default role where role_id = user_id
                        CREATE TABLE user_roles (
                            user_id UUID REFERENCES users(id) ON DELETE CASCADE NOT NULL,
                            role_id UUID REFERENCES roles(id) ON DELETE CASCADE NOT NULL,
                            PRIMARY KEY (user_id, role_id)
                        );

                        -- system user role
                        INSERT INTO user_roles 
                            (user_id, role_id)
                        VALUES 
                            ('{system_role_id}', 
                            '{system_role_id}');

                        CREATE TYPE "SpatialReferenceAuthority" AS ENUM (
                            'Epsg', 'SrOrg', 'Iau2000', 'Esri'
                        );

                        CREATE TYPE "SpatialReference" AS (
                            authority "SpatialReferenceAuthority", 
                            code OID
                        );

                        CREATE TYPE "Coordinate2D" AS (
                            x double precision, 
                            y double precision
                        );

                        CREATE TYPE "BoundingBox2D" AS (
                            lower_left_coordinate "Coordinate2D", 
                            upper_right_coordinate "Coordinate2D"
                        );

                        CREATE TYPE "TimeInterval" AS (                                                      
                            start timestamp with time zone,
                            "end" timestamp with time zone
                        );

                        CREATE TYPE "STRectangle" AS (
                            spatial_reference "SpatialReference",
                            bounding_box "BoundingBox2D",
                            time_interval "TimeInterval"
                        );
                        
                        CREATE TYPE "TimeGranularity" AS ENUM (
                            'Millis', 'Seconds', 'Minutes', 'Hours',
                            'Days',  'Months', 'Years'
                        );
                        
                        CREATE TYPE "TimeStep" AS (
                            granularity "TimeGranularity",
                            step OID
                        );

                        CREATE TABLE projects (
                            id UUID PRIMARY KEY
                        );        
                        
                        CREATE TABLE sessions (
                            id UUID PRIMARY KEY,
                            user_id UUID REFERENCES users(id),
                            created timestamp with time zone NOT NULL,
                            valid_until timestamp with time zone NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE SET NULL,
                            view "STRectangle"
                        );                

                        CREATE TABLE project_versions (
                            id UUID PRIMARY KEY,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            name character varying (256) NOT NULL,
                            description text NOT NULL,
                            bounds "STRectangle" NOT NULL,
                            time_step "TimeStep" NOT NULL,
                            changed timestamp with time zone,
                            author_user_id UUID REFERENCES users(id) NOT NULL,
                            latest boolean
                        );

                        CREATE INDEX project_version_latest_idx 
                        ON project_versions (project_id, latest DESC, changed DESC, author_user_id DESC);

                        CREATE TYPE "LayerType" AS ENUM ('Raster', 'Vector');
                        
                        CREATE TYPE "LayerVisibility" AS (
                            data BOOLEAN,
                            legend BOOLEAN
                        );

                        CREATE TABLE project_version_layers (
                            layer_index integer NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            project_version_id UUID REFERENCES project_versions(id) ON DELETE CASCADE NOT NULL,                            
                            name character varying (256) NOT NULL,
                            workflow_id UUID NOT NULL, -- TODO: REFERENCES workflows(id)
                            symbology json,
                            visibility "LayerVisibility" NOT NULL,
                            PRIMARY KEY (project_id, project_version_id, layer_index)            
                        );
                        
                        CREATE TABLE project_version_plots (
                            plot_index integer NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            project_version_id UUID REFERENCES project_versions(id) ON DELETE CASCADE NOT NULL,                            
                            name character varying (256) NOT NULL,
                            workflow_id UUID NOT NULL, -- TODO: REFERENCES workflows(id)
                            PRIMARY KEY (project_id, project_version_id, plot_index)            
                        );

                        CREATE TYPE "ProjectPermission" AS ENUM ('Read', 'Write', 'Owner');

                        CREATE TABLE user_project_permissions (
                            user_id UUID REFERENCES users(id) NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            permission "ProjectPermission" NOT NULL,
                            PRIMARY KEY (user_id, project_id)
                        );

                        CREATE TABLE workflows (
                            id UUID PRIMARY KEY,
                            workflow json NOT NULL
                        );

                        CREATE TABLE datasets (
                            id UUID PRIMARY KEY,
                            name text NOT NULL,
                            description text NOT NULL, 
                            tags text[], 
                            source_operator text NOT NULL,

                            result_descriptor json NOT NULL,
                            meta_data json NOT NULL,

                            symbology json,
                            provenance json
                        );

                        -- TODO: add constraint not null
                        -- TODO: add constaint byte_size >= 0
                        CREATE TYPE "FileUpload" AS (
                            id UUID,
                            name text,
                            byte_size bigint
                        );

                        -- TODO: store user
                        -- TODO: time of creation and last update
                        -- TODO: upload directory that is not directly derived from id
                        CREATE TABLE uploads (
                            id UUID PRIMARY KEY,
                            user_id UUID REFERENCES users(id) ON DELETE CASCADE NOT NULL,
                            files "FileUpload"[] NOT NULL
                        );

                        CREATE TYPE "Permission" AS ENUM (
                            'Read', 'Write', 'Owner'
                        );

                        -- TODO: add indexes
                        CREATE TABLE dataset_permissions (
                            role_id UUID REFERENCES roles(id) ON DELETE CASCADE NOT NULL,
                            dataset_id UUID REFERENCES datasets(id) ON DELETE CASCADE NOT NULL,
                            permission "Permission" NOT NULL,
                            PRIMARY KEY (role_id, dataset_id)
                        );

                        CREATE VIEW user_permitted_datasets AS
                            SELECT 
                                r.user_id,
                                p.dataset_id,
                                p.permission
                            FROM 
                                user_roles r JOIN dataset_permissions p ON (r.role_id = p.role_id);


                        CREATE TABLE layer_collections (
                            id UUID PRIMARY KEY,
                            name text NOT NULL,
                            description text NOT NULL
                        );

                        -- insert the root layer collection
                        INSERT INTO layer_collections (
                            id,
                            name,
                            description
                        ) VALUES (
                            '{root_layer_collection_id}',
                            'Layers',
                            'All available Geo Engine layers'
                        );

                        -- insert the unsorted layer collection
                        INSERT INTO layer_collections (
                            id,
                            name,
                            description
                        ) VALUES (
                            '{unsorted_layer_collection_id}',
                            'Unsorted',
                            'Unsorted Layers'
                        );
    

                        CREATE TABLE layers (
                            id UUID PRIMARY KEY,
                            name text NOT NULL,
                            description text NOT NULL,
                            workflow json NOT NULL,
                            symbology json 
                        );

                        CREATE TABLE collection_layers (
                            collection UUID REFERENCES layer_collections(id) ON DELETE CASCADE NOT NULL,
                            layer UUID REFERENCES layers(id) ON DELETE CASCADE NOT NULL,
                            PRIMARY KEY (collection, layer)
                        );

                        CREATE TABLE collection_children (
                            parent UUID REFERENCES layer_collections(id) ON DELETE CASCADE NOT NULL,
                            child UUID REFERENCES layer_collections(id) ON DELETE CASCADE NOT NULL,
                            PRIMARY KEY (parent, child)
                        );

                        -- add unsorted layers to root layer collection
                        INSERT INTO collection_children (parent, child) VALUES
                        ('{root_layer_collection_id}', '{unsorted_layer_collection_id}');

                        -- TODO: should name be unique (per user)?
                        CREATE TABLE layer_providers (
                            id UUID PRIMARY KEY,
                            type_name text NOT NULL,
                            name text NOT NULL,

                            definition json NOT NULL
                        );

                        -- TODO: uploads, providers permissions

                        -- TODO: relationship between uploads and datasets?

                        CREATE TABLE external_users (
                            id UUID PRIMARY KEY REFERENCES users(id),
                            external_id character varying (256) UNIQUE,
                            email character varying (256),
                            real_name character varying (256),
                            active boolean NOT NULL
                        );
                        "#
                    ,
                    system_role_id = Role::system_role_id(),
                    user_role_id = Role::user_role_id(),
                    anonymous_role_id = Role::anonymous_role_id(),
                    root_layer_collection_id = INTERNAL_LAYER_DB_ROOT_COLLECTION_ID,
                    unsorted_layer_collection_id = UNSORTED_COLLECTION_ID))
                    .await?;
                    debug!("Updated user database to schema version {}", version + 1);
                }
                // Template for the next migration:
                // 1 => {
                // next version
                // conn.batch_execute(
                //     "\
                //     ALTER TABLE users ...
                //
                //     UPDATE version SET version = 2;\
                //     ",
                // )
                // .await?;
                // eprintln!("Updated user database to schema version {}", version + 1);
                // }
                // No migration exists for this version: the schema is current.
                _ => return Ok(()),
            }
            version += 1;
        }
    }
510

511
    pub(crate) async fn check_user_project_permission(
32✔
512
        conn: &PooledConnection<'_, PostgresConnectionManager<Tls>>,
32✔
513
        user: UserId,
32✔
514
        project: ProjectId,
32✔
515
        permissions: &[ProjectPermission],
32✔
516
    ) -> Result<()> {
32✔
517
        let stmt = conn
32✔
518
            .prepare(
32✔
519
                "
32✔
520
                SELECT TRUE
32✔
521
                FROM user_project_permissions
32✔
522
                WHERE user_id = $1 AND project_id = $2 AND permission = ANY ($3);",
32✔
523
            )
32✔
524
            .await?;
32✔
525

526
        conn.query_one(&stmt, &[&user, &project, &permissions])
32✔
527
            .await
25✔
528
            .map_err(|_error| error::Error::ProjectDbUnauthorized)?;
32✔
529

530
        Ok(())
30✔
531
    }
32✔
532
}
533

534
#[async_trait]
535
impl<Tls> ProContext for PostgresContext<Tls>
536
where
537
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
538
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
539
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
540
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
541
{
542
    type UserDB = PostgresUserDb<Tls>;
543
    type ProDatasetDB = PostgresDatasetDb<Tls>;
544
    type ProProjectDB = PostgresProjectDb<Tls>;
545

546
    fn user_db(&self) -> Arc<Self::UserDB> {
1✔
547
        self.user_db.clone()
1✔
548
    }
1✔
549
    fn user_db_ref(&self) -> &Self::UserDB {
27✔
550
        &self.user_db
27✔
551
    }
27✔
552
    fn oidc_request_db(&self) -> Option<&OidcRequestDb> {
×
553
        self.oidc_request_db.as_ref().as_ref()
×
554
    }
×
555

556
    fn pro_dataset_db(&self) -> Arc<Self::ProDatasetDB> {
×
557
        self.dataset_db.clone()
×
558
    }
×
559
    fn pro_dataset_db_ref(&self) -> &Self::ProDatasetDB {
×
560
        &self.dataset_db
×
561
    }
×
562

563
    fn pro_project_db(&self) -> Arc<Self::ProProjectDB> {
×
564
        self.project_db.clone()
×
565
    }
×
566
    fn pro_project_db_ref(&self) -> &Self::ProProjectDB {
×
567
        &self.project_db
×
568
    }
×
569
}
570

571
#[async_trait]
572
impl<Tls> Context for PostgresContext<Tls>
573
where
574
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
575
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
576
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
577
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
578
{
579
    type Session = UserSession;
580
    type ProjectDB = PostgresProjectDb<Tls>;
581
    type WorkflowRegistry = PostgresWorkflowRegistry<Tls>;
582
    type DatasetDB = PostgresDatasetDb<Tls>;
583
    type LayerDB = PostgresLayerDb<Tls>;
584
    type LayerProviderDB = PostgresLayerProviderDb<Tls>;
585
    type TaskContext = SimpleTaskManagerContext;
586
    type TaskManager = SimpleTaskManager; // this does not persist across restarts
587
    type QueryContext = QueryContextImpl;
588
    type ExecutionContext =
589
        ExecutionContextImpl<UserSession, PostgresDatasetDb<Tls>, PostgresLayerProviderDb<Tls>>;
590

591
    fn project_db(&self) -> Arc<Self::ProjectDB> {
×
592
        self.project_db.clone()
×
593
    }
×
594
    fn project_db_ref(&self) -> &Self::ProjectDB {
46✔
595
        &self.project_db
46✔
596
    }
46✔
597

598
    fn workflow_registry(&self) -> Arc<Self::WorkflowRegistry> {
×
599
        self.workflow_registry.clone()
×
600
    }
×
601
    fn workflow_registry_ref(&self) -> &Self::WorkflowRegistry {
10✔
602
        &self.workflow_registry
10✔
603
    }
10✔
604

605
    fn dataset_db(&self) -> Arc<Self::DatasetDB> {
×
606
        self.dataset_db.clone()
×
607
    }
×
608
    fn dataset_db_ref(&self) -> &Self::DatasetDB {
31✔
609
        &self.dataset_db
31✔
610
    }
31✔
611

612
    fn layer_db(&self) -> Arc<Self::LayerDB> {
×
613
        self.layer_db.clone()
×
614
    }
×
615
    fn layer_db_ref(&self) -> &Self::LayerDB {
4✔
616
        &self.layer_db
4✔
617
    }
4✔
618

619
    fn layer_provider_db(&self) -> Arc<Self::LayerProviderDB> {
×
620
        self.layer_provider_db.clone()
×
621
    }
×
622
    fn layer_provider_db_ref(&self) -> &Self::LayerProviderDB {
1✔
623
        &self.layer_provider_db
1✔
624
    }
1✔
625

626
    fn tasks(&self) -> Arc<Self::TaskManager> {
×
627
        self.task_manager.clone()
×
628
    }
×
629
    fn tasks_ref(&self) -> &Self::TaskManager {
×
630
        &self.task_manager
×
631
    }
×
632

633
    fn query_context(&self, session: UserSession) -> Result<Self::QueryContext> {
×
634
        // TODO: load config only once
×
635

×
636
        let mut extensions = QueryContextExtensions::default();
×
637
        extensions.insert(
×
638
            self.quota
×
639
                .create_quota_tracking(&session, ComputationContext::new()),
×
640
        );
×
641

×
642
        Ok(QueryContextImpl::new_with_extensions(
×
643
            self.query_ctx_chunk_size,
×
644
            self.thread_pool.clone(),
×
645
            extensions,
×
646
        ))
×
647
    }
×
648

649
    fn execution_context(&self, session: UserSession) -> Result<Self::ExecutionContext> {
×
650
        Ok(ExecutionContextImpl::<
×
651
            UserSession,
×
652
            PostgresDatasetDb<Tls>,
×
653
            PostgresLayerProviderDb<Tls>,
×
654
        >::new(
×
655
            self.dataset_db.clone(),
×
656
            self.layer_provider_db.clone(),
×
657
            self.thread_pool.clone(),
×
658
            session,
×
659
            self.exe_ctx_tiling_spec,
×
660
        ))
×
661
    }
×
662

663
    async fn session_by_id(&self, session_id: crate::contexts::SessionId) -> Result<Self::Session> {
×
664
        self.user_db_ref()
×
665
            .session(session_id)
×
666
            .await
×
667
            .map_err(Box::new)
×
668
            .context(error::Authorization)
×
669
    }
×
670
}
671

672
#[cfg(test)]
673
mod tests {
674
    use std::collections::HashMap;
675
    use std::str::FromStr;
676

677
    use super::*;
678
    use crate::api::model::datatypes::{DataProviderId, DatasetId};
679
    use crate::api::model::services::AddDataset;
680
    use crate::datasets::external::mock::{MockCollection, MockExternalLayerProviderDefinition};
681
    use crate::datasets::listing::SessionMetaDataProvider;
682
    use crate::datasets::listing::{DatasetListOptions, DatasetListing, ProvenanceOutput};
683
    use crate::datasets::listing::{DatasetProvider, Provenance};
684
    use crate::datasets::storage::{DatasetStore, MetaDataDefinition};
685
    use crate::datasets::upload::{FileId, UploadId};
686
    use crate::datasets::upload::{FileUpload, Upload, UploadDb};
687
    use crate::layers::layer::{
688
        AddLayer, AddLayerCollection, CollectionItem, LayerCollection, LayerCollectionListOptions,
689
        LayerCollectionListing, LayerListing, ProviderLayerCollectionId, ProviderLayerId,
690
    };
691
    use crate::layers::listing::{LayerCollectionId, LayerCollectionProvider};
692
    use crate::layers::storage::{
693
        LayerDb, LayerProviderDb, LayerProviderListing, LayerProviderListingOptions,
694
        INTERNAL_PROVIDER_ID,
695
    };
696
    use crate::pro::datasets::{DatasetPermission, Permission, UpdateDatasetPermissions};
697
    use crate::pro::projects::{LoadVersion, ProProjectDb, UserProjectPermission};
698
    use crate::pro::users::{ExternalUserClaims, UserCredentials, UserDb, UserRegistration};
699
    use crate::projects::{
700
        CreateProject, Layer, LayerUpdate, OrderBy, Plot, PlotUpdate, PointSymbology, ProjectDb,
701
        ProjectFilter, ProjectId, ProjectListOptions, ProjectListing, STRectangle, UpdateProject,
702
    };
703
    use crate::util::config::{get_config_element, Postgres};
704
    use crate::util::user_input::UserInput;
705
    use crate::workflows::registry::WorkflowRegistry;
706
    use crate::workflows::workflow::Workflow;
707
    use bb8_postgres::bb8::ManageConnection;
708
    use bb8_postgres::tokio_postgres::{self, NoTls};
709
    use futures::Future;
710
    use geoengine_datatypes::collections::VectorDataType;
711
    use geoengine_datatypes::primitives::{
712
        BoundingBox2D, Coordinate2D, DateTime, Duration, FeatureDataType, Measurement,
713
        SpatialResolution, TimeInterval, VectorQueryRectangle,
714
    };
715
    use geoengine_datatypes::spatial_reference::{SpatialReference, SpatialReferenceOption};
716
    use geoengine_datatypes::util::test::TestDefault;
717
    use geoengine_datatypes::util::Identifier;
718
    use geoengine_operators::engine::{
719
        MetaData, MultipleRasterOrSingleVectorSource, PlotOperator, StaticMetaData, TypedOperator,
720
        TypedResultDescriptor, VectorColumnInfo, VectorOperator, VectorResultDescriptor,
721
    };
722
    use geoengine_operators::mock::{MockPointSource, MockPointSourceParams};
723
    use geoengine_operators::plot::{Statistics, StatisticsParams};
724
    use geoengine_operators::source::{
725
        CsvHeader, FormatSpecifics, OgrSourceColumnSpec, OgrSourceDataset,
726
        OgrSourceDatasetTimeType, OgrSourceDurationSpec, OgrSourceErrorSpec, OgrSourceTimeFormat,
727
    };
728
    use geoengine_operators::util::input::MultiRasterOrVectorOperator::Raster;
729
    use openidconnect::SubjectIdentifier;
730
    use rand::RngCore;
731
    use tokio::runtime::Handle;
732

733
    /// Setup database schema and return its name.
    ///
    /// Creates a randomly named schema so concurrent tests are isolated, and
    /// returns a connection config whose `search_path` points at that schema.
    async fn setup_db() -> (tokio_postgres::Config, String) {
        let mut db_config = get_config_element::<Postgres>().unwrap();
        db_config.schema = format!("geoengine_test_{}", rand::thread_rng().next_u64()); // generate random temp schema

        let mut pg_config = tokio_postgres::Config::new();
        pg_config
            .user(&db_config.user)
            .password(&db_config.password)
            .host(&db_config.host)
            .dbname(&db_config.database);

        // generate schema with prior connection
        PostgresConnectionManager::new(pg_config.clone(), NoTls)
            .connect()
            .await
            .unwrap()
            .batch_execute(&format!("CREATE SCHEMA {};", &db_config.schema))
            .await
            .unwrap();

        // fix schema by providing `search_path` option
        pg_config.options(&format!("-c search_path={}", db_config.schema));

        (pg_config, db_config.schema)
    }
759

760
    /// Tear down database schema.
    async fn tear_down_db(pg_config: tokio_postgres::Config, schema: &str) {
        // drop the temporary schema (and all contained objects) with a dedicated connection
        PostgresConnectionManager::new(pg_config, NoTls)
            .connect()
            .await
            .unwrap()
            .batch_execute(&format!("DROP SCHEMA {schema} CASCADE;"))
            .await
            .unwrap();
    }
771

772
    /// Runs `f` against a `PostgresContext` backed by a temporary schema.
    ///
    /// Panics inside `f` are caught first so the schema is always dropped,
    /// then re-raised afterwards so the test still fails.
    async fn with_temp_context<F, Fut>(f: F)
    where
        F: FnOnce(PostgresContext<NoTls>, tokio_postgres::Config) -> Fut
            + std::panic::UnwindSafe
            + Send
            + 'static,
        Fut: Future<Output = ()> + Send,
    {
        let (pg_config, schema) = setup_db().await;

        // catch all panics and clean up first…
        let executed_fn = {
            let pg_config = pg_config.clone();
            // `catch_unwind` needs a sync closure, so block on the async body
            // from within the multi-threaded runtime via `block_in_place`.
            std::panic::catch_unwind(move || {
                tokio::task::block_in_place(move || {
                    Handle::current().block_on(async move {
                        let ctx = PostgresContext::new_with_context_spec(
                            pg_config.clone(),
                            tokio_postgres::NoTls,
                            TestDefault::test_default(),
                            TestDefault::test_default(),
                        )
                        .await
                        .unwrap();
                        f(ctx, pg_config.clone()).await;
                    });
                });
            })
        };

        tear_down_db(pg_config, &schema).await;

        // then throw errors afterwards
        if let Err(err) = executed_fn {
            std::panic::resume_unwind(err);
        }
    }
809

810
    /// End-to-end walk through the user/project lifecycle with an internally
    /// registered user: register, login, create/list/update projects, manage
    /// permissions, and delete.
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn test() {
        with_temp_context(|ctx, _| async move {
            anonymous(&ctx).await;

            let _user_id = user_reg_login(&ctx).await;

            let session = ctx
                .user_db_ref()
                .login(UserCredentials {
                    email: "foo@example.com".into(),
                    password: "secret123".into(),
                })
                .await
                .unwrap();

            create_projects(&ctx, &session).await;

            let projects = list_projects(&ctx, &session).await;

            set_session(&ctx, &projects).await;

            let project_id = projects[0].id;

            update_projects(&ctx, &session, project_id).await;

            add_permission(&ctx, &session, project_id).await;

            delete_project(&ctx, &session, project_id).await;
        })
        .await;
    }
842

843
    /// Same lifecycle as `test`, but authenticated via an external (OIDC)
    /// identity instead of an internal registration.
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn test_external() {
        with_temp_context(|ctx, _| async move {
            anonymous(&ctx).await;

            let session = external_user_login_twice(&ctx).await;

            create_projects(&ctx, &session).await;

            let projects = list_projects(&ctx, &session).await;

            set_session_external(&ctx, &projects).await;

            let project_id = projects[0].id;

            update_projects(&ctx, &session, project_id).await;

            add_permission(&ctx, &session, project_id).await;

            delete_project(&ctx, &session, project_id).await;
        })
        .await;
    }
866

867
    /// Logs in with the internally registered user and verifies that session
    /// state (project, view) round-trips through the database.
    async fn set_session(ctx: &PostgresContext<NoTls>, projects: &[ProjectListing]) {
        let credentials = UserCredentials {
            email: "foo@example.com".into(),
            password: "secret123".into(),
        };

        let user_db = ctx.user_db_ref();

        let session = user_db.login(credentials).await.unwrap();

        set_session_in_database(user_db, projects, session).await;
    }
879

880
    /// Logs in with external (OIDC) claims and verifies that session state
    /// (project, view) round-trips through the database.
    async fn set_session_external(ctx: &PostgresContext<NoTls>, projects: &[ProjectListing]) {
        let external_user_claims = ExternalUserClaims {
            external_id: SubjectIdentifier::new("Foo bar Id".into()),
            email: "foo@bar.de".into(),
            real_name: "Foo Bar".into(),
        };

        let user_db = ctx.user_db_ref();

        let session = user_db
            .login_external(external_user_claims, Duration::minutes(10))
            .await
            .unwrap();

        set_session_in_database(user_db, projects, session).await;
    }
896

897
    /// Persists the session's project and view and asserts both can be read
    /// back from the user database.
    async fn set_session_in_database(
        user_db: &PostgresUserDb<NoTls>,
        projects: &[ProjectListing],
        session: UserSession,
    ) {
        user_db
            .set_session_project(&session, projects[0].id)
            .await
            .unwrap();

        assert_eq!(
            user_db.session(session.id).await.unwrap().project,
            Some(projects[0].id)
        );

        let rect = STRectangle::new_unchecked(SpatialReference::epsg_4326(), 0., 1., 2., 3., 1, 2);
        user_db
            .set_session_view(&session, rect.clone())
            .await
            .unwrap();
        assert_eq!(user_db.session(session.id).await.unwrap().view, Some(rect));
    }
919

920
    /// Deletes the project and asserts it can no longer be loaded.
    async fn delete_project(
        ctx: &PostgresContext<NoTls>,
        session: &UserSession,
        project_id: ProjectId,
    ) {
        ctx.project_db_ref()
            .delete(session, project_id)
            .await
            .unwrap();

        assert!(ctx
            .project_db_ref()
            .load(session, project_id)
            .await
            .is_err());
    }
936

937
    /// Registers a second user, grants it read access to the project, and
    /// asserts the permission count grows from 1 (owner only) to 2.
    async fn add_permission(
        ctx: &PostgresContext<NoTls>,
        session: &UserSession,
        project_id: ProjectId,
    ) {
        assert_eq!(
            ctx.project_db_ref()
                .list_permissions(session, project_id)
                .await
                .unwrap()
                .len(),
            1
        );

        let user2 = ctx
            .user_db_ref()
            .register(
                UserRegistration {
                    email: "user2@example.com".into(),
                    password: "12345678".into(),
                    real_name: "User2".into(),
                }
                .validated()
                .unwrap(),
            )
            .await
            .unwrap();

        ctx.project_db_ref()
            .add_permission(
                session,
                UserProjectPermission {
                    project: project_id,
                    permission: ProjectPermission::Read,
                    user: user2,
                },
            )
            .await
            .unwrap();

        assert_eq!(
            ctx.project_db_ref()
                .list_permissions(session, project_id)
                .await
                .unwrap()
                .len(),
            2
        );
    }
986

987
    /// Applies a sequence of project updates (add a plot, add a second plot,
    /// remove all plots) and asserts that each update creates a new project
    /// version (2, 3, then 4 versions total).
    #[allow(clippy::too_many_lines)]
    async fn update_projects(
        ctx: &PostgresContext<NoTls>,
        session: &UserSession,
        project_id: ProjectId,
    ) {
        let project = ctx
            .project_db_ref()
            .load_version(session, project_id, LoadVersion::Latest)
            .await
            .unwrap();

        // register a vector workflow to reference from the project layer
        let layer_workflow_id = ctx
            .workflow_registry_ref()
            .register(Workflow {
                operator: TypedOperator::Vector(
                    MockPointSource {
                        params: MockPointSourceParams {
                            points: vec![Coordinate2D::new(1., 2.); 3],
                        },
                    }
                    .boxed(),
                ),
            })
            .await
            .unwrap();

        assert!(ctx
            .workflow_registry_ref()
            .load(&layer_workflow_id)
            .await
            .is_ok());

        // register a plot workflow to reference from the project plots
        let plot_workflow_id = ctx
            .workflow_registry_ref()
            .register(Workflow {
                operator: Statistics {
                    params: StatisticsParams {
                        column_names: vec![],
                    },
                    sources: MultipleRasterOrSingleVectorSource {
                        source: Raster(vec![]),
                    },
                }
                .boxed()
                .into(),
            })
            .await
            .unwrap();

        assert!(ctx
            .workflow_registry_ref()
            .load(&plot_workflow_id)
            .await
            .is_ok());

        // add a plot
        let update = UpdateProject {
            id: project.id,
            name: Some("Test9 Updated".into()),
            description: None,
            layers: Some(vec![LayerUpdate::UpdateOrInsert(Layer {
                workflow: layer_workflow_id,
                name: "TestLayer".into(),
                symbology: PointSymbology::default().into(),
                visibility: Default::default(),
            })]),
            plots: Some(vec![PlotUpdate::UpdateOrInsert(Plot {
                workflow: plot_workflow_id,
                name: "Test Plot".into(),
            })]),
            bounds: None,
            time_step: None,
        };
        ctx.project_db_ref()
            .update(session, update.validated().unwrap())
            .await
            .unwrap();

        let versions = ctx
            .project_db_ref()
            .versions(session, project_id)
            .await
            .unwrap();
        assert_eq!(versions.len(), 2);

        // add second plot
        let update = UpdateProject {
            id: project.id,
            name: Some("Test9 Updated".into()),
            description: None,
            layers: Some(vec![LayerUpdate::UpdateOrInsert(Layer {
                workflow: layer_workflow_id,
                name: "TestLayer".into(),
                symbology: PointSymbology::default().into(),
                visibility: Default::default(),
            })]),
            plots: Some(vec![
                PlotUpdate::UpdateOrInsert(Plot {
                    workflow: plot_workflow_id,
                    name: "Test Plot".into(),
                }),
                PlotUpdate::UpdateOrInsert(Plot {
                    workflow: plot_workflow_id,
                    name: "Test Plot".into(),
                }),
            ]),
            bounds: None,
            time_step: None,
        };
        ctx.project_db_ref()
            .update(session, update.validated().unwrap())
            .await
            .unwrap();

        let versions = ctx
            .project_db_ref()
            .versions(session, project_id)
            .await
            .unwrap();
        assert_eq!(versions.len(), 3);

        // delete plots
        let update = UpdateProject {
            id: project.id,
            name: None,
            description: None,
            layers: None,
            plots: Some(vec![]),
            bounds: None,
            time_step: None,
        };
        ctx.project_db_ref()
            .update(session, update.validated().unwrap())
            .await
            .unwrap();

        let versions = ctx
            .project_db_ref()
            .versions(session, project_id)
            .await
            .unwrap();
        assert_eq!(versions.len(), 4);
    }
1131

1132
    /// Lists projects (page of 2, name-descending) and asserts the expected
    /// page contents; returns the listings for further use.
    async fn list_projects(
        ctx: &PostgresContext<NoTls>,
        session: &UserSession,
    ) -> Vec<ProjectListing> {
        let options = ProjectListOptions {
            filter: ProjectFilter::None,
            order: OrderBy::NameDesc,
            offset: 0,
            limit: 2,
        }
        .validated()
        .unwrap();
        let projects = ctx.project_db_ref().list(session, options).await.unwrap();

        // name-descending over Test0..Test9 puts Test9 and Test8 first
        assert_eq!(projects.len(), 2);
        assert_eq!(projects[0].name, "Test9");
        assert_eq!(projects[1].name, "Test8");
        projects
    }
1151

1152
    /// Creates ten projects named `Test0`..`Test9` for the given session.
    async fn create_projects(ctx: &PostgresContext<NoTls>, session: &UserSession) {
        for i in 0..10 {
            let create = CreateProject {
                name: format!("Test{i}"),
                description: format!("Test{}", 10 - i),
                bounds: STRectangle::new(
                    SpatialReferenceOption::Unreferenced,
                    0.,
                    0.,
                    1.,
                    1.,
                    0,
                    1,
                )
                .unwrap(),
                time_step: None,
            }
            .validated()
            .unwrap();
            ctx.project_db_ref().create(session, create).await.unwrap();
        }
    }
1174

1175
    /// Registers a user, logs in, verifies the session exists, logs out, and
    /// verifies the session is gone. Returns the registered user's id.
    async fn user_reg_login(ctx: &PostgresContext<NoTls>) -> UserId {
        let db = ctx.user_db_ref();

        let user_registration = UserRegistration {
            email: "foo@example.com".into(),
            password: "secret123".into(),
            real_name: "Foo Bar".into(),
        }
        .validated()
        .unwrap();

        let user_id = db.register(user_registration).await.unwrap();

        let credentials = UserCredentials {
            email: "foo@example.com".into(),
            password: "secret123".into(),
        };

        let session = db.login(credentials).await.unwrap();

        db.session(session.id).await.unwrap();

        db.logout(session.id).await.unwrap();

        // after logout the session id must no longer resolve
        assert!(db.session(session.id).await.is_err());

        user_id
    }
1203

1204
    //TODO: No duplicate tests for postgres and hashmap implementation possible?
    /// Logs in twice with the same external (OIDC) claims — with a logout in
    /// between — and asserts both sessions resolve to the same user and that
    /// each session's validity matches the requested duration.
    /// Returns the second session.
    async fn external_user_login_twice(ctx: &PostgresContext<NoTls>) -> UserSession {
        let db = ctx.user_db_ref();

        let external_user_claims = ExternalUserClaims {
            external_id: SubjectIdentifier::new("Foo bar Id".into()),
            email: "foo@bar.de".into(),
            real_name: "Foo Bar".into(),
        };
        let duration = Duration::minutes(30);

        //NEW
        let login_result = db
            .login_external(external_user_claims.clone(), duration)
            .await;
        assert!(login_result.is_ok());

        let session_1 = login_result.unwrap();
        let user_id = session_1.user.id; //TODO: Not a deterministic test.

        assert!(session_1.user.email.is_some());
        assert_eq!(session_1.user.email.unwrap(), "foo@bar.de");
        assert!(session_1.user.real_name.is_some());
        assert_eq!(session_1.user.real_name.unwrap(), "Foo Bar");

        let expected_duration = session_1.created + duration;
        assert_eq!(session_1.valid_until, expected_duration);

        assert!(db.session(session_1.id).await.is_ok());

        assert!(db.logout(session_1.id).await.is_ok());

        assert!(db.session(session_1.id).await.is_err());

        // second login with the same claims must map to the same user
        let duration = Duration::minutes(10);
        let login_result = db
            .login_external(external_user_claims.clone(), duration)
            .await;
        assert!(login_result.is_ok());

        let session_2 = login_result.unwrap();
        let result = session_2.clone();

        assert!(session_2.user.email.is_some()); //TODO: Technically, user details could change for each login. For simplicity, this is not covered yet.
        assert_eq!(session_2.user.email.unwrap(), "foo@bar.de");
        assert!(session_2.user.real_name.is_some());
        assert_eq!(session_2.user.real_name.unwrap(), "Foo Bar");
        assert_eq!(session_2.user.id, user_id);

        let expected_duration = session_2.created + duration;
        assert_eq!(session_2.valid_until, expected_duration);

        assert!(db.session(session_2.id).await.is_ok());

        result
    }
1260

1261
    /// Exercises the anonymous-session lifecycle: creation timestamp bounds,
    /// lookup by id, logout, and lookup failure after logout.
    async fn anonymous(ctx: &PostgresContext<NoTls>) {
        let db = ctx.user_db_ref();

        // bracket creation between two timestamps to sanity-check `created`
        let now: DateTime = chrono::offset::Utc::now().into();
        let session = db.anonymous().await.unwrap();
        let then: DateTime = chrono::offset::Utc::now().into();

        assert!(session.created >= now && session.created <= then);
        assert!(session.valid_until > session.created);

        let session = db.session(session.id).await.unwrap();

        db.logout(session.id).await.unwrap();

        assert!(db.session(session.id).await.is_err());
    }
1277

1278
    /// Registers a workflow, drops the context, reconnects with a fresh
    /// context, and asserts the workflow is still loadable and serializes to
    /// the expected JSON — i.e. it was persisted in Postgres, not in memory.
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn it_persists_workflows() {
        with_temp_context(|ctx, pg_config| async move {
            let workflow = Workflow {
                operator: TypedOperator::Vector(
                    MockPointSource {
                        params: MockPointSourceParams {
                            points: vec![Coordinate2D::new(1., 2.); 3],
                        },
                    }
                    .boxed(),
                ),
            };

            let id = ctx
                .workflow_registry_ref()
                .register(workflow)
                .await
                .unwrap();

            // discard the context so only the database retains the workflow
            drop(ctx);

            let ctx = PostgresContext::new_with_context_spec(pg_config.clone(), tokio_postgres::NoTls, TestDefault::test_default(), TestDefault::test_default())
                .await
                .unwrap();

            let workflow = ctx.workflow_registry_ref().load(&id).await.unwrap();

            let json = serde_json::to_string(&workflow).unwrap();
            assert_eq!(json, r#"{"type":"Vector","operator":{"type":"MockPointSource","params":{"points":[{"x":1.0,"y":2.0},{"x":1.0,"y":2.0},{"x":1.0,"y":2.0}]}}}"#);
        })
        .await;
    }
1311

1312
    /// Adds an OGR-backed dataset with provenance, then asserts it appears in
    /// listings, its provenance round-trips, and its loading info can be
    /// retrieved through the session-scoped meta-data interface.
    #[allow(clippy::too_many_lines)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn it_persists_datasets() {
        with_temp_context(|ctx, _| async move {
            let dataset_id = DatasetId::from_str("2e8af98d-3b98-4e2c-a35b-e487bffad7b6").unwrap();

            // loading info for a CSV-backed multi-point OGR source
            let loading_info = OgrSourceDataset {
                file_name: PathBuf::from("test.csv"),
                layer_name: "test.csv".to_owned(),
                data_type: Some(VectorDataType::MultiPoint),
                time: OgrSourceDatasetTimeType::Start {
                    start_field: "start".to_owned(),
                    start_format: OgrSourceTimeFormat::Auto,
                    duration: OgrSourceDurationSpec::Zero,
                },
                default_geometry: None,
                columns: Some(OgrSourceColumnSpec {
                    format_specifics: Some(FormatSpecifics::Csv {
                        header: CsvHeader::Auto,
                    }),
                    x: "x".to_owned(),
                    y: None,
                    int: vec![],
                    float: vec![],
                    text: vec![],
                    bool: vec![],
                    datetime: vec![],
                    rename: None,
                }),
                force_ogr_time_filter: false,
                force_ogr_spatial_filter: false,
                on_error: OgrSourceErrorSpec::Ignore,
                sql_query: None,
                attribute_query: None,
            };

            let meta_data = MetaDataDefinition::OgrMetaData(StaticMetaData::<
                OgrSourceDataset,
                VectorResultDescriptor,
                VectorQueryRectangle,
            > {
                loading_info: loading_info.clone(),
                result_descriptor: VectorResultDescriptor {
                    data_type: VectorDataType::MultiPoint,
                    spatial_reference: SpatialReference::epsg_4326().into(),
                    columns: [(
                        "foo".to_owned(),
                        VectorColumnInfo {
                            data_type: FeatureDataType::Float,
                            measurement: Measurement::Unitless,
                        },
                    )]
                    .into_iter()
                    .collect(),
                    time: None,
                    bbox: None,
                },
                phantom: Default::default(),
            });

            let session = ctx.user_db_ref().anonymous().await.unwrap();

            let db = ctx.dataset_db_ref();
            let wrap = db.wrap_meta_data(meta_data);
            db.add_dataset(
                &session,
                AddDataset {
                    id: Some(dataset_id),
                    name: "Ogr Test".to_owned(),
                    description: "desc".to_owned(),
                    source_operator: "OgrSource".to_owned(),
                    symbology: None,
                    provenance: Some(Provenance {
                        citation: "citation".to_owned(),
                        license: "license".to_owned(),
                        uri: "uri".to_owned(),
                    }),
                }
                .validated()
                .unwrap(),
                wrap,
            )
            .await
            .unwrap();

            // the new dataset must appear in the listing with its full descriptor
            let datasets = db
                .list(
                    &session,
                    DatasetListOptions {
                        filter: None,
                        order: crate::datasets::listing::OrderBy::NameAsc,
                        offset: 0,
                        limit: 10,
                    }
                    .validated()
                    .unwrap(),
                )
                .await
                .unwrap();

            assert_eq!(datasets.len(), 1);

            assert_eq!(
                datasets[0],
                DatasetListing {
                    id: dataset_id,
                    name: "Ogr Test".to_owned(),
                    description: "desc".to_owned(),
                    source_operator: "OgrSource".to_owned(),
                    symbology: None,
                    tags: vec![],
                    result_descriptor: TypedResultDescriptor::Vector(VectorResultDescriptor {
                        data_type: VectorDataType::MultiPoint,
                        spatial_reference: SpatialReference::epsg_4326().into(),
                        columns: [(
                            "foo".to_owned(),
                            VectorColumnInfo {
                                data_type: FeatureDataType::Float,
                                measurement: Measurement::Unitless
                            }
                        )]
                        .into_iter()
                        .collect(),
                        time: None,
                        bbox: None,
                    })
                    .into(),
                },
            );

            // provenance round-trips
            let provenance = db.provenance(&session, &dataset_id).await.unwrap();

            assert_eq!(
                provenance,
                ProvenanceOutput {
                    data: dataset_id.into(),
                    provenance: Some(Provenance {
                        citation: "citation".to_owned(),
                        license: "license".to_owned(),
                        uri: "uri".to_owned(),
                    })
                }
            );

            // loading info round-trips through the session-scoped meta-data API
            let meta_data: Box<dyn MetaData<OgrSourceDataset, _, _>> = db
                .session_meta_data(&session, &dataset_id.into())
                .await
                .unwrap();

            assert_eq!(
                meta_data
                    .loading_info(VectorQueryRectangle {
                        spatial_bounds: BoundingBox2D::new_unchecked(
                            (-180., -90.).into(),
                            (180., 90.).into()
                        ),
                        time_interval: TimeInterval::default(),
                        spatial_resolution: SpatialResolution::zero_point_one(),
                    })
                    .await
                    .unwrap(),
                loading_info
            );
        })
        .await;
    }
1478

1479
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1480
    async fn it_persists_uploads() {
1✔
1481
        with_temp_context(|ctx, _| async move {
1✔
1482
            let db = ctx.dataset_db_ref();
1✔
1483

1✔
1484
            let id = UploadId::from_str("2de18cd8-4a38-4111-a445-e3734bc18a80").unwrap();
1✔
1485
            let input = Upload {
1✔
1486
                id,
1✔
1487
                files: vec![FileUpload {
1✔
1488
                    id: FileId::from_str("e80afab0-831d-4d40-95d6-1e4dfd277e72").unwrap(),
1✔
1489
                    name: "test.csv".to_owned(),
1✔
1490
                    byte_size: 1337,
1✔
1491
                }],
1✔
1492
            };
1✔
1493

1494
            let session = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1495
            db.create_upload(&session, input.clone()).await.unwrap();
8✔
1496

1497
            let upload = db.get_upload(&session, id).await.unwrap();
3✔
1498

1✔
1499
            assert_eq!(upload, input);
1✔
1500
        })
1✔
1501
        .await;
10✔
1502
    }
1503

1504
    #[allow(clippy::too_many_lines)]
1505
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1506
    async fn it_persists_layer_providers() {
1✔
1507
        with_temp_context(|ctx, _| async move {
1✔
1508
            let db = ctx.layer_provider_db_ref();
1✔
1509

1✔
1510
            let provider_id =
1✔
1511
                DataProviderId::from_str("7b20c8d7-d754-4f8f-ad44-dddd25df22d2").unwrap();
1✔
1512

1✔
1513
            let loading_info = OgrSourceDataset {
1✔
1514
                file_name: PathBuf::from("test.csv"),
1✔
1515
                layer_name: "test.csv".to_owned(),
1✔
1516
                data_type: Some(VectorDataType::MultiPoint),
1✔
1517
                time: OgrSourceDatasetTimeType::Start {
1✔
1518
                    start_field: "start".to_owned(),
1✔
1519
                    start_format: OgrSourceTimeFormat::Auto,
1✔
1520
                    duration: OgrSourceDurationSpec::Zero,
1✔
1521
                },
1✔
1522
                default_geometry: None,
1✔
1523
                columns: Some(OgrSourceColumnSpec {
1✔
1524
                    format_specifics: Some(FormatSpecifics::Csv {
1✔
1525
                        header: CsvHeader::Auto,
1✔
1526
                    }),
1✔
1527
                    x: "x".to_owned(),
1✔
1528
                    y: None,
1✔
1529
                    int: vec![],
1✔
1530
                    float: vec![],
1✔
1531
                    text: vec![],
1✔
1532
                    bool: vec![],
1✔
1533
                    datetime: vec![],
1✔
1534
                    rename: None,
1✔
1535
                }),
1✔
1536
                force_ogr_time_filter: false,
1✔
1537
                force_ogr_spatial_filter: false,
1✔
1538
                on_error: OgrSourceErrorSpec::Ignore,
1✔
1539
                sql_query: None,
1✔
1540
                attribute_query: None,
1✔
1541
            };
1✔
1542

1✔
1543
            let meta_data = MetaDataDefinition::OgrMetaData(StaticMetaData::<
1✔
1544
                OgrSourceDataset,
1✔
1545
                VectorResultDescriptor,
1✔
1546
                VectorQueryRectangle,
1✔
1547
            > {
1✔
1548
                loading_info: loading_info.clone(),
1✔
1549
                result_descriptor: VectorResultDescriptor {
1✔
1550
                    data_type: VectorDataType::MultiPoint,
1✔
1551
                    spatial_reference: SpatialReference::epsg_4326().into(),
1✔
1552
                    columns: [(
1✔
1553
                        "foo".to_owned(),
1✔
1554
                        VectorColumnInfo {
1✔
1555
                            data_type: FeatureDataType::Float,
1✔
1556
                            measurement: Measurement::Unitless,
1✔
1557
                        },
1✔
1558
                    )]
1✔
1559
                    .into_iter()
1✔
1560
                    .collect(),
1✔
1561
                    time: None,
1✔
1562
                    bbox: None,
1✔
1563
                },
1✔
1564
                phantom: Default::default(),
1✔
1565
            });
1✔
1566

1✔
1567
            let provider = MockExternalLayerProviderDefinition {
1✔
1568
                id: provider_id,
1✔
1569
                root_collection: MockCollection {
1✔
1570
                    id: LayerCollectionId("b5f82c7c-9133-4ac1-b4ae-8faac3b9a6df".to_owned()),
1✔
1571
                    name: "Mock Collection A".to_owned(),
1✔
1572
                    description: "Some description".to_owned(),
1✔
1573
                    collections: vec![MockCollection {
1✔
1574
                        id: LayerCollectionId("21466897-37a1-4666-913a-50b5244699ad".to_owned()),
1✔
1575
                        name: "Mock Collection B".to_owned(),
1✔
1576
                        description: "Some description".to_owned(),
1✔
1577
                        collections: vec![],
1✔
1578
                        layers: vec![],
1✔
1579
                    }],
1✔
1580
                    layers: vec![],
1✔
1581
                },
1✔
1582
                data: [("myData".to_owned(), meta_data)].into_iter().collect(),
1✔
1583
            };
1✔
1584

1✔
1585
            db.add_layer_provider(Box::new(provider)).await.unwrap();
3✔
1586

1587
            let providers = db
1✔
1588
                .list_layer_providers(
1✔
1589
                    LayerProviderListingOptions {
1✔
1590
                        offset: 0,
1✔
1591
                        limit: 10,
1✔
1592
                    }
1✔
1593
                    .validated()
1✔
1594
                    .unwrap(),
1✔
1595
                )
1✔
1596
                .await
3✔
1597
                .unwrap();
1✔
1598

1✔
1599
            assert_eq!(providers.len(), 1);
1✔
1600

1601
            assert_eq!(
1✔
1602
                providers[0],
1✔
1603
                LayerProviderListing {
1✔
1604
                    id: provider_id,
1✔
1605
                    name: "MockName".to_owned(),
1✔
1606
                    description: "MockType".to_owned(),
1✔
1607
                }
1✔
1608
            );
1✔
1609

1610
            let provider = db.layer_provider(provider_id).await.unwrap();
3✔
1611

1612
            let datasets = provider
1✔
1613
                .collection(
1614
                    &provider.root_collection_id().await.unwrap(),
1✔
1615
                    LayerCollectionListOptions {
1✔
1616
                        offset: 0,
1✔
1617
                        limit: 10,
1✔
1618
                    }
1✔
1619
                    .validated()
1✔
1620
                    .unwrap(),
1✔
1621
                )
1622
                .await
×
1623
                .unwrap();
1✔
1624

1✔
1625
            assert_eq!(datasets.items.len(), 1);
1✔
1626
        })
1✔
1627
        .await;
11✔
1628
    }
1629

1630
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1631
    async fn it_lists_only_permitted_datasets() {
1✔
1632
        with_temp_context(|ctx, _| async move {
1✔
1633
            let session1 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1634
            let session2 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1635

1✔
1636
            let descriptor = VectorResultDescriptor {
1✔
1637
                data_type: VectorDataType::Data,
1✔
1638
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1639
                columns: Default::default(),
1✔
1640
                time: None,
1✔
1641
                bbox: None,
1✔
1642
            };
1✔
1643

1✔
1644
            let ds = AddDataset {
1✔
1645
                id: None,
1✔
1646
                name: "OgrDataset".to_string(),
1✔
1647
                description: "My Ogr dataset".to_string(),
1✔
1648
                source_operator: "OgrSource".to_string(),
1✔
1649
                symbology: None,
1✔
1650
                provenance: None,
1✔
1651
            };
1✔
1652

1✔
1653
            let meta = StaticMetaData {
1✔
1654
                loading_info: OgrSourceDataset {
1✔
1655
                    file_name: Default::default(),
1✔
1656
                    layer_name: String::new(),
1✔
1657
                    data_type: None,
1✔
1658
                    time: Default::default(),
1✔
1659
                    default_geometry: None,
1✔
1660
                    columns: None,
1✔
1661
                    force_ogr_time_filter: false,
1✔
1662
                    force_ogr_spatial_filter: false,
1✔
1663
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1664
                    sql_query: None,
1✔
1665
                    attribute_query: None,
1✔
1666
                },
1✔
1667
                result_descriptor: descriptor.clone(),
1✔
1668
                phantom: Default::default(),
1✔
1669
            };
1✔
1670

1✔
1671
            let meta = ctx
1✔
1672
                .dataset_db_ref()
1✔
1673
                .wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1674

1675
            let _id = ctx
1✔
1676
                .dataset_db_ref()
1✔
1677
                .add_dataset(&session1, ds.validated().unwrap(), meta)
1✔
1678
                .await
11✔
1679
                .unwrap();
1✔
1680

1681
            let list1 = ctx
1✔
1682
                .dataset_db_ref()
1✔
1683
                .list(
1✔
1684
                    &session1,
1✔
1685
                    DatasetListOptions {
1✔
1686
                        filter: None,
1✔
1687
                        order: crate::datasets::listing::OrderBy::NameAsc,
1✔
1688
                        offset: 0,
1✔
1689
                        limit: 1,
1✔
1690
                    }
1✔
1691
                    .validated()
1✔
1692
                    .unwrap(),
1✔
1693
                )
1✔
1694
                .await
3✔
1695
                .unwrap();
1✔
1696

1✔
1697
            assert_eq!(list1.len(), 1);
1✔
1698

1699
            let list2 = ctx
1✔
1700
                .dataset_db_ref()
1✔
1701
                .list(
1✔
1702
                    &session2,
1✔
1703
                    DatasetListOptions {
1✔
1704
                        filter: None,
1✔
1705
                        order: crate::datasets::listing::OrderBy::NameAsc,
1✔
1706
                        offset: 0,
1✔
1707
                        limit: 1,
1✔
1708
                    }
1✔
1709
                    .validated()
1✔
1710
                    .unwrap(),
1✔
1711
                )
1✔
1712
                .await
3✔
1713
                .unwrap();
1✔
1714

1✔
1715
            assert_eq!(list2.len(), 0);
1✔
1716
        })
1✔
1717
        .await;
9✔
1718
    }
1719

1720
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1721
    async fn it_shows_only_permitted_provenance() {
1✔
1722
        with_temp_context(|ctx, _| async move {
1✔
1723
            let session1 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1724
            let session2 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1725

1✔
1726
            let descriptor = VectorResultDescriptor {
1✔
1727
                data_type: VectorDataType::Data,
1✔
1728
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1729
                columns: Default::default(),
1✔
1730
                time: None,
1✔
1731
                bbox: None,
1✔
1732
            };
1✔
1733

1✔
1734
            let ds = AddDataset {
1✔
1735
                id: None,
1✔
1736
                name: "OgrDataset".to_string(),
1✔
1737
                description: "My Ogr dataset".to_string(),
1✔
1738
                source_operator: "OgrSource".to_string(),
1✔
1739
                symbology: None,
1✔
1740
                provenance: None,
1✔
1741
            };
1✔
1742

1✔
1743
            let meta = StaticMetaData {
1✔
1744
                loading_info: OgrSourceDataset {
1✔
1745
                    file_name: Default::default(),
1✔
1746
                    layer_name: String::new(),
1✔
1747
                    data_type: None,
1✔
1748
                    time: Default::default(),
1✔
1749
                    default_geometry: None,
1✔
1750
                    columns: None,
1✔
1751
                    force_ogr_time_filter: false,
1✔
1752
                    force_ogr_spatial_filter: false,
1✔
1753
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1754
                    sql_query: None,
1✔
1755
                    attribute_query: None,
1✔
1756
                },
1✔
1757
                result_descriptor: descriptor.clone(),
1✔
1758
                phantom: Default::default(),
1✔
1759
            };
1✔
1760

1✔
1761
            let meta = ctx
1✔
1762
                .dataset_db_ref()
1✔
1763
                .wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1764

1765
            let id = ctx
1✔
1766
                .dataset_db_ref()
1✔
1767
                .add_dataset(&session1, ds.validated().unwrap(), meta)
1✔
1768
                .await
11✔
1769
                .unwrap();
1✔
1770

1771
            assert!(ctx
1✔
1772
                .dataset_db_ref()
1✔
1773
                .provenance(&session1, &id)
1✔
1774
                .await
3✔
1775
                .is_ok());
1✔
1776

1777
            assert!(ctx
1✔
1778
                .dataset_db_ref()
1✔
1779
                .provenance(&session2, &id)
1✔
1780
                .await
3✔
1781
                .is_err());
1✔
1782
        })
1✔
1783
        .await;
10✔
1784
    }
1785

1786
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1787
    async fn it_updates_permissions() {
1✔
1788
        with_temp_context(|ctx, _| async move {
1✔
1789
            let session1 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1790
            let session2 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1791

1✔
1792
            let descriptor = VectorResultDescriptor {
1✔
1793
                data_type: VectorDataType::Data,
1✔
1794
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1795
                columns: Default::default(),
1✔
1796
                time: None,
1✔
1797
                bbox: None,
1✔
1798
            };
1✔
1799

1✔
1800
            let ds = AddDataset {
1✔
1801
                id: None,
1✔
1802
                name: "OgrDataset".to_string(),
1✔
1803
                description: "My Ogr dataset".to_string(),
1✔
1804
                source_operator: "OgrSource".to_string(),
1✔
1805
                symbology: None,
1✔
1806
                provenance: None,
1✔
1807
            };
1✔
1808

1✔
1809
            let meta = StaticMetaData {
1✔
1810
                loading_info: OgrSourceDataset {
1✔
1811
                    file_name: Default::default(),
1✔
1812
                    layer_name: String::new(),
1✔
1813
                    data_type: None,
1✔
1814
                    time: Default::default(),
1✔
1815
                    default_geometry: None,
1✔
1816
                    columns: None,
1✔
1817
                    force_ogr_time_filter: false,
1✔
1818
                    force_ogr_spatial_filter: false,
1✔
1819
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1820
                    sql_query: None,
1✔
1821
                    attribute_query: None,
1✔
1822
                },
1✔
1823
                result_descriptor: descriptor.clone(),
1✔
1824
                phantom: Default::default(),
1✔
1825
            };
1✔
1826

1✔
1827
            let meta = ctx
1✔
1828
                .dataset_db_ref()
1✔
1829
                .wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1830

1831
            let id = ctx
1✔
1832
                .dataset_db_ref()
1✔
1833
                .add_dataset(&session1, ds.validated().unwrap(), meta)
1✔
1834
                .await
11✔
1835
                .unwrap();
1✔
1836

1837
            assert!(ctx.dataset_db_ref().load(&session1, &id).await.is_ok());
3✔
1838

1839
            assert!(ctx.dataset_db_ref().load(&session2, &id).await.is_err());
3✔
1840

1841
            ctx.dataset_db_ref()
1✔
1842
                .add_dataset_permission(
1✔
1843
                    &session1,
1✔
1844
                    DatasetPermission {
1✔
1845
                        role: session2.user.id.into(),
1✔
1846
                        dataset: id,
1✔
1847
                        permission: Permission::Read,
1✔
1848
                    },
1✔
1849
                )
1✔
1850
                .await
9✔
1851
                .unwrap();
1✔
1852

1853
            assert!(ctx.dataset_db_ref().load(&session2, &id).await.is_ok());
3✔
1854
        })
1✔
1855
        .await;
11✔
1856
    }
1857

1858
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1859
    async fn it_uses_roles_for_permissions() {
1✔
1860
        with_temp_context(|ctx, _| async move {
1✔
1861
            let session1 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1862
            let session2 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1863

1✔
1864
            let descriptor = VectorResultDescriptor {
1✔
1865
                data_type: VectorDataType::Data,
1✔
1866
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1867
                columns: Default::default(),
1✔
1868
                time: None,
1✔
1869
                bbox: None,
1✔
1870
            };
1✔
1871

1✔
1872
            let ds = AddDataset {
1✔
1873
                id: None,
1✔
1874
                name: "OgrDataset".to_string(),
1✔
1875
                description: "My Ogr dataset".to_string(),
1✔
1876
                source_operator: "OgrSource".to_string(),
1✔
1877
                symbology: None,
1✔
1878
                provenance: None,
1✔
1879
            };
1✔
1880

1✔
1881
            let meta = StaticMetaData {
1✔
1882
                loading_info: OgrSourceDataset {
1✔
1883
                    file_name: Default::default(),
1✔
1884
                    layer_name: String::new(),
1✔
1885
                    data_type: None,
1✔
1886
                    time: Default::default(),
1✔
1887
                    default_geometry: None,
1✔
1888
                    columns: None,
1✔
1889
                    force_ogr_time_filter: false,
1✔
1890
                    force_ogr_spatial_filter: false,
1✔
1891
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1892
                    sql_query: None,
1✔
1893
                    attribute_query: None,
1✔
1894
                },
1✔
1895
                result_descriptor: descriptor.clone(),
1✔
1896
                phantom: Default::default(),
1✔
1897
            };
1✔
1898

1✔
1899
            let meta = ctx
1✔
1900
                .dataset_db_ref()
1✔
1901
                .wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1902

1903
            let id = ctx
1✔
1904
                .dataset_db_ref()
1✔
1905
                .add_dataset(&session1, ds.validated().unwrap(), meta)
1✔
1906
                .await
11✔
1907
                .unwrap();
1✔
1908

1909
            assert!(ctx.dataset_db_ref().load(&session1, &id).await.is_ok());
3✔
1910

1911
            assert!(ctx.dataset_db_ref().load(&session2, &id).await.is_err());
3✔
1912

1913
            ctx.dataset_db_ref()
1✔
1914
                .add_dataset_permission(
1✔
1915
                    &session1,
1✔
1916
                    DatasetPermission {
1✔
1917
                        role: session2.user.id.into(),
1✔
1918
                        dataset: id,
1✔
1919
                        permission: Permission::Read,
1✔
1920
                    },
1✔
1921
                )
1✔
1922
                .await
9✔
1923
                .unwrap();
1✔
1924

1925
            assert!(ctx.dataset_db_ref().load(&session2, &id).await.is_ok());
3✔
1926
        })
1✔
1927
        .await;
10✔
1928
    }
1929

1930
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1931
    async fn it_secures_meta_data() {
1✔
1932
        with_temp_context(|ctx, _| async move {
1✔
1933
            let session1 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1934
            let session2 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
1935

1✔
1936
            let descriptor = VectorResultDescriptor {
1✔
1937
                data_type: VectorDataType::Data,
1✔
1938
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1939
                columns: Default::default(),
1✔
1940
                time: None,
1✔
1941
                bbox: None,
1✔
1942
            };
1✔
1943

1✔
1944
            let ds = AddDataset {
1✔
1945
                id: None,
1✔
1946
                name: "OgrDataset".to_string(),
1✔
1947
                description: "My Ogr dataset".to_string(),
1✔
1948
                source_operator: "OgrSource".to_string(),
1✔
1949
                symbology: None,
1✔
1950
                provenance: None,
1✔
1951
            };
1✔
1952

1✔
1953
            let meta = StaticMetaData {
1✔
1954
                loading_info: OgrSourceDataset {
1✔
1955
                    file_name: Default::default(),
1✔
1956
                    layer_name: String::new(),
1✔
1957
                    data_type: None,
1✔
1958
                    time: Default::default(),
1✔
1959
                    default_geometry: None,
1✔
1960
                    columns: None,
1✔
1961
                    force_ogr_time_filter: false,
1✔
1962
                    force_ogr_spatial_filter: false,
1✔
1963
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1964
                    sql_query: None,
1✔
1965
                    attribute_query: None,
1✔
1966
                },
1✔
1967
                result_descriptor: descriptor.clone(),
1✔
1968
                phantom: Default::default(),
1✔
1969
            };
1✔
1970

1✔
1971
            let meta = ctx
1✔
1972
                .dataset_db_ref()
1✔
1973
                .wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1974

1975
            let id = ctx
1✔
1976
                .dataset_db_ref()
1✔
1977
                .add_dataset(&session1, ds.validated().unwrap(), meta)
1✔
1978
                .await
11✔
1979
                .unwrap();
1✔
1980

1981
            let meta: Result<
1✔
1982
                Box<dyn MetaData<OgrSourceDataset, VectorResultDescriptor, VectorQueryRectangle>>,
1✔
1983
            > = ctx
1✔
1984
                .dataset_db_ref()
1✔
1985
                .session_meta_data(&session1, &id.into())
1✔
1986
                .await;
3✔
1987

1988
            assert!(meta.is_ok());
1✔
1989

1990
            let meta: Result<
1✔
1991
                Box<dyn MetaData<OgrSourceDataset, VectorResultDescriptor, VectorQueryRectangle>>,
1✔
1992
            > = ctx
1✔
1993
                .dataset_db_ref()
1✔
1994
                .session_meta_data(&session2, &id.into())
1✔
1995
                .await;
3✔
1996

1997
            assert!(meta.is_err());
1✔
1998

1999
            ctx.dataset_db_ref()
1✔
2000
                .add_dataset_permission(
1✔
2001
                    &session1,
1✔
2002
                    DatasetPermission {
1✔
2003
                        role: session2.user.id.into(),
1✔
2004
                        dataset: id,
1✔
2005
                        permission: Permission::Read,
1✔
2006
                    },
1✔
2007
                )
1✔
2008
                .await
9✔
2009
                .unwrap();
1✔
2010

2011
            let meta: Result<
1✔
2012
                Box<dyn MetaData<OgrSourceDataset, VectorResultDescriptor, VectorQueryRectangle>>,
1✔
2013
            > = ctx
1✔
2014
                .dataset_db_ref()
1✔
2015
                .session_meta_data(&session2, &id.into())
1✔
2016
                .await;
3✔
2017

2018
            assert!(meta.is_ok());
1✔
2019
        })
1✔
2020
        .await;
11✔
2021
    }
2022

2023
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2024
    async fn it_secures_uploads() {
1✔
2025
        with_temp_context(|ctx, _| async move {
1✔
2026
            let session1 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
2027
            let session2 = ctx.user_db_ref().anonymous().await.unwrap();
13✔
2028

1✔
2029
            let upload_id = UploadId::new();
1✔
2030

1✔
2031
            let upload = Upload {
1✔
2032
                id: upload_id,
1✔
2033
                files: vec![FileUpload {
1✔
2034
                    id: FileId::new(),
1✔
2035
                    name: "test.bin".to_owned(),
1✔
2036
                    byte_size: 1024,
1✔
2037
                }],
1✔
2038
            };
1✔
2039

1✔
2040
            ctx.dataset_db_ref()
1✔
2041
                .create_upload(&session1, upload)
1✔
2042
                .await
8✔
2043
                .unwrap();
1✔
2044

2045
            assert!(ctx
1✔
2046
                .dataset_db_ref()
1✔
2047
                .get_upload(&session1, upload_id)
1✔
2048
                .await
3✔
2049
                .is_ok());
1✔
2050

2051
            assert!(ctx
1✔
2052
                .dataset_db_ref()
1✔
2053
                .get_upload(&session2, upload_id)
1✔
2054
                .await
3✔
2055
                .is_err());
1✔
2056
        })
1✔
2057
        .await;
10✔
2058
    }
2059

2060
    #[allow(clippy::too_many_lines)]
2061
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2062
    async fn it_collects_layers() {
1✔
2063
        with_temp_context(|ctx, _| async move {
1✔
2064
            let layer_db = ctx.layer_db_ref();
1✔
2065

1✔
2066
            let workflow = Workflow {
1✔
2067
                operator: TypedOperator::Vector(
1✔
2068
                    MockPointSource {
1✔
2069
                        params: MockPointSourceParams {
1✔
2070
                            points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2071
                        },
1✔
2072
                    }
1✔
2073
                    .boxed(),
1✔
2074
                ),
1✔
2075
            };
1✔
2076

2077
            let root_collection_id = layer_db.root_collection_id().await.unwrap();
1✔
2078

2079
            let layer1 = layer_db
1✔
2080
                .add_layer(
1✔
2081
                    AddLayer {
1✔
2082
                        name: "Layer1".to_string(),
1✔
2083
                        description: "Layer 1".to_string(),
1✔
2084
                        symbology: None,
1✔
2085
                        workflow: workflow.clone(),
1✔
2086
                    }
1✔
2087
                    .validated()
1✔
2088
                    .unwrap(),
1✔
2089
                    &root_collection_id,
1✔
2090
                )
1✔
2091
                .await
7✔
2092
                .unwrap();
1✔
2093

2094
            assert_eq!(
1✔
2095
                layer_db.get_layer(&layer1).await.unwrap(),
3✔
2096
                crate::layers::layer::Layer {
1✔
2097
                    id: ProviderLayerId {
1✔
2098
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2099
                        layer_id: layer1.clone(),
1✔
2100
                    },
1✔
2101
                    name: "Layer1".to_string(),
1✔
2102
                    description: "Layer 1".to_string(),
1✔
2103
                    symbology: None,
1✔
2104
                    workflow: workflow.clone(),
1✔
2105
                    properties: vec![],
1✔
2106
                    metadata: HashMap::new(),
1✔
2107
                }
1✔
2108
            );
2109

2110
            let collection1_id = layer_db
1✔
2111
                .add_collection(
1✔
2112
                    AddLayerCollection {
1✔
2113
                        name: "Collection1".to_string(),
1✔
2114
                        description: "Collection 1".to_string(),
1✔
2115
                    }
1✔
2116
                    .validated()
1✔
2117
                    .unwrap(),
1✔
2118
                    &root_collection_id,
1✔
2119
                )
1✔
2120
                .await
7✔
2121
                .unwrap();
1✔
2122

2123
            let layer2 = layer_db
1✔
2124
                .add_layer(
1✔
2125
                    AddLayer {
1✔
2126
                        name: "Layer2".to_string(),
1✔
2127
                        description: "Layer 2".to_string(),
1✔
2128
                        symbology: None,
1✔
2129
                        workflow: workflow.clone(),
1✔
2130
                    }
1✔
2131
                    .validated()
1✔
2132
                    .unwrap(),
1✔
2133
                    &collection1_id,
1✔
2134
                )
1✔
2135
                .await
7✔
2136
                .unwrap();
1✔
2137

2138
            let collection2_id = layer_db
1✔
2139
                .add_collection(
1✔
2140
                    AddLayerCollection {
1✔
2141
                        name: "Collection2".to_string(),
1✔
2142
                        description: "Collection 2".to_string(),
1✔
2143
                    }
1✔
2144
                    .validated()
1✔
2145
                    .unwrap(),
1✔
2146
                    &collection1_id,
1✔
2147
                )
1✔
2148
                .await
7✔
2149
                .unwrap();
1✔
2150

1✔
2151
            layer_db
1✔
2152
                .add_collection_to_parent(&collection2_id, &collection1_id)
1✔
2153
                .await
3✔
2154
                .unwrap();
1✔
2155

2156
            let root_collection = layer_db
1✔
2157
                .collection(
1✔
2158
                    &root_collection_id,
1✔
2159
                    LayerCollectionListOptions {
1✔
2160
                        offset: 0,
1✔
2161
                        limit: 20,
1✔
2162
                    }
1✔
2163
                    .validated()
1✔
2164
                    .unwrap(),
1✔
2165
                )
1✔
2166
                .await
5✔
2167
                .unwrap();
1✔
2168

1✔
2169
            assert_eq!(
1✔
2170
                root_collection,
1✔
2171
                LayerCollection {
1✔
2172
                    id: ProviderLayerCollectionId {
1✔
2173
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2174
                        collection_id: root_collection_id,
1✔
2175
                    },
1✔
2176
                    name: "Layers".to_string(),
1✔
2177
                    description: "All available Geo Engine layers".to_string(),
1✔
2178
                    items: vec![
1✔
2179
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2180
                            id: ProviderLayerCollectionId {
1✔
2181
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2182
                                collection_id: collection1_id.clone(),
1✔
2183
                            },
1✔
2184
                            name: "Collection1".to_string(),
1✔
2185
                            description: "Collection 1".to_string(),
1✔
2186
                        }),
1✔
2187
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2188
                            id: ProviderLayerCollectionId {
1✔
2189
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2190
                                collection_id: LayerCollectionId(
1✔
2191
                                    UNSORTED_COLLECTION_ID.to_string()
1✔
2192
                                ),
1✔
2193
                            },
1✔
2194
                            name: "Unsorted".to_string(),
1✔
2195
                            description: "Unsorted Layers".to_string(),
1✔
2196
                        }),
1✔
2197
                        CollectionItem::Layer(LayerListing {
1✔
2198
                            id: ProviderLayerId {
1✔
2199
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2200
                                layer_id: layer1,
1✔
2201
                            },
1✔
2202
                            name: "Layer1".to_string(),
1✔
2203
                            description: "Layer 1".to_string(),
1✔
2204
                            properties: vec![],
1✔
2205
                        })
1✔
2206
                    ],
1✔
2207
                    entry_label: None,
1✔
2208
                    properties: vec![],
1✔
2209
                }
1✔
2210
            );
1✔
2211

2212
            let collection1 = layer_db
1✔
2213
                .collection(
1✔
2214
                    &collection1_id,
1✔
2215
                    LayerCollectionListOptions {
1✔
2216
                        offset: 0,
1✔
2217
                        limit: 20,
1✔
2218
                    }
1✔
2219
                    .validated()
1✔
2220
                    .unwrap(),
1✔
2221
                )
1✔
2222
                .await
5✔
2223
                .unwrap();
1✔
2224

1✔
2225
            assert_eq!(
1✔
2226
                collection1,
1✔
2227
                LayerCollection {
1✔
2228
                    id: ProviderLayerCollectionId {
1✔
2229
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2230
                        collection_id: collection1_id,
1✔
2231
                    },
1✔
2232
                    name: "Collection1".to_string(),
1✔
2233
                    description: "Collection 1".to_string(),
1✔
2234
                    items: vec![
1✔
2235
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2236
                            id: ProviderLayerCollectionId {
1✔
2237
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2238
                                collection_id: collection2_id,
1✔
2239
                            },
1✔
2240
                            name: "Collection2".to_string(),
1✔
2241
                            description: "Collection 2".to_string(),
1✔
2242
                        }),
1✔
2243
                        CollectionItem::Layer(LayerListing {
1✔
2244
                            id: ProviderLayerId {
1✔
2245
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2246
                                layer_id: layer2,
1✔
2247
                            },
1✔
2248
                            name: "Layer2".to_string(),
1✔
2249
                            description: "Layer 2".to_string(),
1✔
2250
                            properties: vec![],
1✔
2251
                        })
1✔
2252
                    ],
1✔
2253
                    entry_label: None,
1✔
2254
                    properties: vec![],
1✔
2255
                }
1✔
2256
            );
1✔
2257
        })
1✔
2258
        .await;
9✔
2259
    }
2260

2261
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2262
    async fn it_tracks_quota_in_postgres() {
1✔
2263
        with_temp_context(|ctx, _| async move {
1✔
2264
            let _user = ctx
1✔
2265
                .user_db_ref()
1✔
2266
                .register(
1✔
2267
                    UserRegistration {
1✔
2268
                        email: "foo@example.com".to_string(),
1✔
2269
                        password: "secret1234".to_string(),
1✔
2270
                        real_name: "Foo Bar".to_string(),
1✔
2271
                    }
1✔
2272
                    .validated()
1✔
2273
                    .unwrap(),
1✔
2274
                )
1✔
2275
                .await
3✔
2276
                .unwrap();
1✔
2277

2278
            let session = ctx
1✔
2279
                .user_db_ref()
1✔
2280
                .login(UserCredentials {
1✔
2281
                    email: "foo@example.com".to_string(),
1✔
2282
                    password: "secret1234".to_string(),
1✔
2283
                })
1✔
2284
                .await
4✔
2285
                .unwrap();
1✔
2286

1✔
2287
            let quota = initialize_quota_tracking(ctx.user_db());
1✔
2288

1✔
2289
            let tracking = quota.create_quota_tracking(&session, ComputationContext::new());
1✔
2290

1✔
2291
            tracking.work_unit_done();
1✔
2292
            tracking.work_unit_done();
1✔
2293

1✔
2294
            // wait for quota to be recorded
1✔
2295
            let mut success = false;
1✔
2296
            for _ in 0..10 {
2✔
2297
                let used = ctx.user_db_ref().quota_used(&session).await.unwrap();
6✔
2298
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
2✔
2299

2300
                if used == 2 {
2✔
2301
                    success = true;
1✔
2302
                    break;
1✔
2303
                }
1✔
2304
            }
2305

2306
            assert!(success);
1✔
2307
        })
1✔
2308
        .await;
10✔
2309
    }
2310

2311
    /// Removing a layer collection must delete the collection, cascade to the
    /// layers it contains, and be rejected for the root collection.
    #[allow(clippy::too_many_lines)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn it_removes_layer_collections() {
        with_temp_context(|ctx, _| async move {
            let layer_db = ctx.layer_db_ref();

            // A simple mock layer to populate the collections with.
            let layer = AddLayer {
                name: "layer".to_string(),
                description: "description".to_string(),
                workflow: Workflow {
                    operator: TypedOperator::Vector(
                        MockPointSource {
                            params: MockPointSourceParams {
                                points: vec![Coordinate2D::new(1., 2.); 3],
                            },
                        }
                        .boxed(),
                    ),
                },
                symbology: None,
            }
            .validated()
            .unwrap();

            let root_collection = &layer_db.root_collection_id().await.unwrap();

            let collection = AddLayerCollection {
                name: "top collection".to_string(),
                description: "description".to_string(),
            }
            .validated()
            .unwrap();

            // Hierarchy under test: root -> top collection -> { empty collection, layer }.
            let top_c_id = layer_db
                .add_collection(collection, root_collection)
                .await
                .unwrap();

            let l_id = layer_db.add_layer(layer, &top_c_id).await.unwrap();

            let collection = AddLayerCollection {
                name: "empty collection".to_string(),
                description: "description".to_string(),
            }
            .validated()
            .unwrap();

            let empty_c_id = layer_db
                .add_collection(collection, &top_c_id)
                .await
                .unwrap();

            let items = layer_db
                .collection(
                    &top_c_id,
                    LayerCollectionListOptions {
                        offset: 0,
                        limit: 20,
                    }
                    .validated()
                    .unwrap(),
                )
                .await
                .unwrap();

            // Sanity check: both children are listed before any removal.
            assert_eq!(
                items,
                LayerCollection {
                    id: ProviderLayerCollectionId {
                        provider_id: INTERNAL_PROVIDER_ID,
                        collection_id: top_c_id.clone(),
                    },
                    name: "top collection".to_string(),
                    description: "description".to_string(),
                    items: vec![
                        CollectionItem::Collection(LayerCollectionListing {
                            id: ProviderLayerCollectionId {
                                provider_id: INTERNAL_PROVIDER_ID,
                                collection_id: empty_c_id.clone(),
                            },
                            name: "empty collection".to_string(),
                            description: "description".to_string(),
                        }),
                        CollectionItem::Layer(LayerListing {
                            id: ProviderLayerId {
                                provider_id: INTERNAL_PROVIDER_ID,
                                layer_id: l_id.clone(),
                            },
                            name: "layer".to_string(),
                            description: "description".to_string(),
                            properties: vec![],
                        })
                    ],
                    entry_label: None,
                    properties: vec![],
                }
            );

            // remove empty collection
            layer_db.remove_collection(&empty_c_id).await.unwrap();

            let items = layer_db
                .collection(
                    &top_c_id,
                    LayerCollectionListOptions {
                        offset: 0,
                        limit: 20,
                    }
                    .validated()
                    .unwrap(),
                )
                .await
                .unwrap();

            // Only the layer remains after the empty child collection is gone.
            assert_eq!(
                items,
                LayerCollection {
                    id: ProviderLayerCollectionId {
                        provider_id: INTERNAL_PROVIDER_ID,
                        collection_id: top_c_id.clone(),
                    },
                    name: "top collection".to_string(),
                    description: "description".to_string(),
                    items: vec![CollectionItem::Layer(LayerListing {
                        id: ProviderLayerId {
                            provider_id: INTERNAL_PROVIDER_ID,
                            layer_id: l_id.clone(),
                        },
                        name: "layer".to_string(),
                        description: "description".to_string(),
                        properties: vec![],
                    })],
                    entry_label: None,
                    properties: vec![],
                }
            );

            // remove top (not root) collection
            layer_db.remove_collection(&top_c_id).await.unwrap();

            layer_db
                .collection(
                    &top_c_id,
                    LayerCollectionListOptions {
                        offset: 0,
                        limit: 20,
                    }
                    .validated()
                    .unwrap(),
                )
                .await
                .unwrap_err();

            // should be deleted automatically
            layer_db.get_layer(&l_id).await.unwrap_err();

            // it is not allowed to remove the root collection
            layer_db
                .remove_collection(root_collection)
                .await
                .unwrap_err();
            // …and the root collection is still listable afterwards.
            layer_db
                .collection(
                    root_collection,
                    LayerCollectionListOptions {
                        offset: 0,
                        limit: 20,
                    }
                    .validated()
                    .unwrap(),
                )
                .await
                .unwrap();
        })
        .await;
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2489
    #[allow(clippy::too_many_lines)]
2490
    async fn it_removes_collections_from_collections() {
1✔
2491
        with_temp_context(|ctx, _| async move {
1✔
2492
            let db = ctx.layer_db_ref();
1✔
2493

2494
            let root_collection_id = &db.root_collection_id().await.unwrap();
1✔
2495

2496
            let mid_collection_id = db
1✔
2497
                .add_collection(
1✔
2498
                    AddLayerCollection {
1✔
2499
                        name: "mid collection".to_string(),
1✔
2500
                        description: "description".to_string(),
1✔
2501
                    }
1✔
2502
                    .validated()
1✔
2503
                    .unwrap(),
1✔
2504
                    root_collection_id,
1✔
2505
                )
1✔
2506
                .await
7✔
2507
                .unwrap();
1✔
2508

2509
            let bottom_collection_id = db
1✔
2510
                .add_collection(
1✔
2511
                    AddLayerCollection {
1✔
2512
                        name: "bottom collection".to_string(),
1✔
2513
                        description: "description".to_string(),
1✔
2514
                    }
1✔
2515
                    .validated()
1✔
2516
                    .unwrap(),
1✔
2517
                    &mid_collection_id,
1✔
2518
                )
1✔
2519
                .await
7✔
2520
                .unwrap();
1✔
2521

2522
            let layer_id = db
1✔
2523
                .add_layer(
1✔
2524
                    AddLayer {
1✔
2525
                        name: "layer".to_string(),
1✔
2526
                        description: "description".to_string(),
1✔
2527
                        workflow: Workflow {
1✔
2528
                            operator: TypedOperator::Vector(
1✔
2529
                                MockPointSource {
1✔
2530
                                    params: MockPointSourceParams {
1✔
2531
                                        points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2532
                                    },
1✔
2533
                                }
1✔
2534
                                .boxed(),
1✔
2535
                            ),
1✔
2536
                        },
1✔
2537
                        symbology: None,
1✔
2538
                    }
1✔
2539
                    .validated()
1✔
2540
                    .unwrap(),
1✔
2541
                    &mid_collection_id,
1✔
2542
                )
1✔
2543
                .await
7✔
2544
                .unwrap();
1✔
2545

1✔
2546
            // removing the mid collection…
1✔
2547
            db.remove_collection_from_parent(&mid_collection_id, root_collection_id)
1✔
2548
                .await
11✔
2549
                .unwrap();
1✔
2550

1✔
2551
            // …should remove itself
1✔
2552
            db.collection(
1✔
2553
                &mid_collection_id,
1✔
2554
                LayerCollectionListOptions::default().validated().unwrap(),
1✔
2555
            )
1✔
2556
            .await
3✔
2557
            .unwrap_err();
1✔
2558

1✔
2559
            // …should remove the bottom collection
1✔
2560
            db.collection(
1✔
2561
                &bottom_collection_id,
1✔
2562
                LayerCollectionListOptions::default().validated().unwrap(),
1✔
2563
            )
1✔
2564
            .await
3✔
2565
            .unwrap_err();
1✔
2566

1✔
2567
            // … and should remove the layer of the bottom collection
1✔
2568
            db.get_layer(&layer_id).await.unwrap_err();
3✔
2569

1✔
2570
            // the root collection is still there
1✔
2571
            db.collection(
1✔
2572
                root_collection_id,
1✔
2573
                LayerCollectionListOptions::default().validated().unwrap(),
1✔
2574
            )
1✔
2575
            .await
5✔
2576
            .unwrap();
1✔
2577
        })
1✔
2578
        .await;
8✔
2579
    }
2580

2581
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2582
    #[allow(clippy::too_many_lines)]
2583
    async fn it_removes_layers_from_collections() {
1✔
2584
        with_temp_context(|ctx, _| async move {
1✔
2585
            let db = ctx.layer_db_ref();
1✔
2586

2587
            let root_collection = &db.root_collection_id().await.unwrap();
1✔
2588

2589
            let another_collection = db
1✔
2590
                .add_collection(
1✔
2591
                    AddLayerCollection {
1✔
2592
                        name: "top collection".to_string(),
1✔
2593
                        description: "description".to_string(),
1✔
2594
                    }
1✔
2595
                    .validated()
1✔
2596
                    .unwrap(),
1✔
2597
                    root_collection,
1✔
2598
                )
1✔
2599
                .await
7✔
2600
                .unwrap();
1✔
2601

2602
            let layer_in_one_collection = db
1✔
2603
                .add_layer(
1✔
2604
                    AddLayer {
1✔
2605
                        name: "layer 1".to_string(),
1✔
2606
                        description: "description".to_string(),
1✔
2607
                        workflow: Workflow {
1✔
2608
                            operator: TypedOperator::Vector(
1✔
2609
                                MockPointSource {
1✔
2610
                                    params: MockPointSourceParams {
1✔
2611
                                        points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2612
                                    },
1✔
2613
                                }
1✔
2614
                                .boxed(),
1✔
2615
                            ),
1✔
2616
                        },
1✔
2617
                        symbology: None,
1✔
2618
                    }
1✔
2619
                    .validated()
1✔
2620
                    .unwrap(),
1✔
2621
                    &another_collection,
1✔
2622
                )
1✔
2623
                .await
7✔
2624
                .unwrap();
1✔
2625

2626
            let layer_in_two_collections = db
1✔
2627
                .add_layer(
1✔
2628
                    AddLayer {
1✔
2629
                        name: "layer 2".to_string(),
1✔
2630
                        description: "description".to_string(),
1✔
2631
                        workflow: Workflow {
1✔
2632
                            operator: TypedOperator::Vector(
1✔
2633
                                MockPointSource {
1✔
2634
                                    params: MockPointSourceParams {
1✔
2635
                                        points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2636
                                    },
1✔
2637
                                }
1✔
2638
                                .boxed(),
1✔
2639
                            ),
1✔
2640
                        },
1✔
2641
                        symbology: None,
1✔
2642
                    }
1✔
2643
                    .validated()
1✔
2644
                    .unwrap(),
1✔
2645
                    &another_collection,
1✔
2646
                )
1✔
2647
                .await
7✔
2648
                .unwrap();
1✔
2649

1✔
2650
            db.add_layer_to_collection(&layer_in_two_collections, root_collection)
1✔
2651
                .await
3✔
2652
                .unwrap();
1✔
2653

1✔
2654
            // remove first layer --> should be deleted entirely
1✔
2655

1✔
2656
            db.remove_layer_from_collection(&layer_in_one_collection, &another_collection)
1✔
2657
                .await
7✔
2658
                .unwrap();
1✔
2659

2660
            let number_of_layer_in_collection = db
1✔
2661
                .collection(
1✔
2662
                    &another_collection,
1✔
2663
                    LayerCollectionListOptions {
1✔
2664
                        offset: 0,
1✔
2665
                        limit: 20,
1✔
2666
                    }
1✔
2667
                    .validated()
1✔
2668
                    .unwrap(),
1✔
2669
                )
1✔
2670
                .await
5✔
2671
                .unwrap()
1✔
2672
                .items
1✔
2673
                .len();
1✔
2674
            assert_eq!(
1✔
2675
                number_of_layer_in_collection,
1✔
2676
                1 /* only the other collection should be here */
1✔
2677
            );
1✔
2678

2679
            db.get_layer(&layer_in_one_collection).await.unwrap_err();
3✔
2680

1✔
2681
            // remove second layer --> should only be gone in collection
1✔
2682

1✔
2683
            db.remove_layer_from_collection(&layer_in_two_collections, &another_collection)
1✔
2684
                .await
7✔
2685
                .unwrap();
1✔
2686

2687
            let number_of_layer_in_collection = db
1✔
2688
                .collection(
1✔
2689
                    &another_collection,
1✔
2690
                    LayerCollectionListOptions {
1✔
2691
                        offset: 0,
1✔
2692
                        limit: 20,
1✔
2693
                    }
1✔
2694
                    .validated()
1✔
2695
                    .unwrap(),
1✔
2696
                )
1✔
2697
                .await
5✔
2698
                .unwrap()
1✔
2699
                .items
1✔
2700
                .len();
1✔
2701
            assert_eq!(
1✔
2702
                number_of_layer_in_collection,
1✔
2703
                0 /* both layers were deleted */
1✔
2704
            );
1✔
2705

2706
            db.get_layer(&layer_in_two_collections).await.unwrap();
3✔
2707
        })
1✔
2708
        .await;
11✔
2709
    }
2710
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc