• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

geo-engine / geoengine / 4595703237

pending completion
4595703237

push

github

GitHub
Merge #770

32 of 32 new or added lines in 2 files covered. (100.0%)

94612 of 105938 relevant lines covered (89.31%)

75593.48 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

96.58
/services/src/pro/contexts/postgres.rs
1
use crate::contexts::{ApplicationContext, QueryContextImpl, SessionId};
2
use crate::contexts::{GeoEngineDb, SessionContext};
3
use crate::datasets::add_from_directory::add_providers_from_directory;
4
use crate::datasets::upload::{Volume, Volumes};
5
use crate::error::{self, Result};
6
use crate::layers::add_from_directory::UNSORTED_COLLECTION_ID;
7
use crate::layers::storage::INTERNAL_LAYER_DB_ROOT_COLLECTION_ID;
8
use crate::pro::datasets::add_datasets_from_directory;
9
use crate::pro::layers::add_from_directory::{
10
    add_layer_collections_from_directory, add_layers_from_directory,
11
};
12
use crate::pro::permissions::Role;
13
use crate::pro::quota::{initialize_quota_tracking, QuotaTrackingFactory};
14
use crate::pro::tasks::{ProTaskManager, ProTaskManagerBackend};
15
use crate::pro::users::{OidcRequestDb, UserAuth, UserSession};
16
use crate::pro::util::config::Oidc;
17

18
use crate::tasks::SimpleTaskManagerContext;
19
use crate::util::config::get_config_element;
20
use async_trait::async_trait;
21
use bb8_postgres::{
22
    bb8::Pool,
23
    bb8::PooledConnection,
24
    tokio_postgres::{error::SqlState, tls::MakeTlsConnect, tls::TlsConnect, Config, Socket},
25
    PostgresConnectionManager,
26
};
27
use geoengine_datatypes::raster::TilingSpecification;
28
use geoengine_datatypes::util::Identifier;
29
use geoengine_operators::engine::{ChunkByteSize, QueryContextExtensions};
30
use geoengine_operators::pro::meta::quota::{ComputationContext, QuotaChecker};
31
use geoengine_operators::util::create_rayon_thread_pool;
32
use log::{debug, info};
33
use postgres_protocol::escape::escape_literal;
34
use pwhash::bcrypt;
35
use rayon::ThreadPool;
36
use snafu::{ensure, ResultExt};
37
use std::path::PathBuf;
38
use std::sync::Arc;
39

40
use super::{ExecutionContextImpl, ProApplicationContext, ProGeoEngineDb, QuotaCheckerImpl};
41

42
// TODO: do not report postgres error details to user

/// A context with references to Postgres backends of the dbs. Automatically migrates schema on instantiation
#[derive(Clone)]
pub struct PostgresContext<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    // Shared rayon pool handed to query/execution contexts.
    thread_pool: Arc<ThreadPool>,
    // Tiling specification used when building execution contexts.
    exe_ctx_tiling_spec: TilingSpecification,
    // Chunk byte size used when building query contexts.
    query_ctx_chunk_size: ChunkByteSize,
    // In-memory task backend; NOTE(review): does not persist across restarts (see SessionContext impl).
    task_manager: Arc<ProTaskManagerBackend>,
    // `None` when OIDC is not configured (e.g. `new_with_context_spec`).
    oidc_request_db: Arc<Option<OidcRequestDb>>,
    // Factory for per-session quota tracking, initialized with an admin-session db.
    quota: QuotaTrackingFactory,
    // Connection pool shared with the `PostgresDb` handles created per session.
    pub(crate) pool: Pool<PostgresConnectionManager<Tls>>,
    // Configured data volumes; exposed to admins via `SessionContext::volumes`.
    volumes: Volumes,
}
62

63
impl<Tls> PostgresContext<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    /// Creates a context from a Postgres `config`/`tls` pair and explicit tiling/chunking
    /// parameters. The schema is migrated to the latest version before the context is returned.
    /// OIDC is disabled in this constructor (`oidc_request_db` is `None`).
    ///
    /// # Errors
    /// Fails if the connection pool cannot be built or the schema migration fails.
    pub async fn new_with_context_spec(
        config: Config,
        tls: Tls,
        exe_ctx_tiling_spec: TilingSpecification,
        query_ctx_chunk_size: ChunkByteSize,
    ) -> Result<Self> {
        let pg_mgr = PostgresConnectionManager::new(config, tls);

        let pool = Pool::builder().build(pg_mgr).await?;
        // Migrate (or initialize) the schema before handing out the context.
        Self::update_schema(pool.get().await?).await?;

        // Quota tracking is initialized with an admin-session db handle.
        let db = PostgresDb::new(pool.clone(), UserSession::admin_session());
        let quota = initialize_quota_tracking(db);

        Ok(PostgresContext {
            task_manager: Default::default(),
            thread_pool: create_rayon_thread_pool(0),
            exe_ctx_tiling_spec,
            query_ctx_chunk_size,
            oidc_request_db: Arc::new(None),
            quota,
            pool,
            volumes: Default::default(),
        })
    }

    /// Like [`Self::new_with_context_spec`], but additionally configures OIDC and — on a fresh
    /// database (schema version 0) — populates layers, layer collections, datasets, and
    /// providers from the given definition directories.
    ///
    /// # Errors
    /// Fails if the pool cannot be built or the schema migration fails; the directory imports
    /// themselves only log warnings (they return `()`).
    // TODO: check if the datasets exist already and don't output warnings when skipping them
    #[allow(clippy::too_many_arguments)]
    pub async fn new_with_data(
        config: Config,
        tls: Tls,
        dataset_defs_path: PathBuf,
        provider_defs_path: PathBuf,
        layer_defs_path: PathBuf,
        layer_collection_defs_path: PathBuf,
        exe_ctx_tiling_spec: TilingSpecification,
        query_ctx_chunk_size: ChunkByteSize,
        oidc_config: Oidc,
    ) -> Result<Self> {
        let pg_mgr = PostgresConnectionManager::new(config, tls);

        let pool = Pool::builder().build(pg_mgr).await?;
        let conn = pool.get().await?;
        // Remember whether the db was empty BEFORE migrating, so we only seed data once.
        let uninitialized = Self::schema_version(&conn).await? == 0;
        Self::update_schema(conn).await?;

        let db = PostgresDb::new(pool.clone(), UserSession::admin_session());
        let quota = initialize_quota_tracking(db);

        let app_ctx = PostgresContext {
            task_manager: Default::default(),
            thread_pool: create_rayon_thread_pool(0),
            exe_ctx_tiling_spec,
            query_ctx_chunk_size,
            // A failed OIDC setup silently degrades to "OIDC disabled" via `.ok()`.
            oidc_request_db: Arc::new(OidcRequestDb::try_from(oidc_config).ok()),
            quota,
            pool,
            volumes: Default::default(),
        };

        if uninitialized {
            info!("Populating database with initial data...");

            // Seed data as the admin user.
            let mut db = app_ctx.session_context(UserSession::admin_session()).db();

            add_layers_from_directory(&mut db, layer_defs_path).await;
            add_layer_collections_from_directory(&mut db, layer_collection_defs_path).await;

            add_datasets_from_directory(&mut db, dataset_defs_path).await;

            // The "pro" subdirectory is passed separately as an additional provider source.
            add_providers_from_directory(
                &mut db,
                provider_defs_path.clone(),
                &[provider_defs_path.join("pro")],
            )
            .await;
        }

        Ok(app_ctx)
    }

    /// Reads the current schema version from the `version` table.
    ///
    /// Returns `Ok(0)` when the table does not exist yet (fresh database); any other
    /// database error is propagated as `Error::TokioPostgres`.
    async fn schema_version(
        conn: &PooledConnection<'_, PostgresConnectionManager<Tls>>,
    ) -> Result<i32> {
        let stmt = match conn.prepare("SELECT version from version").await {
            Ok(stmt) => stmt,
            Err(e) => {
                // UNDEFINED_TABLE means the schema was never initialized -> version 0.
                if let Some(code) = e.code() {
                    if *code == SqlState::UNDEFINED_TABLE {
                        info!("Version table not found. Initializing schema.");
                        return Ok(0);
                    }
                }
                return Err(error::Error::TokioPostgres { source: e });
            }
        };

        let row = conn.query_one(&stmt, &[]).await?;

        Ok(row.get(0))
    }

    #[allow(clippy::too_many_lines)]
    /// Updates the schema, returns the new schema version
    ///
    /// Migrations are applied one version at a time in the loop below; each arm of the
    /// `match` migrates `version` -> `version + 1` until the catch-all arm returns.
    async fn update_schema(
        conn: PooledConnection<'_, PostgresConnectionManager<Tls>>,
    ) -> Result<i32> {
        let mut version = Self::schema_version(&conn).await?;

        loop {
            match version {
                // Version 0 -> 1: create the full initial schema and seed the admin user,
                // default roles, and root/unsorted layer collections. All dynamic values are
                // escaped via `escape_literal` before being interpolated into the batch.
                0 => {
                    let user_config = get_config_element::<crate::pro::util::config::User>()?;

                    conn.batch_execute(
                        &format!(r#"
                        CREATE TABLE version (
                            version INT
                        );
                        INSERT INTO version VALUES (1);

                        -- TODO: distinguish between roles that are (correspond to) users and roles that are not
                        -- TODO: integrity constraint for roles that correspond to users + DELETE CASCADE
                        CREATE TABLE roles (
                            id UUID PRIMARY KEY,
                            name text UNIQUE NOT NULL
                        );

                        INSERT INTO roles (id, name) VALUES
                            ({admin_role_id}, 'admin'),
                            ({user_role_id}, 'user'),
                            ({anonymous_role_id}, 'anonymous');

                        CREATE TABLE users (
                            id UUID PRIMARY KEY REFERENCES roles(id),
                            email character varying (256) UNIQUE,
                            password_hash character varying (256),
                            real_name character varying (256),
                            active boolean NOT NULL,
                            quota_available bigint NOT NULL DEFAULT 0,
                            quota_used bigint NOT NULL DEFAULT 0, -- TODO: rename to total_quota_used?
                            CONSTRAINT users_anonymous_ck CHECK (
                               (email IS NULL AND password_hash IS NULL AND real_name IS NULL) OR 
                               (email IS NOT NULL AND password_hash IS NOT NULL AND 
                                real_name IS NOT NULL) 
                            ),
                            CONSTRAINT users_quota_used_ck CHECK (quota_used >= 0)
                        );

                        INSERT INTO users (
                            id, 
                            email,
                            password_hash,
                            real_name,
                            active)
                        VALUES (
                            {admin_role_id}, 
                            {admin_email},
                            {admin_password},
                            'admin',
                            true
                        );

                        -- relation between users and roles
                        -- all users have a default role where role_id = user_id
                        CREATE TABLE user_roles (
                            user_id UUID REFERENCES users(id) ON DELETE CASCADE NOT NULL,
                            role_id UUID REFERENCES roles(id) ON DELETE CASCADE NOT NULL,
                            PRIMARY KEY (user_id, role_id)
                        );

                        -- admin user role
                        INSERT INTO user_roles 
                            (user_id, role_id)
                        VALUES 
                            ({admin_role_id}, 
                            {admin_role_id});

                        CREATE TYPE "SpatialReferenceAuthority" AS ENUM (
                            'Epsg', 'SrOrg', 'Iau2000', 'Esri'
                        );

                        CREATE TYPE "SpatialReference" AS (
                            authority "SpatialReferenceAuthority", 
                            code OID
                        );

                        CREATE TYPE "Coordinate2D" AS (
                            x double precision, 
                            y double precision
                        );

                        CREATE TYPE "BoundingBox2D" AS (
                            lower_left_coordinate "Coordinate2D", 
                            upper_right_coordinate "Coordinate2D"
                        );

                        CREATE TYPE "TimeInterval" AS (                                                      
                            start timestamp with time zone,
                            "end" timestamp with time zone
                        );

                        CREATE TYPE "STRectangle" AS (
                            spatial_reference "SpatialReference",
                            bounding_box "BoundingBox2D",
                            time_interval "TimeInterval"
                        );
                        
                        CREATE TYPE "TimeGranularity" AS ENUM (
                            'Millis', 'Seconds', 'Minutes', 'Hours',
                            'Days',  'Months', 'Years'
                        );
                        
                        CREATE TYPE "TimeStep" AS (
                            granularity "TimeGranularity",
                            step OID
                        );

                        CREATE TABLE projects (
                            id UUID PRIMARY KEY
                        );        
                        
                        CREATE TABLE sessions (
                            id UUID PRIMARY KEY,
                            user_id UUID REFERENCES users(id),
                            created timestamp with time zone NOT NULL,
                            valid_until timestamp with time zone NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE SET NULL,
                            view "STRectangle"
                        );                

                        CREATE TABLE project_versions (
                            id UUID PRIMARY KEY,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            name character varying (256) NOT NULL,
                            description text NOT NULL,
                            bounds "STRectangle" NOT NULL,
                            time_step "TimeStep" NOT NULL,
                            changed timestamp with time zone,
                            author_user_id UUID REFERENCES users(id) NOT NULL
                        );

                        CREATE INDEX project_version_idx 
                        ON project_versions (project_id, changed DESC, author_user_id DESC);

                        CREATE TYPE "LayerType" AS ENUM ('Raster', 'Vector');
                        
                        CREATE TYPE "LayerVisibility" AS (
                            data BOOLEAN,
                            legend BOOLEAN
                        );

                        CREATE TABLE project_version_layers (
                            layer_index integer NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            project_version_id UUID REFERENCES project_versions(id) ON DELETE CASCADE NOT NULL,                            
                            name character varying (256) NOT NULL,
                            workflow_id UUID NOT NULL, -- TODO: REFERENCES workflows(id)
                            symbology json,
                            visibility "LayerVisibility" NOT NULL,
                            PRIMARY KEY (project_id, project_version_id, layer_index)            
                        );
                        
                        CREATE TABLE project_version_plots (
                            plot_index integer NOT NULL,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE NOT NULL,
                            project_version_id UUID REFERENCES project_versions(id) ON DELETE CASCADE NOT NULL,                            
                            name character varying (256) NOT NULL,
                            workflow_id UUID NOT NULL, -- TODO: REFERENCES workflows(id)
                            PRIMARY KEY (project_id, project_version_id, plot_index)            
                        );

                        CREATE TABLE workflows (
                            id UUID PRIMARY KEY,
                            workflow json NOT NULL
                        );

                        CREATE TABLE datasets (
                            id UUID PRIMARY KEY,
                            name text NOT NULL,
                            description text NOT NULL, 
                            tags text[], 
                            source_operator text NOT NULL,

                            result_descriptor json NOT NULL,
                            meta_data json NOT NULL,

                            symbology json,
                            provenance json
                        );

                        -- TODO: add constraint not null
                        -- TODO: add constaint byte_size >= 0
                        CREATE TYPE "FileUpload" AS (
                            id UUID,
                            name text,
                            byte_size bigint
                        );

                        -- TODO: store user
                        -- TODO: time of creation and last update
                        -- TODO: upload directory that is not directly derived from id
                        CREATE TABLE uploads (
                            id UUID PRIMARY KEY,
                            user_id UUID REFERENCES users(id) ON DELETE CASCADE NOT NULL,
                            files "FileUpload"[] NOT NULL
                        );

                        CREATE TYPE "Permission" AS ENUM (
                            'Read', 'Owner'
                        );  

                        CREATE TYPE "PropertyType" AS (
                            key text,
                            value text
                        );

                        CREATE TABLE layer_collections (
                            id UUID PRIMARY KEY,
                            name text NOT NULL,
                            description text NOT NULL,
                            properties "PropertyType"[] NOT NULL
                        );

                        -- insert the root layer collection
                        INSERT INTO layer_collections (
                            id,
                            name,
                            description,
                            properties
                        ) VALUES (
                            {root_layer_collection_id},
                            'Layers',
                            'All available Geo Engine layers',
                            ARRAY[]::"PropertyType"[]
                        );

                        -- insert the unsorted layer collection
                        INSERT INTO layer_collections (
                            id,
                            name,
                            description,
                            properties
                        ) VALUES (
                            {unsorted_layer_collection_id},
                            'Unsorted',
                            'Unsorted Layers',
                            ARRAY[]::"PropertyType"[]
                        );
    

                        CREATE TABLE layers (
                            id UUID PRIMARY KEY,
                            name text NOT NULL,
                            description text NOT NULL,
                            workflow json NOT NULL,
                            symbology json,
                            properties "PropertyType"[] NOT NULL,
                            metadata json NOT NULL
                        );

                        CREATE TABLE collection_layers (
                            collection UUID REFERENCES layer_collections(id) ON DELETE CASCADE NOT NULL,
                            layer UUID REFERENCES layers(id) ON DELETE CASCADE NOT NULL,
                            PRIMARY KEY (collection, layer)
                        );

                        CREATE TABLE collection_children (
                            parent UUID REFERENCES layer_collections(id) ON DELETE CASCADE NOT NULL,
                            child UUID REFERENCES layer_collections(id) ON DELETE CASCADE NOT NULL,
                            PRIMARY KEY (parent, child)
                        );

                        -- add unsorted layers to root layer collection
                        INSERT INTO collection_children (parent, child) VALUES
                        ({root_layer_collection_id}, {unsorted_layer_collection_id});

                        -- TODO: should name be unique (per user)?
                        CREATE TABLE layer_providers (
                            id UUID PRIMARY KEY,
                            type_name text NOT NULL,
                            name text NOT NULL,

                            definition json NOT NULL
                        );

                        -- TODO: uploads, providers permissions

                        -- TODO: relationship between uploads and datasets?

                        CREATE TABLE external_users (
                            id UUID PRIMARY KEY REFERENCES users(id),
                            external_id character varying (256) UNIQUE,
                            email character varying (256),
                            real_name character varying (256),
                            active boolean NOT NULL
                        );

                        CREATE TABLE permissions (
                            -- resource_type "ResourceType" NOT NULL,
                            role_id UUID REFERENCES roles(id) ON DELETE CASCADE NOT NULL,
                            permission "Permission" NOT NULL,
                            dataset_id UUID REFERENCES datasets(id) ON DELETE CASCADE,
                            layer_id UUID REFERENCES layers(id) ON DELETE CASCADE,
                            layer_collection_id UUID REFERENCES layer_collections(id) ON DELETE CASCADE,
                            project_id UUID REFERENCES projects(id) ON DELETE CASCADE,
                            check(
                                (
                                    (dataset_id is not null)::integer +
                                    (layer_id is not null)::integer +
                                    (layer_collection_id is not null)::integer +
                                    (project_id is not null)::integer 
                                ) = 1
                            )
                        );

                        CREATE UNIQUE INDEX ON permissions (role_id, permission, dataset_id);
                        CREATE UNIQUE INDEX ON permissions (role_id, permission, layer_id);
                        CREATE UNIQUE INDEX ON permissions (role_id, permission, layer_collection_id);
                        CREATE UNIQUE INDEX ON permissions (role_id, permission, project_id);   

                        CREATE VIEW user_permitted_datasets AS
                            SELECT 
                                r.user_id,
                                p.dataset_id,
                                p.permission
                            FROM 
                                user_roles r JOIN permissions p ON (r.role_id = p.role_id AND dataset_id IS NOT NULL);

                        CREATE VIEW user_permitted_projects AS
                            SELECT 
                                r.user_id,
                                p.project_id,
                                p.permission
                            FROM 
                                user_roles r JOIN permissions p ON (r.role_id = p.role_id AND project_id IS NOT NULL); 

                        CREATE VIEW user_permitted_layer_collections AS
                            SELECT 
                                r.user_id,
                                p.layer_collection_id,
                                p.permission
                            FROM 
                                user_roles r JOIN permissions p ON (r.role_id = p.role_id AND layer_collection_id IS NOT NULL); 

                        CREATE VIEW user_permitted_layers AS
                            SELECT 
                                r.user_id,
                                p.layer_id,
                                p.permission
                            FROM 
                                user_roles r JOIN permissions p ON (r.role_id = p.role_id AND layer_id IS NOT NULL); 

                        --- permission for unsorted layers and root layer collection
                        INSERT INTO permissions
                            (role_id, layer_collection_id, permission)  
                        VALUES 
                            ({admin_role_id}, {root_layer_collection_id}, 'Owner'),
                            ({user_role_id}, {root_layer_collection_id}, 'Read'),
                            ({anonymous_role_id}, {root_layer_collection_id}, 'Read'),
                            ({admin_role_id}, {unsorted_layer_collection_id}, 'Owner'),
                            ({user_role_id}, {unsorted_layer_collection_id}, 'Read'),
                            ({anonymous_role_id}, {unsorted_layer_collection_id}, 'Read');
                        "#
                    ,
                    admin_role_id = escape_literal(&Role::admin_role_id().to_string()),
                    admin_email = escape_literal(&user_config.admin_email),
                    admin_password = escape_literal(&bcrypt::hash(user_config.admin_password).expect("Admin password hash should be valid")),
                    user_role_id = escape_literal(&Role::registered_user_role_id().to_string()),
                    anonymous_role_id = escape_literal(&Role::anonymous_role_id().to_string()),
                    root_layer_collection_id = escape_literal(&INTERNAL_LAYER_DB_ROOT_COLLECTION_ID.to_string()),
                    unsorted_layer_collection_id = escape_literal(&UNSORTED_COLLECTION_ID.to_string())))
                    .await?;
                    debug!("Updated user database to schema version {}", version + 1);
                }
                // Template for the next migration:
                // 1 => {
                // next version
                // conn.batch_execute(
                //     "\
                //     ALTER TABLE users ...
                //
                //     UPDATE version SET version = 2;\
                //     ",
                // )
                // .await?;
                // eprintln!("Updated user database to schema version {}", version + 1);
                // }
                // Already at the newest known version: done.
                _ => return Ok(version),
            }
            version += 1;
        }
    }
}
563

564
#[async_trait]
565
impl<Tls> ApplicationContext for PostgresContext<Tls>
566
where
567
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
568
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
569
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
570
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
571
{
572
    type SessionContext = PostgresSessionContext<Tls>;
573
    type Session = UserSession;
574

575
    fn session_context(&self, session: Self::Session) -> Self::SessionContext {
54✔
576
        PostgresSessionContext {
54✔
577
            session,
54✔
578
            context: self.clone(),
54✔
579
        }
54✔
580
    }
54✔
581

582
    async fn session_by_id(&self, session_id: SessionId) -> Result<Self::Session> {
13✔
583
        self.user_session_by_id(session_id)
13✔
584
            .await
72✔
585
            .map_err(Box::new)
13✔
586
            .context(error::Authorization)
13✔
587
    }
26✔
588
}
589

590
#[async_trait]
591
impl<Tls> ProApplicationContext for PostgresContext<Tls>
592
where
593
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
594
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
595
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
596
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
597
{
598
    fn oidc_request_db(&self) -> Option<&OidcRequestDb> {
×
599
        self.oidc_request_db.as_ref().as_ref()
×
600
    }
×
601
}
602

603
/// A [`PostgresContext`] paired with a concrete [`UserSession`]; created via
/// `ApplicationContext::session_context`.
#[derive(Clone)]
pub struct PostgresSessionContext<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    // The session this context acts on behalf of.
    session: UserSession,
    // Cloned application context (cheap: pool and backends are shared handles).
    context: PostgresContext<Tls>,
}
614

615
#[async_trait]
impl<Tls> SessionContext for PostgresSessionContext<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    type Session = UserSession;
    type GeoEngineDB = PostgresDb<Tls>;

    type TaskContext = SimpleTaskManagerContext;
    type TaskManager = ProTaskManager; // this does not persist across restarts
    type QueryContext = QueryContextImpl;
    type ExecutionContext = ExecutionContextImpl<Self::GeoEngineDB>;

    /// Creates a database handle bound to this session.
    /// Cloning the bb8 pool handle is cheap; each call yields a fresh wrapper.
    fn db(&self) -> Self::GeoEngineDB {
        PostgresDb::new(self.context.pool.clone(), self.session.clone())
    }

    /// Creates a session-scoped task manager backed by the shared (in-memory)
    /// task backend — see the note on `TaskManager` above.
    fn tasks(&self) -> Self::TaskManager {
        ProTaskManager::new(self.context.task_manager.clone(), self.session.clone())
    }

    /// Builds a query context carrying quota-tracking extensions for this session.
    fn query_context(&self) -> Result<Self::QueryContext> {
        // TODO: load config only once

        let mut extensions = QueryContextExtensions::default();
        // quota tracking records computation usage per session/computation
        extensions.insert(
            self.context
                .quota
                .create_quota_tracking(&self.session, ComputationContext::new()),
        );
        // the checker consults the user DB to enforce the quota before work runs
        extensions.insert(Box::new(QuotaCheckerImpl { user_db: self.db() }) as QuotaChecker);

        Ok(QueryContextImpl::new_with_extensions(
            self.context.query_ctx_chunk_size,
            self.context.thread_pool.clone(),
            extensions,
        ))
    }

    /// Builds an execution context using the shared thread pool and tiling spec.
    fn execution_context(&self) -> Result<Self::ExecutionContext> {
        Ok(ExecutionContextImpl::<PostgresDb<Tls>>::new(
            self.db(),
            self.context.thread_pool.clone(),
            self.context.exe_ctx_tiling_spec,
        ))
    }

    /// Lists the configured upload volumes.
    ///
    /// # Errors
    /// Fails with `PermissionDenied` unless the session belongs to an admin.
    fn volumes(&self) -> Result<Vec<Volume>> {
        ensure!(self.session.is_admin(), error::PermissionDenied);

        Ok(self.context.volumes.volumes.clone())
    }

    /// Returns the session this context is bound to.
    fn session(&self) -> &Self::Session {
        &self.session
    }
}
675

676
/// Session-bound database handle for the Postgres backend.
/// All operations executed through this handle use `conn_pool` for connections
/// and carry `session` for authorization.
pub struct PostgresDb<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    // shared bb8 connection pool (cheaply cloneable handle)
    pub(crate) conn_pool: Pool<PostgresConnectionManager<Tls>>,
    // the user session this handle acts on behalf of
    pub(crate) session: UserSession,
}
686

687
impl<Tls> PostgresDb<Tls>
688
where
689
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
690
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
691
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
692
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
693
{
694
    pub fn new(conn_pool: Pool<PostgresConnectionManager<Tls>>, session: UserSession) -> Self {
79✔
695
        Self { conn_pool, session }
79✔
696
    }
79✔
697
}
698

699
/// Marker impl: `GeoEngineDb` aggregates its required sub-traits, so no
/// additional methods are needed here.
impl<Tls> GeoEngineDb for PostgresDb<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
}
707

708
/// Marker impl: the pro-feature database trait is likewise fully provided by
/// its sub-trait implementations elsewhere in this module/crate.
impl<Tls> ProGeoEngineDb for PostgresDb<Tls>
where
    Tls: MakeTlsConnect<Socket> + Clone + Send + Sync + 'static,
    <Tls as MakeTlsConnect<Socket>>::Stream: Send + Sync,
    <Tls as MakeTlsConnect<Socket>>::TlsConnect: Send,
    <<Tls as MakeTlsConnect<Socket>>::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
}
716

717
#[cfg(test)]
718
mod tests {
719
    use std::str::FromStr;
720

721
    use super::*;
722
    use crate::api::model::datatypes::{DataProviderId, DatasetId, LayerId};
723
    use crate::api::model::services::AddDataset;
724
    use crate::datasets::external::mock::{MockCollection, MockExternalLayerProviderDefinition};
725
    use crate::datasets::listing::{DatasetListOptions, DatasetListing, ProvenanceOutput};
726
    use crate::datasets::listing::{DatasetProvider, Provenance};
727
    use crate::datasets::storage::{DatasetStore, MetaDataDefinition};
728
    use crate::datasets::upload::{FileId, UploadId};
729
    use crate::datasets::upload::{FileUpload, Upload, UploadDb};
730
    use crate::layers::layer::{
731
        AddLayer, AddLayerCollection, CollectionItem, LayerCollection, LayerCollectionListOptions,
732
        LayerCollectionListing, LayerListing, ProviderLayerCollectionId, ProviderLayerId,
733
    };
734
    use crate::layers::listing::{LayerCollectionId, LayerCollectionProvider};
735
    use crate::layers::storage::{
736
        LayerDb, LayerProviderDb, LayerProviderListing, LayerProviderListingOptions,
737
        INTERNAL_PROVIDER_ID,
738
    };
739
    use crate::pro::permissions::{Permission, PermissionDb};
740
    use crate::pro::projects::{LoadVersion, ProProjectDb};
741
    use crate::pro::users::{
742
        ExternalUserClaims, RoleDb, UserCredentials, UserDb, UserId, UserRegistration,
743
    };
744
    use crate::pro::util::tests::{admin_login, register_ndvi_workflow_helper};
745
    use crate::projects::{
746
        CreateProject, LayerUpdate, OrderBy, Plot, PlotUpdate, PointSymbology, ProjectDb,
747
        ProjectFilter, ProjectId, ProjectLayer, ProjectListOptions, ProjectListing, STRectangle,
748
        UpdateProject,
749
    };
750
    use crate::util::config::{get_config_element, Postgres};
751
    use crate::workflows::registry::WorkflowRegistry;
752
    use crate::workflows::workflow::Workflow;
753
    use bb8_postgres::bb8::ManageConnection;
754
    use bb8_postgres::tokio_postgres::{self, NoTls};
755
    use futures::{join, Future};
756
    use geoengine_datatypes::collections::VectorDataType;
757
    use geoengine_datatypes::primitives::{
758
        BoundingBox2D, Coordinate2D, DateTime, Duration, FeatureDataType, Measurement,
759
        SpatialResolution, TimeInterval, VectorQueryRectangle,
760
    };
761
    use geoengine_datatypes::spatial_reference::{SpatialReference, SpatialReferenceOption};
762
    use geoengine_datatypes::util::test::TestDefault;
763
    use geoengine_datatypes::util::Identifier;
764
    use geoengine_operators::engine::{
765
        MetaData, MetaDataProvider, MultipleRasterOrSingleVectorSource, PlotOperator,
766
        StaticMetaData, TypedOperator, TypedResultDescriptor, VectorColumnInfo, VectorOperator,
767
        VectorResultDescriptor,
768
    };
769
    use geoengine_operators::mock::{MockPointSource, MockPointSourceParams};
770
    use geoengine_operators::plot::{Statistics, StatisticsParams};
771
    use geoengine_operators::source::{
772
        CsvHeader, FormatSpecifics, OgrSourceColumnSpec, OgrSourceDataset,
773
        OgrSourceDatasetTimeType, OgrSourceDurationSpec, OgrSourceErrorSpec, OgrSourceTimeFormat,
774
    };
775
    use geoengine_operators::util::input::MultiRasterOrVectorOperator::Raster;
776
    use openidconnect::SubjectIdentifier;
777
    use rand::RngCore;
778
    use serde_json::json;
779
    use tokio::runtime::Handle;
780

781
    /// Setup database schema and return its name.
    ///
    /// Connects to the Postgres instance from the test configuration, creates a
    /// randomly-named schema for isolation between test runs, and returns a
    /// `Config` whose `search_path` is pinned to that schema.
    async fn setup_db() -> (tokio_postgres::Config, String) {
        let mut db_config = get_config_element::<Postgres>().unwrap();
        db_config.schema = format!("geoengine_test_{}", rand::thread_rng().next_u64()); // generate random temp schema

        let mut pg_config = tokio_postgres::Config::new();
        pg_config
            .user(&db_config.user)
            .password(&db_config.password)
            .host(&db_config.host)
            .dbname(&db_config.database);

        // generate schema with prior connection
        PostgresConnectionManager::new(pg_config.clone(), NoTls)
            .connect()
            .await
            .unwrap()
            .batch_execute(&format!("CREATE SCHEMA {};", &db_config.schema))
            .await
            .unwrap();

        // fix schema by providing `search_path` option
        pg_config.options(&format!("-c search_path={}", db_config.schema));

        (pg_config, db_config.schema)
    }
25✔
807

808
    /// Tear down database schema.
    ///
    /// Drops the temporary test schema (and everything in it) created by
    /// [`setup_db`]. Panics if the drop fails, so leaked schemas are visible.
    async fn tear_down_db(pg_config: tokio_postgres::Config, schema: &str) {
        // generate schema with prior connection
        PostgresConnectionManager::new(pg_config, NoTls)
            .connect()
            .await
            .unwrap()
            .batch_execute(&format!("DROP SCHEMA {schema} CASCADE;"))
            .await
            .unwrap();
    }
25✔
819

820
    /// Runs the test closure `f` against a `PostgresContext` backed by a fresh
    /// temporary schema, and guarantees the schema is dropped afterwards even
    /// when the closure panics (the panic is re-raised after cleanup).
    async fn with_temp_context<F, Fut>(f: F)
    where
        F: FnOnce(PostgresContext<NoTls>, tokio_postgres::Config) -> Fut
            + std::panic::UnwindSafe
            + Send
            + 'static,
        Fut: Future<Output = ()> + Send,
    {
        let (pg_config, schema) = setup_db().await;

        // catch all panics and clean up first…
        let executed_fn = {
            let pg_config = pg_config.clone();
            // catch_unwind is synchronous, so we re-enter the runtime via
            // block_in_place + block_on; requires a multi_thread test runtime
            std::panic::catch_unwind(move || {
                tokio::task::block_in_place(move || {
                    Handle::current().block_on(async move {
                        let ctx = PostgresContext::new_with_context_spec(
                            pg_config.clone(),
                            tokio_postgres::NoTls,
                            TestDefault::test_default(),
                            TestDefault::test_default(),
                        )
                        .await
                        .unwrap();
                        f(ctx, pg_config.clone()).await;
                    });
                });
            })
        };

        tear_down_db(pg_config, &schema).await;

        // then throw errors afterwards
        if let Err(err) = executed_fn {
            std::panic::resume_unwind(err);
        }
    }
25✔
857

858
    // End-to-end flow for an internally-registered user: anonymous session,
    // registration/login, project CRUD, session state, and permissions.
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn test() {
        with_temp_context(|app_ctx, _| async move {
            anonymous(&app_ctx).await;

            let _user_id = user_reg_login(&app_ctx).await;

            let session = app_ctx
                .login(UserCredentials {
                    email: "foo@example.com".into(),
                    password: "secret123".into(),
                })
                .await
                .unwrap();

            create_projects(&app_ctx, &session).await;

            let projects = list_projects(&app_ctx, &session).await;

            set_session(&app_ctx, &projects).await;

            let project_id = projects[0].id;

            update_projects(&app_ctx, &session, project_id).await;

            add_permission(&app_ctx, &session, project_id).await;

            delete_project(&app_ctx, &session, project_id).await;
        })
        .await;
    }
889

890
    // Same end-to-end flow as `test`, but for an externally (OIDC)
    // authenticated user.
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn test_external() {
        with_temp_context(|app_ctx, _| async move {
            anonymous(&app_ctx).await;

            let session = external_user_login_twice(&app_ctx).await;

            create_projects(&app_ctx, &session).await;

            let projects = list_projects(&app_ctx, &session).await;

            set_session_external(&app_ctx, &projects).await;

            let project_id = projects[0].id;

            update_projects(&app_ctx, &session, project_id).await;

            add_permission(&app_ctx, &session, project_id).await;

            delete_project(&app_ctx, &session, project_id).await;
        })
        .await;
    }
913

914
    /// Logs in with the internally-registered test user and verifies that
    /// session state (project/view) round-trips through the database.
    async fn set_session(app_ctx: &PostgresContext<NoTls>, projects: &[ProjectListing]) {
        let credentials = UserCredentials {
            email: "foo@example.com".into(),
            password: "secret123".into(),
        };

        let session = app_ctx.login(credentials).await.unwrap();

        set_session_in_database(app_ctx, projects, session).await;
    }
1✔
924

925
    /// Logs in via external (OIDC) claims and verifies that session state
    /// (project/view) round-trips through the database.
    async fn set_session_external(app_ctx: &PostgresContext<NoTls>, projects: &[ProjectListing]) {
        let external_user_claims = ExternalUserClaims {
            external_id: SubjectIdentifier::new("Foo bar Id".into()),
            email: "foo@bar.de".into(),
            real_name: "Foo Bar".into(),
        };

        let session = app_ctx
            .login_external(external_user_claims, Duration::minutes(10))
            .await
            .unwrap();

        set_session_in_database(app_ctx, projects, session).await;
    }
1✔
939

940
    /// Shared assertion body for `set_session`/`set_session_external`:
    /// stores the session's active project and view rectangle, then re-reads
    /// the session by id and checks both were persisted.
    async fn set_session_in_database(
        app_ctx: &PostgresContext<NoTls>,
        projects: &[ProjectListing],
        session: UserSession,
    ) {
        let db = app_ctx.session_context(session.clone()).db();

        db.set_session_project(projects[0].id).await.unwrap();

        assert_eq!(
            app_ctx.session_by_id(session.id).await.unwrap().project,
            Some(projects[0].id)
        );

        let rect = STRectangle::new_unchecked(SpatialReference::epsg_4326(), 0., 1., 2., 3., 1, 2);
        db.set_session_view(rect.clone()).await.unwrap();
        assert_eq!(
            app_ctx.session_by_id(session.id).await.unwrap().view,
            Some(rect)
        );
    }
2✔
961

962
    /// Deletes the given project and asserts it can no longer be loaded.
    async fn delete_project(
        app_ctx: &PostgresContext<NoTls>,
        session: &UserSession,
        project_id: ProjectId,
    ) {
        let db = app_ctx.session_context(session.clone()).db();

        db.delete_project(project_id).await.unwrap();

        assert!(db.load_project(project_id).await.is_err());
    }
2✔
973

974
    /// Permission checks on a project: the owner has `Owner`, a second user
    /// starts without any permission, and gains `Read` after it is granted.
    async fn add_permission(
        app_ctx: &PostgresContext<NoTls>,
        session: &UserSession,
        project_id: ProjectId,
    ) {
        let db = app_ctx.session_context(session.clone()).db();

        // the creating session owns the project
        assert!(db
            .has_permission(project_id, Permission::Owner)
            .await
            .unwrap());

        let user2 = app_ctx
            .register_user(UserRegistration {
                email: "user2@example.com".into(),
                password: "12345678".into(),
                real_name: "User2".into(),
            })
            .await
            .unwrap();

        let session2 = app_ctx
            .login(UserCredentials {
                email: "user2@example.com".into(),
                password: "12345678".into(),
            })
            .await
            .unwrap();

        // second user has no permission yet
        let db2 = app_ctx.session_context(session2.clone()).db();
        assert!(!db2
            .has_permission(project_id, Permission::Owner)
            .await
            .unwrap());

        // grant Read to user2 via the owner's db handle
        db.add_permission(user2.into(), project_id, Permission::Read)
            .await
            .unwrap();

        assert!(db2
            .has_permission(project_id, Permission::Read)
            .await
            .unwrap());
    }
2✔
1018

1019
    /// Exercises project versioning: each `update_project` call creates a new
    /// version (layer + one plot, then two plots, then plots removed), so the
    /// version list grows from 1 to 4.
    #[allow(clippy::too_many_lines)]
    async fn update_projects(
        app_ctx: &PostgresContext<NoTls>,
        session: &UserSession,
        project_id: ProjectId,
    ) {
        let db = app_ctx.session_context(session.clone()).db();

        let project = db
            .load_project_version(project_id, LoadVersion::Latest)
            .await
            .unwrap();

        // register a vector workflow to use as a project layer
        let layer_workflow_id = db
            .register_workflow(Workflow {
                operator: TypedOperator::Vector(
                    MockPointSource {
                        params: MockPointSourceParams {
                            points: vec![Coordinate2D::new(1., 2.); 3],
                        },
                    }
                    .boxed(),
                ),
            })
            .await
            .unwrap();

        assert!(db.load_workflow(&layer_workflow_id).await.is_ok());

        // register a plot workflow to use as a project plot
        let plot_workflow_id = db
            .register_workflow(Workflow {
                operator: Statistics {
                    params: StatisticsParams {
                        column_names: vec![],
                    },
                    sources: MultipleRasterOrSingleVectorSource {
                        source: Raster(vec![]),
                    },
                }
                .boxed()
                .into(),
            })
            .await
            .unwrap();

        assert!(db.load_workflow(&plot_workflow_id).await.is_ok());

        // add a plot
        let update = UpdateProject {
            id: project.id,
            name: Some("Test9 Updated".into()),
            description: None,
            layers: Some(vec![LayerUpdate::UpdateOrInsert(ProjectLayer {
                workflow: layer_workflow_id,
                name: "TestLayer".into(),
                symbology: PointSymbology::default().into(),
                visibility: Default::default(),
            })]),
            plots: Some(vec![PlotUpdate::UpdateOrInsert(Plot {
                workflow: plot_workflow_id,
                name: "Test Plot".into(),
            })]),
            bounds: None,
            time_step: None,
        };
        db.update_project(update).await.unwrap();

        let versions = db.list_project_versions(project_id).await.unwrap();
        assert_eq!(versions.len(), 2);

        // add second plot
        let update = UpdateProject {
            id: project.id,
            name: Some("Test9 Updated".into()),
            description: None,
            layers: Some(vec![LayerUpdate::UpdateOrInsert(ProjectLayer {
                workflow: layer_workflow_id,
                name: "TestLayer".into(),
                symbology: PointSymbology::default().into(),
                visibility: Default::default(),
            })]),
            plots: Some(vec![
                PlotUpdate::UpdateOrInsert(Plot {
                    workflow: plot_workflow_id,
                    name: "Test Plot".into(),
                }),
                PlotUpdate::UpdateOrInsert(Plot {
                    workflow: plot_workflow_id,
                    name: "Test Plot".into(),
                }),
            ]),
            bounds: None,
            time_step: None,
        };
        db.update_project(update).await.unwrap();

        let versions = db.list_project_versions(project_id).await.unwrap();
        assert_eq!(versions.len(), 3);

        // delete plots
        let update = UpdateProject {
            id: project.id,
            name: None,
            description: None,
            layers: None,
            plots: Some(vec![]),
            bounds: None,
            time_step: None,
        };
        db.update_project(update).await.unwrap();

        let versions = db.list_project_versions(project_id).await.unwrap();
        assert_eq!(versions.len(), 4);
    }
2✔
1133

1134
    /// Lists projects ordered by name (descending) with a page size of 2 and
    /// asserts the top of the ordering; returns the page for further use.
    async fn list_projects(
        app_ctx: &PostgresContext<NoTls>,
        session: &UserSession,
    ) -> Vec<ProjectListing> {
        let options = ProjectListOptions {
            filter: ProjectFilter::None,
            order: OrderBy::NameDesc,
            offset: 0,
            limit: 2,
        };

        let db = app_ctx.session_context(session.clone()).db();

        let projects = db.list_projects(options).await.unwrap();

        // `create_projects` made Test0..Test9; NameDesc puts Test9/Test8 first
        assert_eq!(projects.len(), 2);
        assert_eq!(projects[0].name, "Test9");
        assert_eq!(projects[1].name, "Test8");
        projects
    }
2✔
1154

1155
    /// Creates ten projects named "Test0".."Test9" owned by the given session.
    async fn create_projects(app_ctx: &PostgresContext<NoTls>, session: &UserSession) {
        let db = app_ctx.session_context(session.clone()).db();

        for i in 0..10 {
            let create = CreateProject {
                name: format!("Test{i}"),
                description: format!("Test{}", 10 - i),
                bounds: STRectangle::new(
                    SpatialReferenceOption::Unreferenced,
                    0.,
                    0.,
                    1.,
                    1.,
                    0,
                    1,
                )
                .unwrap(),
                time_step: None,
            };
            db.create_project(create).await.unwrap();
        }
    }
2✔
1177

1178
    /// Registers a user, logs in, verifies the session is resolvable by id,
    /// then logs out and verifies the session is gone. Returns the user id.
    async fn user_reg_login(app_ctx: &PostgresContext<NoTls>) -> UserId {
        let user_registration = UserRegistration {
            email: "foo@example.com".into(),
            password: "secret123".into(),
            real_name: "Foo Bar".into(),
        };

        let user_id = app_ctx.register_user(user_registration).await.unwrap();

        let credentials = UserCredentials {
            email: "foo@example.com".into(),
            password: "secret123".into(),
        };

        let session = app_ctx.login(credentials).await.unwrap();

        let db = app_ctx.session_context(session.clone()).db();

        app_ctx.session_by_id(session.id).await.unwrap();

        db.logout().await.unwrap();

        // a logged-out session must no longer resolve
        assert!(app_ctx.session_by_id(session.id).await.is_err());

        user_id
    }
1✔
1204

1205
    //TODO: No duplicate tests for postgres and hashmap implementation possible?
    /// Logs an external (OIDC) user in twice: first login creates the user and
    /// is then invalidated via logout; second login must map to the *same*
    /// user id with a fresh session. Returns the second session.
    async fn external_user_login_twice(app_ctx: &PostgresContext<NoTls>) -> UserSession {
        let external_user_claims = ExternalUserClaims {
            external_id: SubjectIdentifier::new("Foo bar Id".into()),
            email: "foo@bar.de".into(),
            real_name: "Foo Bar".into(),
        };
        let duration = Duration::minutes(30);

        //NEW
        let login_result = app_ctx
            .login_external(external_user_claims.clone(), duration)
            .await;
        assert!(login_result.is_ok());

        let session_1 = login_result.unwrap();
        let user_id = session_1.user.id; //TODO: Not a deterministic test.

        let db1 = app_ctx.session_context(session_1.clone()).db();

        // claims must be reflected in the session's user record
        assert!(session_1.user.email.is_some());
        assert_eq!(session_1.user.email.unwrap(), "foo@bar.de");
        assert!(session_1.user.real_name.is_some());
        assert_eq!(session_1.user.real_name.unwrap(), "Foo Bar");

        // session validity equals creation time plus requested duration
        let expected_duration = session_1.created + duration;
        assert_eq!(session_1.valid_until, expected_duration);

        assert!(app_ctx.session_by_id(session_1.id).await.is_ok());

        assert!(db1.logout().await.is_ok());

        assert!(app_ctx.session_by_id(session_1.id).await.is_err());

        // second login with the same external id, different duration
        let duration = Duration::minutes(10);
        let login_result = app_ctx
            .login_external(external_user_claims.clone(), duration)
            .await;
        assert!(login_result.is_ok());

        let session_2 = login_result.unwrap();
        let result = session_2.clone();

        assert!(session_2.user.email.is_some()); //TODO: Technically, user details could change for each login. For simplicity, this is not covered yet.
        assert_eq!(session_2.user.email.unwrap(), "foo@bar.de");
        assert!(session_2.user.real_name.is_some());
        assert_eq!(session_2.user.real_name.unwrap(), "Foo Bar");
        // same external identity => same internal user id
        assert_eq!(session_2.user.id, user_id);

        let expected_duration = session_2.created + duration;
        assert_eq!(session_2.valid_until, expected_duration);

        assert!(app_ctx.session_by_id(session_2.id).await.is_ok());

        result
    }
1✔
1261

1262
    /// Creates an anonymous session, checks its timestamps, resolves it by id,
    /// then logs out and checks it no longer resolves.
    async fn anonymous(app_ctx: &PostgresContext<NoTls>) {
        let now: DateTime = chrono::offset::Utc::now().into();
        let session = app_ctx.create_anonymous_session().await.unwrap();
        let then: DateTime = chrono::offset::Utc::now().into();

        // creation time lies between the surrounding clock reads
        assert!(session.created >= now && session.created <= then);
        assert!(session.valid_until > session.created);

        let session = app_ctx.session_by_id(session.id).await.unwrap();

        let db = app_ctx.session_context(session.clone()).db();

        db.logout().await.unwrap();

        assert!(app_ctx.session_by_id(session.id).await.is_err());
    }
2✔
1278

1279
    // Registers a workflow through one session context, drops the context, and
    // verifies the workflow can still be loaded (i.e. it was persisted in the
    // database, not only held in the context).
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    async fn it_persists_workflows() {
        with_temp_context(|app_ctx, _pg_config| async move {
            let workflow = Workflow {
                operator: TypedOperator::Vector(
                    MockPointSource {
                        params: MockPointSourceParams {
                            points: vec![Coordinate2D::new(1., 2.); 3],
                        },
                    }
                    .boxed(),
                ),
            };

            let session = app_ctx.create_anonymous_session().await.unwrap();
            let ctx = app_ctx.session_context(session);

            let db = ctx.db();
            let id = db.register_workflow(workflow).await.unwrap();

            // dropping the context must not invalidate the stored workflow
            drop(ctx);

            let workflow = db.load_workflow(&id).await.unwrap();

            // the persisted workflow serializes back to the exact original JSON
            let json = serde_json::to_string(&workflow).unwrap();
            assert_eq!(json, r#"{"type":"Vector","operator":{"type":"MockPointSource","params":{"points":[{"x":1.0,"y":2.0},{"x":1.0,"y":2.0},{"x":1.0,"y":2.0}]}}}"#);
        })
        .await;
    }
1312

1313
    #[allow(clippy::too_many_lines)]
1314
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1315
    async fn it_persists_datasets() {
1✔
1316
        with_temp_context(|app_ctx, _| async move {
1✔
1317
            let dataset_id = DatasetId::from_str("2e8af98d-3b98-4e2c-a35b-e487bffad7b6").unwrap();
1✔
1318

1✔
1319
            let loading_info = OgrSourceDataset {
1✔
1320
                file_name: PathBuf::from("test.csv"),
1✔
1321
                layer_name: "test.csv".to_owned(),
1✔
1322
                data_type: Some(VectorDataType::MultiPoint),
1✔
1323
                time: OgrSourceDatasetTimeType::Start {
1✔
1324
                    start_field: "start".to_owned(),
1✔
1325
                    start_format: OgrSourceTimeFormat::Auto,
1✔
1326
                    duration: OgrSourceDurationSpec::Zero,
1✔
1327
                },
1✔
1328
                default_geometry: None,
1✔
1329
                columns: Some(OgrSourceColumnSpec {
1✔
1330
                    format_specifics: Some(FormatSpecifics::Csv {
1✔
1331
                        header: CsvHeader::Auto,
1✔
1332
                    }),
1✔
1333
                    x: "x".to_owned(),
1✔
1334
                    y: None,
1✔
1335
                    int: vec![],
1✔
1336
                    float: vec![],
1✔
1337
                    text: vec![],
1✔
1338
                    bool: vec![],
1✔
1339
                    datetime: vec![],
1✔
1340
                    rename: None,
1✔
1341
                }),
1✔
1342
                force_ogr_time_filter: false,
1✔
1343
                force_ogr_spatial_filter: false,
1✔
1344
                on_error: OgrSourceErrorSpec::Ignore,
1✔
1345
                sql_query: None,
1✔
1346
                attribute_query: None,
1✔
1347
            };
1✔
1348

1✔
1349
            let meta_data = MetaDataDefinition::OgrMetaData(StaticMetaData::<
1✔
1350
                OgrSourceDataset,
1✔
1351
                VectorResultDescriptor,
1✔
1352
                VectorQueryRectangle,
1✔
1353
            > {
1✔
1354
                loading_info: loading_info.clone(),
1✔
1355
                result_descriptor: VectorResultDescriptor {
1✔
1356
                    data_type: VectorDataType::MultiPoint,
1✔
1357
                    spatial_reference: SpatialReference::epsg_4326().into(),
1✔
1358
                    columns: [(
1✔
1359
                        "foo".to_owned(),
1✔
1360
                        VectorColumnInfo {
1✔
1361
                            data_type: FeatureDataType::Float,
1✔
1362
                            measurement: Measurement::Unitless,
1✔
1363
                        },
1✔
1364
                    )]
1✔
1365
                    .into_iter()
1✔
1366
                    .collect(),
1✔
1367
                    time: None,
1✔
1368
                    bbox: None,
1✔
1369
                },
1✔
1370
                phantom: Default::default(),
1✔
1371
            });
1✔
1372

1373
            let session = app_ctx.create_anonymous_session().await.unwrap();
13✔
1374

1✔
1375
            let db = app_ctx.session_context(session.clone()).db();
1✔
1376
            let wrap = db.wrap_meta_data(meta_data);
1✔
1377
            db.add_dataset(
1✔
1378
                AddDataset {
1✔
1379
                    id: Some(dataset_id),
1✔
1380
                    name: "Ogr Test".to_owned(),
1✔
1381
                    description: "desc".to_owned(),
1✔
1382
                    source_operator: "OgrSource".to_owned(),
1✔
1383
                    symbology: None,
1✔
1384
                    provenance: Some(vec![Provenance {
1✔
1385
                        citation: "citation".to_owned(),
1✔
1386
                        license: "license".to_owned(),
1✔
1387
                        uri: "uri".to_owned(),
1✔
1388
                    }]),
1✔
1389
                },
1✔
1390
                wrap,
1✔
1391
            )
1✔
1392
            .await
11✔
1393
            .unwrap();
1✔
1394

1395
            let datasets = db
1✔
1396
                .list_datasets(DatasetListOptions {
1✔
1397
                    filter: None,
1✔
1398
                    order: crate::datasets::listing::OrderBy::NameAsc,
1✔
1399
                    offset: 0,
1✔
1400
                    limit: 10,
1✔
1401
                })
1✔
1402
                .await
3✔
1403
                .unwrap();
1✔
1404

1✔
1405
            assert_eq!(datasets.len(), 1);
1✔
1406

1407
            assert_eq!(
1✔
1408
                datasets[0],
1✔
1409
                DatasetListing {
1✔
1410
                    id: dataset_id,
1✔
1411
                    name: "Ogr Test".to_owned(),
1✔
1412
                    description: "desc".to_owned(),
1✔
1413
                    source_operator: "OgrSource".to_owned(),
1✔
1414
                    symbology: None,
1✔
1415
                    tags: vec![],
1✔
1416
                    result_descriptor: TypedResultDescriptor::Vector(VectorResultDescriptor {
1✔
1417
                        data_type: VectorDataType::MultiPoint,
1✔
1418
                        spatial_reference: SpatialReference::epsg_4326().into(),
1✔
1419
                        columns: [(
1✔
1420
                            "foo".to_owned(),
1✔
1421
                            VectorColumnInfo {
1✔
1422
                                data_type: FeatureDataType::Float,
1✔
1423
                                measurement: Measurement::Unitless
1✔
1424
                            }
1✔
1425
                        )]
1✔
1426
                        .into_iter()
1✔
1427
                        .collect(),
1✔
1428
                        time: None,
1✔
1429
                        bbox: None,
1✔
1430
                    })
1✔
1431
                    .into(),
1✔
1432
                },
1✔
1433
            );
1✔
1434

1435
            let provenance = db.load_provenance(&dataset_id).await.unwrap();
3✔
1436

1✔
1437
            assert_eq!(
1✔
1438
                provenance,
1✔
1439
                ProvenanceOutput {
1✔
1440
                    data: dataset_id.into(),
1✔
1441
                    provenance: Some(vec![Provenance {
1✔
1442
                        citation: "citation".to_owned(),
1✔
1443
                        license: "license".to_owned(),
1✔
1444
                        uri: "uri".to_owned(),
1✔
1445
                    }])
1✔
1446
                }
1✔
1447
            );
1✔
1448

1449
            let meta_data: Box<dyn MetaData<OgrSourceDataset, _, _>> =
1✔
1450
                db.meta_data(&dataset_id.into()).await.unwrap();
7✔
1451

1452
            assert_eq!(
1✔
1453
                meta_data
1✔
1454
                    .loading_info(VectorQueryRectangle {
1✔
1455
                        spatial_bounds: BoundingBox2D::new_unchecked(
1✔
1456
                            (-180., -90.).into(),
1✔
1457
                            (180., 90.).into()
1✔
1458
                        ),
1✔
1459
                        time_interval: TimeInterval::default(),
1✔
1460
                        spatial_resolution: SpatialResolution::zero_point_one(),
1✔
1461
                    })
1✔
1462
                    .await
×
1463
                    .unwrap(),
1✔
1464
                loading_info
1465
            );
1466
        })
1✔
1467
        .await;
12✔
1468
    }
1469

1470
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1471
    async fn it_persists_uploads() {
1✔
1472
        with_temp_context(|app_ctx, _| async move {
1✔
1473
            let id = UploadId::from_str("2de18cd8-4a38-4111-a445-e3734bc18a80").unwrap();
1✔
1474
            let input = Upload {
1✔
1475
                id,
1✔
1476
                files: vec![FileUpload {
1✔
1477
                    id: FileId::from_str("e80afab0-831d-4d40-95d6-1e4dfd277e72").unwrap(),
1✔
1478
                    name: "test.csv".to_owned(),
1✔
1479
                    byte_size: 1337,
1✔
1480
                }],
1✔
1481
            };
1✔
1482

1483
            let session = app_ctx.create_anonymous_session().await.unwrap();
13✔
1484

1✔
1485
            let db = app_ctx.session_context(session.clone()).db();
1✔
1486

1✔
1487
            db.create_upload(input.clone()).await.unwrap();
8✔
1488

1489
            let upload = db.load_upload(id).await.unwrap();
3✔
1490

1✔
1491
            assert_eq!(upload, input);
1✔
1492
        })
1✔
1493
        .await;
9✔
1494
    }
1495

1496
    #[allow(clippy::too_many_lines)]
1497
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1498
    async fn it_persists_layer_providers() {
1✔
1499
        with_temp_context(|app_ctx, _| async move {
1✔
1500
            let db = app_ctx.session_context(UserSession::admin_session()).db();
1✔
1501

1✔
1502
            let provider_id =
1✔
1503
                DataProviderId::from_str("7b20c8d7-d754-4f8f-ad44-dddd25df22d2").unwrap();
1✔
1504

1✔
1505
            let loading_info = OgrSourceDataset {
1✔
1506
                file_name: PathBuf::from("test.csv"),
1✔
1507
                layer_name: "test.csv".to_owned(),
1✔
1508
                data_type: Some(VectorDataType::MultiPoint),
1✔
1509
                time: OgrSourceDatasetTimeType::Start {
1✔
1510
                    start_field: "start".to_owned(),
1✔
1511
                    start_format: OgrSourceTimeFormat::Auto,
1✔
1512
                    duration: OgrSourceDurationSpec::Zero,
1✔
1513
                },
1✔
1514
                default_geometry: None,
1✔
1515
                columns: Some(OgrSourceColumnSpec {
1✔
1516
                    format_specifics: Some(FormatSpecifics::Csv {
1✔
1517
                        header: CsvHeader::Auto,
1✔
1518
                    }),
1✔
1519
                    x: "x".to_owned(),
1✔
1520
                    y: None,
1✔
1521
                    int: vec![],
1✔
1522
                    float: vec![],
1✔
1523
                    text: vec![],
1✔
1524
                    bool: vec![],
1✔
1525
                    datetime: vec![],
1✔
1526
                    rename: None,
1✔
1527
                }),
1✔
1528
                force_ogr_time_filter: false,
1✔
1529
                force_ogr_spatial_filter: false,
1✔
1530
                on_error: OgrSourceErrorSpec::Ignore,
1✔
1531
                sql_query: None,
1✔
1532
                attribute_query: None,
1✔
1533
            };
1✔
1534

1✔
1535
            let meta_data = MetaDataDefinition::OgrMetaData(StaticMetaData::<
1✔
1536
                OgrSourceDataset,
1✔
1537
                VectorResultDescriptor,
1✔
1538
                VectorQueryRectangle,
1✔
1539
            > {
1✔
1540
                loading_info: loading_info.clone(),
1✔
1541
                result_descriptor: VectorResultDescriptor {
1✔
1542
                    data_type: VectorDataType::MultiPoint,
1✔
1543
                    spatial_reference: SpatialReference::epsg_4326().into(),
1✔
1544
                    columns: [(
1✔
1545
                        "foo".to_owned(),
1✔
1546
                        VectorColumnInfo {
1✔
1547
                            data_type: FeatureDataType::Float,
1✔
1548
                            measurement: Measurement::Unitless,
1✔
1549
                        },
1✔
1550
                    )]
1✔
1551
                    .into_iter()
1✔
1552
                    .collect(),
1✔
1553
                    time: None,
1✔
1554
                    bbox: None,
1✔
1555
                },
1✔
1556
                phantom: Default::default(),
1✔
1557
            });
1✔
1558

1✔
1559
            let provider = MockExternalLayerProviderDefinition {
1✔
1560
                id: provider_id,
1✔
1561
                root_collection: MockCollection {
1✔
1562
                    id: LayerCollectionId("b5f82c7c-9133-4ac1-b4ae-8faac3b9a6df".to_owned()),
1✔
1563
                    name: "Mock Collection A".to_owned(),
1✔
1564
                    description: "Some description".to_owned(),
1✔
1565
                    collections: vec![MockCollection {
1✔
1566
                        id: LayerCollectionId("21466897-37a1-4666-913a-50b5244699ad".to_owned()),
1✔
1567
                        name: "Mock Collection B".to_owned(),
1✔
1568
                        description: "Some description".to_owned(),
1✔
1569
                        collections: vec![],
1✔
1570
                        layers: vec![],
1✔
1571
                    }],
1✔
1572
                    layers: vec![],
1✔
1573
                },
1✔
1574
                data: [("myData".to_owned(), meta_data)].into_iter().collect(),
1✔
1575
            };
1✔
1576

1✔
1577
            db.add_layer_provider(Box::new(provider)).await.unwrap();
3✔
1578

1579
            let providers = db
1✔
1580
                .list_layer_providers(LayerProviderListingOptions {
1✔
1581
                    offset: 0,
1✔
1582
                    limit: 10,
1✔
1583
                })
1✔
1584
                .await
3✔
1585
                .unwrap();
1✔
1586

1✔
1587
            assert_eq!(providers.len(), 1);
1✔
1588

1589
            assert_eq!(
1✔
1590
                providers[0],
1✔
1591
                LayerProviderListing {
1✔
1592
                    id: provider_id,
1✔
1593
                    name: "MockName".to_owned(),
1✔
1594
                    description: "MockType".to_owned(),
1✔
1595
                }
1✔
1596
            );
1✔
1597

1598
            let provider = db.load_layer_provider(provider_id).await.unwrap();
3✔
1599

1600
            let datasets = provider
1✔
1601
                .load_layer_collection(
1602
                    &provider.get_root_layer_collection_id().await.unwrap(),
1✔
1603
                    LayerCollectionListOptions {
1✔
1604
                        offset: 0,
1✔
1605
                        limit: 10,
1✔
1606
                    },
1✔
1607
                )
1608
                .await
×
1609
                .unwrap();
1✔
1610

1✔
1611
            assert_eq!(datasets.items.len(), 1);
1✔
1612
        })
1✔
1613
        .await;
11✔
1614
    }
1615

1616
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1617
    async fn it_lists_only_permitted_datasets() {
1✔
1618
        with_temp_context(|app_ctx, _| async move {
1✔
1619
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1620
            let session2 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1621

1✔
1622
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
1623
            let db2 = app_ctx.session_context(session2.clone()).db();
1✔
1624

1✔
1625
            let descriptor = VectorResultDescriptor {
1✔
1626
                data_type: VectorDataType::Data,
1✔
1627
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1628
                columns: Default::default(),
1✔
1629
                time: None,
1✔
1630
                bbox: None,
1✔
1631
            };
1✔
1632

1✔
1633
            let ds = AddDataset {
1✔
1634
                id: None,
1✔
1635
                name: "OgrDataset".to_string(),
1✔
1636
                description: "My Ogr dataset".to_string(),
1✔
1637
                source_operator: "OgrSource".to_string(),
1✔
1638
                symbology: None,
1✔
1639
                provenance: None,
1✔
1640
            };
1✔
1641

1✔
1642
            let meta = StaticMetaData {
1✔
1643
                loading_info: OgrSourceDataset {
1✔
1644
                    file_name: Default::default(),
1✔
1645
                    layer_name: String::new(),
1✔
1646
                    data_type: None,
1✔
1647
                    time: Default::default(),
1✔
1648
                    default_geometry: None,
1✔
1649
                    columns: None,
1✔
1650
                    force_ogr_time_filter: false,
1✔
1651
                    force_ogr_spatial_filter: false,
1✔
1652
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1653
                    sql_query: None,
1✔
1654
                    attribute_query: None,
1✔
1655
                },
1✔
1656
                result_descriptor: descriptor.clone(),
1✔
1657
                phantom: Default::default(),
1✔
1658
            };
1✔
1659

1✔
1660
            let meta = db1.wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1661

1662
            let _id = db1.add_dataset(ds, meta).await.unwrap();
11✔
1663

1664
            let list1 = db1
1✔
1665
                .list_datasets(DatasetListOptions {
1✔
1666
                    filter: None,
1✔
1667
                    order: crate::datasets::listing::OrderBy::NameAsc,
1✔
1668
                    offset: 0,
1✔
1669
                    limit: 1,
1✔
1670
                })
1✔
1671
                .await
3✔
1672
                .unwrap();
1✔
1673

1✔
1674
            assert_eq!(list1.len(), 1);
1✔
1675

1676
            let list2 = db2
1✔
1677
                .list_datasets(DatasetListOptions {
1✔
1678
                    filter: None,
1✔
1679
                    order: crate::datasets::listing::OrderBy::NameAsc,
1✔
1680
                    offset: 0,
1✔
1681
                    limit: 1,
1✔
1682
                })
1✔
1683
                .await
3✔
1684
                .unwrap();
1✔
1685

1✔
1686
            assert_eq!(list2.len(), 0);
1✔
1687
        })
1✔
1688
        .await;
12✔
1689
    }
1690

1691
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1692
    async fn it_shows_only_permitted_provenance() {
1✔
1693
        with_temp_context(|app_ctx, _| async move {
1✔
1694
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1695
            let session2 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1696

1✔
1697
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
1698
            let db2 = app_ctx.session_context(session2.clone()).db();
1✔
1699

1✔
1700
            let descriptor = VectorResultDescriptor {
1✔
1701
                data_type: VectorDataType::Data,
1✔
1702
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1703
                columns: Default::default(),
1✔
1704
                time: None,
1✔
1705
                bbox: None,
1✔
1706
            };
1✔
1707

1✔
1708
            let ds = AddDataset {
1✔
1709
                id: None,
1✔
1710
                name: "OgrDataset".to_string(),
1✔
1711
                description: "My Ogr dataset".to_string(),
1✔
1712
                source_operator: "OgrSource".to_string(),
1✔
1713
                symbology: None,
1✔
1714
                provenance: None,
1✔
1715
            };
1✔
1716

1✔
1717
            let meta = StaticMetaData {
1✔
1718
                loading_info: OgrSourceDataset {
1✔
1719
                    file_name: Default::default(),
1✔
1720
                    layer_name: String::new(),
1✔
1721
                    data_type: None,
1✔
1722
                    time: Default::default(),
1✔
1723
                    default_geometry: None,
1✔
1724
                    columns: None,
1✔
1725
                    force_ogr_time_filter: false,
1✔
1726
                    force_ogr_spatial_filter: false,
1✔
1727
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1728
                    sql_query: None,
1✔
1729
                    attribute_query: None,
1✔
1730
                },
1✔
1731
                result_descriptor: descriptor.clone(),
1✔
1732
                phantom: Default::default(),
1✔
1733
            };
1✔
1734

1✔
1735
            let meta = db1.wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1736

1737
            let id = db1.add_dataset(ds, meta).await.unwrap();
11✔
1738

1739
            assert!(db1.load_provenance(&id).await.is_ok());
3✔
1740

1741
            assert!(db2.load_provenance(&id).await.is_err());
3✔
1742
        })
1✔
1743
        .await;
10✔
1744
    }
1745

1746
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1747
    async fn it_updates_permissions() {
1✔
1748
        with_temp_context(|app_ctx, _| async move {
1✔
1749
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1750
            let session2 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1751

1✔
1752
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
1753
            let db2 = app_ctx.session_context(session2.clone()).db();
1✔
1754

1✔
1755
            let descriptor = VectorResultDescriptor {
1✔
1756
                data_type: VectorDataType::Data,
1✔
1757
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1758
                columns: Default::default(),
1✔
1759
                time: None,
1✔
1760
                bbox: None,
1✔
1761
            };
1✔
1762

1✔
1763
            let ds = AddDataset {
1✔
1764
                id: None,
1✔
1765
                name: "OgrDataset".to_string(),
1✔
1766
                description: "My Ogr dataset".to_string(),
1✔
1767
                source_operator: "OgrSource".to_string(),
1✔
1768
                symbology: None,
1✔
1769
                provenance: None,
1✔
1770
            };
1✔
1771

1✔
1772
            let meta = StaticMetaData {
1✔
1773
                loading_info: OgrSourceDataset {
1✔
1774
                    file_name: Default::default(),
1✔
1775
                    layer_name: String::new(),
1✔
1776
                    data_type: None,
1✔
1777
                    time: Default::default(),
1✔
1778
                    default_geometry: None,
1✔
1779
                    columns: None,
1✔
1780
                    force_ogr_time_filter: false,
1✔
1781
                    force_ogr_spatial_filter: false,
1✔
1782
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1783
                    sql_query: None,
1✔
1784
                    attribute_query: None,
1✔
1785
                },
1✔
1786
                result_descriptor: descriptor.clone(),
1✔
1787
                phantom: Default::default(),
1✔
1788
            };
1✔
1789

1✔
1790
            let meta = db1.wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1791

1792
            let id = db1.add_dataset(ds, meta).await.unwrap();
11✔
1793

1794
            assert!(db1.load_dataset(&id).await.is_ok());
3✔
1795

1796
            assert!(db2.load_dataset(&id).await.is_err());
3✔
1797

1798
            db1.add_permission(session2.user.id.into(), id, Permission::Read)
1✔
1799
                .await
7✔
1800
                .unwrap();
1✔
1801

1802
            assert!(db2.load_dataset(&id).await.is_ok());
3✔
1803
        })
1✔
1804
        .await;
12✔
1805
    }
1806

1807
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1808
    async fn it_uses_roles_for_permissions() {
1✔
1809
        with_temp_context(|app_ctx, _| async move {
1✔
1810
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
5✔
1811
            let session2 = app_ctx.create_anonymous_session().await.unwrap();
4✔
1812

1✔
1813
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
1814
            let db2 = app_ctx.session_context(session2.clone()).db();
1✔
1815

1✔
1816
            let descriptor = VectorResultDescriptor {
1✔
1817
                data_type: VectorDataType::Data,
1✔
1818
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1819
                columns: Default::default(),
1✔
1820
                time: None,
1✔
1821
                bbox: None,
1✔
1822
            };
1✔
1823

1✔
1824
            let ds = AddDataset {
1✔
1825
                id: None,
1✔
1826
                name: "OgrDataset".to_string(),
1✔
1827
                description: "My Ogr dataset".to_string(),
1✔
1828
                source_operator: "OgrSource".to_string(),
1✔
1829
                symbology: None,
1✔
1830
                provenance: None,
1✔
1831
            };
1✔
1832

1✔
1833
            let meta = StaticMetaData {
1✔
1834
                loading_info: OgrSourceDataset {
1✔
1835
                    file_name: Default::default(),
1✔
1836
                    layer_name: String::new(),
1✔
1837
                    data_type: None,
1✔
1838
                    time: Default::default(),
1✔
1839
                    default_geometry: None,
1✔
1840
                    columns: None,
1✔
1841
                    force_ogr_time_filter: false,
1✔
1842
                    force_ogr_spatial_filter: false,
1✔
1843
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1844
                    sql_query: None,
1✔
1845
                    attribute_query: None,
1✔
1846
                },
1✔
1847
                result_descriptor: descriptor.clone(),
1✔
1848
                phantom: Default::default(),
1✔
1849
            };
1✔
1850

1✔
1851
            let meta = db1.wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1852

1853
            let id = db1.add_dataset(ds, meta).await.unwrap();
11✔
1854

1855
            assert!(db1.load_dataset(&id).await.is_ok());
3✔
1856

1857
            assert!(db2.load_dataset(&id).await.is_err());
3✔
1858

1859
            db1.add_permission(session2.user.id.into(), id, Permission::Read)
1✔
1860
                .await
7✔
1861
                .unwrap();
1✔
1862

1863
            assert!(db2.load_dataset(&id).await.is_ok());
3✔
1864
        })
1✔
1865
        .await;
10✔
1866
    }
1867

1868
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1869
    async fn it_secures_meta_data() {
1✔
1870
        with_temp_context(|app_ctx, _| async move {
1✔
1871
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
12✔
1872
            let session2 = app_ctx.create_anonymous_session().await.unwrap();
11✔
1873

1✔
1874
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
1875
            let db2 = app_ctx.session_context(session2.clone()).db();
1✔
1876

1✔
1877
            let descriptor = VectorResultDescriptor {
1✔
1878
                data_type: VectorDataType::Data,
1✔
1879
                spatial_reference: SpatialReferenceOption::Unreferenced,
1✔
1880
                columns: Default::default(),
1✔
1881
                time: None,
1✔
1882
                bbox: None,
1✔
1883
            };
1✔
1884

1✔
1885
            let ds = AddDataset {
1✔
1886
                id: None,
1✔
1887
                name: "OgrDataset".to_string(),
1✔
1888
                description: "My Ogr dataset".to_string(),
1✔
1889
                source_operator: "OgrSource".to_string(),
1✔
1890
                symbology: None,
1✔
1891
                provenance: None,
1✔
1892
            };
1✔
1893

1✔
1894
            let meta = StaticMetaData {
1✔
1895
                loading_info: OgrSourceDataset {
1✔
1896
                    file_name: Default::default(),
1✔
1897
                    layer_name: String::new(),
1✔
1898
                    data_type: None,
1✔
1899
                    time: Default::default(),
1✔
1900
                    default_geometry: None,
1✔
1901
                    columns: None,
1✔
1902
                    force_ogr_time_filter: false,
1✔
1903
                    force_ogr_spatial_filter: false,
1✔
1904
                    on_error: OgrSourceErrorSpec::Ignore,
1✔
1905
                    sql_query: None,
1✔
1906
                    attribute_query: None,
1✔
1907
                },
1✔
1908
                result_descriptor: descriptor.clone(),
1✔
1909
                phantom: Default::default(),
1✔
1910
            };
1✔
1911

1✔
1912
            let meta = db1.wrap_meta_data(MetaDataDefinition::OgrMetaData(meta));
1✔
1913

1914
            let id = db1.add_dataset(ds, meta).await.unwrap();
11✔
1915

1916
            let meta: geoengine_operators::util::Result<
1✔
1917
                Box<dyn MetaData<OgrSourceDataset, VectorResultDescriptor, VectorQueryRectangle>>,
1✔
1918
            > = db1.meta_data(&id.into()).await;
7✔
1919

1920
            assert!(meta.is_ok());
1✔
1921

1922
            let meta: geoengine_operators::util::Result<
1✔
1923
                Box<dyn MetaData<OgrSourceDataset, VectorResultDescriptor, VectorQueryRectangle>>,
1✔
1924
            > = db2.meta_data(&id.into()).await;
3✔
1925

1926
            assert!(meta.is_err());
1✔
1927

1928
            db1.add_permission(session2.user.id.into(), id, Permission::Read)
1✔
1929
                .await
6✔
1930
                .unwrap();
1✔
1931

1932
            let meta: geoengine_operators::util::Result<
1✔
1933
                Box<dyn MetaData<OgrSourceDataset, VectorResultDescriptor, VectorQueryRectangle>>,
1✔
1934
            > = db2.meta_data(&id.into()).await;
6✔
1935

1936
            assert!(meta.is_ok());
1✔
1937
        })
1✔
1938
        .await;
11✔
1939
    }
1940

1941
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1942
    async fn it_secures_uploads() {
1✔
1943
        with_temp_context(|app_ctx, _| async move {
1✔
1944
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1945
            let session2 = app_ctx.create_anonymous_session().await.unwrap();
13✔
1946

1✔
1947
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
1948
            let db2 = app_ctx.session_context(session2.clone()).db();
1✔
1949

1✔
1950
            let upload_id = UploadId::new();
1✔
1951

1✔
1952
            let upload = Upload {
1✔
1953
                id: upload_id,
1✔
1954
                files: vec![FileUpload {
1✔
1955
                    id: FileId::new(),
1✔
1956
                    name: "test.bin".to_owned(),
1✔
1957
                    byte_size: 1024,
1✔
1958
                }],
1✔
1959
            };
1✔
1960

1✔
1961
            db1.create_upload(upload).await.unwrap();
8✔
1962

1963
            assert!(db1.load_upload(upload_id).await.is_ok());
3✔
1964

1965
            assert!(db2.load_upload(upload_id).await.is_err());
3✔
1966
        })
1✔
1967
        .await;
11✔
1968
    }
1969

1970
    #[allow(clippy::too_many_lines)]
1971
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
1972
    async fn it_collects_layers() {
1✔
1973
        with_temp_context(|app_ctx, _| async move {
1✔
1974
            let session = admin_login(&app_ctx).await;
5✔
1975

1976
            let layer_db = app_ctx.session_context(session).db();
1✔
1977

1✔
1978
            let workflow = Workflow {
1✔
1979
                operator: TypedOperator::Vector(
1✔
1980
                    MockPointSource {
1✔
1981
                        params: MockPointSourceParams {
1✔
1982
                            points: vec![Coordinate2D::new(1., 2.); 3],
1✔
1983
                        },
1✔
1984
                    }
1✔
1985
                    .boxed(),
1✔
1986
                ),
1✔
1987
            };
1✔
1988

1989
            let root_collection_id = layer_db.get_root_layer_collection_id().await.unwrap();
1✔
1990

1991
            let layer1 = layer_db
1✔
1992
                .add_layer(
1✔
1993
                    AddLayer {
1✔
1994
                        name: "Layer1".to_string(),
1✔
1995
                        description: "Layer 1".to_string(),
1✔
1996
                        symbology: None,
1✔
1997
                        workflow: workflow.clone(),
1✔
1998
                        metadata: [("meta".to_string(), "datum".to_string())].into(),
1✔
1999
                        properties: vec![("proper".to_string(), "tee".to_string()).into()],
1✔
2000
                    },
1✔
2001
                    &root_collection_id,
1✔
2002
                )
1✔
2003
                .await
4✔
2004
                .unwrap();
1✔
2005

2006
            assert_eq!(
1✔
2007
                layer_db.load_layer(&layer1).await.unwrap(),
1✔
2008
                crate::layers::layer::Layer {
1✔
2009
                    id: ProviderLayerId {
1✔
2010
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2011
                        layer_id: layer1.clone(),
1✔
2012
                    },
1✔
2013
                    name: "Layer1".to_string(),
1✔
2014
                    description: "Layer 1".to_string(),
1✔
2015
                    symbology: None,
1✔
2016
                    workflow: workflow.clone(),
1✔
2017
                    metadata: [("meta".to_string(), "datum".to_string())].into(),
1✔
2018
                    properties: vec![("proper".to_string(), "tee".to_string()).into()],
1✔
2019
                }
1✔
2020
            );
2021

2022
            let collection1_id = layer_db
1✔
2023
                .add_layer_collection(
1✔
2024
                    AddLayerCollection {
1✔
2025
                        name: "Collection1".to_string(),
1✔
2026
                        description: "Collection 1".to_string(),
1✔
2027
                        properties: Default::default(),
1✔
2028
                    },
1✔
2029
                    &root_collection_id,
1✔
2030
                )
1✔
2031
                .await
7✔
2032
                .unwrap();
1✔
2033

2034
            let layer2 = layer_db
1✔
2035
                .add_layer(
1✔
2036
                    AddLayer {
1✔
2037
                        name: "Layer2".to_string(),
1✔
2038
                        description: "Layer 2".to_string(),
1✔
2039
                        symbology: None,
1✔
2040
                        workflow: workflow.clone(),
1✔
2041
                        metadata: Default::default(),
1✔
2042
                        properties: Default::default(),
1✔
2043
                    },
1✔
2044
                    &collection1_id,
1✔
2045
                )
1✔
2046
                .await
12✔
2047
                .unwrap();
1✔
2048

2049
            let collection2_id = layer_db
1✔
2050
                .add_layer_collection(
1✔
2051
                    AddLayerCollection {
1✔
2052
                        name: "Collection2".to_string(),
1✔
2053
                        description: "Collection 2".to_string(),
1✔
2054
                        properties: Default::default(),
1✔
2055
                    },
1✔
2056
                    &collection1_id,
1✔
2057
                )
1✔
2058
                .await
12✔
2059
                .unwrap();
1✔
2060

1✔
2061
            layer_db
1✔
2062
                .add_collection_to_parent(&collection2_id, &collection1_id)
1✔
2063
                .await
6✔
2064
                .unwrap();
1✔
2065

2066
            let root_collection = layer_db
1✔
2067
                .load_layer_collection(
1✔
2068
                    &root_collection_id,
1✔
2069
                    LayerCollectionListOptions {
1✔
2070
                        offset: 0,
1✔
2071
                        limit: 20,
1✔
2072
                    },
1✔
2073
                )
1✔
2074
                .await
8✔
2075
                .unwrap();
1✔
2076

1✔
2077
            assert_eq!(
1✔
2078
                root_collection,
1✔
2079
                LayerCollection {
1✔
2080
                    id: ProviderLayerCollectionId {
1✔
2081
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2082
                        collection_id: root_collection_id,
1✔
2083
                    },
1✔
2084
                    name: "Layers".to_string(),
1✔
2085
                    description: "All available Geo Engine layers".to_string(),
1✔
2086
                    items: vec![
1✔
2087
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2088
                            id: ProviderLayerCollectionId {
1✔
2089
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2090
                                collection_id: collection1_id.clone(),
1✔
2091
                            },
1✔
2092
                            name: "Collection1".to_string(),
1✔
2093
                            description: "Collection 1".to_string(),
1✔
2094
                            properties: Default::default(),
1✔
2095
                        }),
1✔
2096
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2097
                            id: ProviderLayerCollectionId {
1✔
2098
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2099
                                collection_id: LayerCollectionId(
1✔
2100
                                    UNSORTED_COLLECTION_ID.to_string()
1✔
2101
                                ),
1✔
2102
                            },
1✔
2103
                            name: "Unsorted".to_string(),
1✔
2104
                            description: "Unsorted Layers".to_string(),
1✔
2105
                            properties: Default::default(),
1✔
2106
                        }),
1✔
2107
                        CollectionItem::Layer(LayerListing {
1✔
2108
                            id: ProviderLayerId {
1✔
2109
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2110
                                layer_id: layer1,
1✔
2111
                            },
1✔
2112
                            name: "Layer1".to_string(),
1✔
2113
                            description: "Layer 1".to_string(),
1✔
2114
                            properties: vec![("proper".to_string(), "tee".to_string()).into()],
1✔
2115
                        })
1✔
2116
                    ],
1✔
2117
                    entry_label: None,
1✔
2118
                    properties: vec![],
1✔
2119
                }
1✔
2120
            );
1✔
2121

2122
            let collection1 = layer_db
1✔
2123
                .load_layer_collection(
1✔
2124
                    &collection1_id,
1✔
2125
                    LayerCollectionListOptions {
1✔
2126
                        offset: 0,
1✔
2127
                        limit: 20,
1✔
2128
                    },
1✔
2129
                )
1✔
2130
                .await
8✔
2131
                .unwrap();
1✔
2132

1✔
2133
            assert_eq!(
1✔
2134
                collection1,
1✔
2135
                LayerCollection {
1✔
2136
                    id: ProviderLayerCollectionId {
1✔
2137
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2138
                        collection_id: collection1_id,
1✔
2139
                    },
1✔
2140
                    name: "Collection1".to_string(),
1✔
2141
                    description: "Collection 1".to_string(),
1✔
2142
                    items: vec![
1✔
2143
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2144
                            id: ProviderLayerCollectionId {
1✔
2145
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2146
                                collection_id: collection2_id,
1✔
2147
                            },
1✔
2148
                            name: "Collection2".to_string(),
1✔
2149
                            description: "Collection 2".to_string(),
1✔
2150
                            properties: Default::default(),
1✔
2151
                        }),
1✔
2152
                        CollectionItem::Layer(LayerListing {
1✔
2153
                            id: ProviderLayerId {
1✔
2154
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2155
                                layer_id: layer2,
1✔
2156
                            },
1✔
2157
                            name: "Layer2".to_string(),
1✔
2158
                            description: "Layer 2".to_string(),
1✔
2159
                            properties: vec![],
1✔
2160
                        })
1✔
2161
                    ],
1✔
2162
                    entry_label: None,
1✔
2163
                    properties: vec![],
1✔
2164
                }
1✔
2165
            );
1✔
2166
        })
1✔
2167
        .await;
11✔
2168
    }
2169

2170
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2171
    async fn it_tracks_used_quota_in_postgres() {
1✔
2172
        with_temp_context(|app_ctx, _| async move {
1✔
2173
            let _user = app_ctx
1✔
2174
                .register_user(UserRegistration {
1✔
2175
                    email: "foo@example.com".to_string(),
1✔
2176
                    password: "secret1234".to_string(),
1✔
2177
                    real_name: "Foo Bar".to_string(),
1✔
2178
                })
1✔
2179
                .await
11✔
2180
                .unwrap();
1✔
2181

2182
            let session = app_ctx
1✔
2183
                .login(UserCredentials {
1✔
2184
                    email: "foo@example.com".to_string(),
1✔
2185
                    password: "secret1234".to_string(),
1✔
2186
                })
1✔
2187
                .await
7✔
2188
                .unwrap();
1✔
2189

2190
            let admin_session = admin_login(&app_ctx).await;
6✔
2191

2192
            let quota = initialize_quota_tracking(app_ctx.session_context(admin_session).db());
1✔
2193

1✔
2194
            let tracking = quota.create_quota_tracking(&session, ComputationContext::new());
1✔
2195

1✔
2196
            tracking.work_unit_done();
1✔
2197
            tracking.work_unit_done();
1✔
2198

1✔
2199
            let db = app_ctx.session_context(session).db();
1✔
2200

1✔
2201
            // wait for quota to be recorded
1✔
2202
            let mut success = false;
1✔
2203
            for _ in 0..10 {
2✔
2204
                let used = db.quota_used().await.unwrap();
6✔
2205
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
2✔
2206

2207
                if used == 2 {
2✔
2208
                    success = true;
1✔
2209
                    break;
1✔
2210
                }
1✔
2211
            }
2212

2213
            assert!(success);
1✔
2214
        })
1✔
2215
        .await;
9✔
2216
    }
2217

2218
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2219
    async fn it_tracks_available_quota() {
1✔
2220
        with_temp_context(|app_ctx, _| async move {
1✔
2221
            let user = app_ctx
1✔
2222
                .register_user(UserRegistration {
1✔
2223
                    email: "foo@example.com".to_string(),
1✔
2224
                    password: "secret1234".to_string(),
1✔
2225
                    real_name: "Foo Bar".to_string(),
1✔
2226
                })
1✔
2227
                .await
3✔
2228
                .unwrap();
1✔
2229

2230
            let session = app_ctx
1✔
2231
                .login(UserCredentials {
1✔
2232
                    email: "foo@example.com".to_string(),
1✔
2233
                    password: "secret1234".to_string(),
1✔
2234
                })
1✔
2235
                .await
1✔
2236
                .unwrap();
1✔
2237

2238
            let admin_session = admin_login(&app_ctx).await;
1✔
2239

2240
            app_ctx
1✔
2241
                .session_context(admin_session.clone())
1✔
2242
                .db()
1✔
2243
                .update_quota_available_by_user(&user, 1)
1✔
2244
                .await
1✔
2245
                .unwrap();
1✔
2246

1✔
2247
            let quota = initialize_quota_tracking(app_ctx.session_context(admin_session).db());
1✔
2248

1✔
2249
            let tracking = quota.create_quota_tracking(&session, ComputationContext::new());
1✔
2250

1✔
2251
            tracking.work_unit_done();
1✔
2252
            tracking.work_unit_done();
1✔
2253

1✔
2254
            let db = app_ctx.session_context(session).db();
1✔
2255

1✔
2256
            // wait for quota to be recorded
1✔
2257
            let mut success = false;
1✔
2258
            for _ in 0..10 {
2✔
2259
                let available = db.quota_available().await.unwrap();
5✔
2260
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
2✔
2261

2262
                if available == -1 {
2✔
2263
                    success = true;
1✔
2264
                    break;
1✔
2265
                }
1✔
2266
            }
2267

2268
            assert!(success);
1✔
2269
        })
1✔
2270
        .await;
11✔
2271
    }
2272

2273
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2274
    async fn it_updates_quota_in_postgres() {
1✔
2275
        with_temp_context(|app_ctx, _| async move {
1✔
2276
            let user = app_ctx
1✔
2277
                .register_user(UserRegistration {
1✔
2278
                    email: "foo@example.com".to_string(),
1✔
2279
                    password: "secret1234".to_string(),
1✔
2280
                    real_name: "Foo Bar".to_string(),
1✔
2281
                })
1✔
2282
                .await
4✔
2283
                .unwrap();
1✔
2284

2285
            let session = app_ctx
1✔
2286
                .login(UserCredentials {
1✔
2287
                    email: "foo@example.com".to_string(),
1✔
2288
                    password: "secret1234".to_string(),
1✔
2289
                })
1✔
2290
                .await
2✔
2291
                .unwrap();
1✔
2292

1✔
2293
            let db = app_ctx.session_context(session.clone()).db();
1✔
2294
            let admin_db = app_ctx.session_context(UserSession::admin_session()).db();
1✔
2295

2296
            assert_eq!(
1✔
2297
                db.quota_available().await.unwrap(),
2✔
2298
                crate::util::config::get_config_element::<crate::pro::util::config::User>()
1✔
2299
                    .unwrap()
1✔
2300
                    .default_available_quota
2301
            );
2302

2303
            assert_eq!(
1✔
2304
                admin_db.quota_available_by_user(&user).await.unwrap(),
1✔
2305
                crate::util::config::get_config_element::<crate::pro::util::config::User>()
1✔
2306
                    .unwrap()
1✔
2307
                    .default_available_quota
2308
            );
2309

2310
            admin_db
1✔
2311
                .update_quota_available_by_user(&user, 123)
1✔
2312
                .await
1✔
2313
                .unwrap();
1✔
2314

2315
            assert_eq!(db.quota_available().await.unwrap(), 123);
1✔
2316

2317
            assert_eq!(admin_db.quota_available_by_user(&user).await.unwrap(), 123);
3✔
2318
        })
1✔
2319
        .await;
10✔
2320
    }
2321

2322
    #[allow(clippy::too_many_lines)]
2323
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2324
    async fn it_removes_layer_collections() {
1✔
2325
        with_temp_context(|app_ctx, _| async move {
1✔
2326
            let session = admin_login(&app_ctx).await;
5✔
2327

2328
            let layer_db = app_ctx.session_context(session).db();
1✔
2329

1✔
2330
            let layer = AddLayer {
1✔
2331
                name: "layer".to_string(),
1✔
2332
                description: "description".to_string(),
1✔
2333
                workflow: Workflow {
1✔
2334
                    operator: TypedOperator::Vector(
1✔
2335
                        MockPointSource {
1✔
2336
                            params: MockPointSourceParams {
1✔
2337
                                points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2338
                            },
1✔
2339
                        }
1✔
2340
                        .boxed(),
1✔
2341
                    ),
1✔
2342
                },
1✔
2343
                symbology: None,
1✔
2344
                metadata: Default::default(),
1✔
2345
                properties: Default::default(),
1✔
2346
            };
1✔
2347

2348
            let root_collection = &layer_db.get_root_layer_collection_id().await.unwrap();
1✔
2349

1✔
2350
            let collection = AddLayerCollection {
1✔
2351
                name: "top collection".to_string(),
1✔
2352
                description: "description".to_string(),
1✔
2353
                properties: Default::default(),
1✔
2354
            };
1✔
2355

2356
            let top_c_id = layer_db
1✔
2357
                .add_layer_collection(collection, root_collection)
1✔
2358
                .await
16✔
2359
                .unwrap();
1✔
2360

2361
            let l_id = layer_db.add_layer(layer, &top_c_id).await.unwrap();
9✔
2362

1✔
2363
            let collection = AddLayerCollection {
1✔
2364
                name: "empty collection".to_string(),
1✔
2365
                description: "description".to_string(),
1✔
2366
                properties: Default::default(),
1✔
2367
            };
1✔
2368

2369
            let empty_c_id = layer_db
1✔
2370
                .add_layer_collection(collection, &top_c_id)
1✔
2371
                .await
12✔
2372
                .unwrap();
1✔
2373

2374
            let items = layer_db
1✔
2375
                .load_layer_collection(
1✔
2376
                    &top_c_id,
1✔
2377
                    LayerCollectionListOptions {
1✔
2378
                        offset: 0,
1✔
2379
                        limit: 20,
1✔
2380
                    },
1✔
2381
                )
1✔
2382
                .await
8✔
2383
                .unwrap();
1✔
2384

1✔
2385
            assert_eq!(
1✔
2386
                items,
1✔
2387
                LayerCollection {
1✔
2388
                    id: ProviderLayerCollectionId {
1✔
2389
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2390
                        collection_id: top_c_id.clone(),
1✔
2391
                    },
1✔
2392
                    name: "top collection".to_string(),
1✔
2393
                    description: "description".to_string(),
1✔
2394
                    items: vec![
1✔
2395
                        CollectionItem::Collection(LayerCollectionListing {
1✔
2396
                            id: ProviderLayerCollectionId {
1✔
2397
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2398
                                collection_id: empty_c_id.clone(),
1✔
2399
                            },
1✔
2400
                            name: "empty collection".to_string(),
1✔
2401
                            description: "description".to_string(),
1✔
2402
                            properties: Default::default(),
1✔
2403
                        }),
1✔
2404
                        CollectionItem::Layer(LayerListing {
1✔
2405
                            id: ProviderLayerId {
1✔
2406
                                provider_id: INTERNAL_PROVIDER_ID,
1✔
2407
                                layer_id: l_id.clone(),
1✔
2408
                            },
1✔
2409
                            name: "layer".to_string(),
1✔
2410
                            description: "description".to_string(),
1✔
2411
                            properties: vec![],
1✔
2412
                        })
1✔
2413
                    ],
1✔
2414
                    entry_label: None,
1✔
2415
                    properties: vec![],
1✔
2416
                }
1✔
2417
            );
1✔
2418

2419
            // remove empty collection
2420
            layer_db.remove_layer_collection(&empty_c_id).await.unwrap();
12✔
2421

2422
            let items = layer_db
1✔
2423
                .load_layer_collection(
1✔
2424
                    &top_c_id,
1✔
2425
                    LayerCollectionListOptions {
1✔
2426
                        offset: 0,
1✔
2427
                        limit: 20,
1✔
2428
                    },
1✔
2429
                )
1✔
2430
                .await
8✔
2431
                .unwrap();
1✔
2432

1✔
2433
            assert_eq!(
1✔
2434
                items,
1✔
2435
                LayerCollection {
1✔
2436
                    id: ProviderLayerCollectionId {
1✔
2437
                        provider_id: INTERNAL_PROVIDER_ID,
1✔
2438
                        collection_id: top_c_id.clone(),
1✔
2439
                    },
1✔
2440
                    name: "top collection".to_string(),
1✔
2441
                    description: "description".to_string(),
1✔
2442
                    items: vec![CollectionItem::Layer(LayerListing {
1✔
2443
                        id: ProviderLayerId {
1✔
2444
                            provider_id: INTERNAL_PROVIDER_ID,
1✔
2445
                            layer_id: l_id.clone(),
1✔
2446
                        },
1✔
2447
                        name: "layer".to_string(),
1✔
2448
                        description: "description".to_string(),
1✔
2449
                        properties: vec![],
1✔
2450
                    })],
1✔
2451
                    entry_label: None,
1✔
2452
                    properties: vec![],
1✔
2453
                }
1✔
2454
            );
1✔
2455

2456
            // remove top (not root) collection
2457
            layer_db.remove_layer_collection(&top_c_id).await.unwrap();
12✔
2458

1✔
2459
            layer_db
1✔
2460
                .load_layer_collection(
1✔
2461
                    &top_c_id,
1✔
2462
                    LayerCollectionListOptions {
1✔
2463
                        offset: 0,
1✔
2464
                        limit: 20,
1✔
2465
                    },
1✔
2466
                )
1✔
2467
                .await
3✔
2468
                .unwrap_err();
1✔
2469

1✔
2470
            // should be deleted automatically
1✔
2471
            layer_db.load_layer(&l_id).await.unwrap_err();
3✔
2472

1✔
2473
            // it is not allowed to remove the root collection
1✔
2474
            layer_db
1✔
2475
                .remove_layer_collection(root_collection)
1✔
2476
                .await
3✔
2477
                .unwrap_err();
1✔
2478
            layer_db
1✔
2479
                .load_layer_collection(
1✔
2480
                    root_collection,
1✔
2481
                    LayerCollectionListOptions {
1✔
2482
                        offset: 0,
1✔
2483
                        limit: 20,
1✔
2484
                    },
1✔
2485
                )
1✔
2486
                .await
8✔
2487
                .unwrap();
1✔
2488
        })
1✔
2489
        .await;
10✔
2490
    }
2491

2492
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2493
    #[allow(clippy::too_many_lines)]
2494
    async fn it_removes_collections_from_collections() {
1✔
2495
        with_temp_context(|app_ctx, _| async move {
1✔
2496
            let session = admin_login(&app_ctx).await;
7✔
2497

2498
            let db = app_ctx.session_context(session).db();
1✔
2499

2500
            let root_collection_id = &db.get_root_layer_collection_id().await.unwrap();
1✔
2501

2502
            let mid_collection_id = db
1✔
2503
                .add_layer_collection(
1✔
2504
                    AddLayerCollection {
1✔
2505
                        name: "mid collection".to_string(),
1✔
2506
                        description: "description".to_string(),
1✔
2507
                        properties: Default::default(),
1✔
2508
                    },
1✔
2509
                    root_collection_id,
1✔
2510
                )
1✔
2511
                .await
18✔
2512
                .unwrap();
1✔
2513

2514
            let bottom_collection_id = db
1✔
2515
                .add_layer_collection(
1✔
2516
                    AddLayerCollection {
1✔
2517
                        name: "bottom collection".to_string(),
1✔
2518
                        description: "description".to_string(),
1✔
2519
                        properties: Default::default(),
1✔
2520
                    },
1✔
2521
                    &mid_collection_id,
1✔
2522
                )
1✔
2523
                .await
8✔
2524
                .unwrap();
1✔
2525

2526
            let layer_id = db
1✔
2527
                .add_layer(
1✔
2528
                    AddLayer {
1✔
2529
                        name: "layer".to_string(),
1✔
2530
                        description: "description".to_string(),
1✔
2531
                        workflow: Workflow {
1✔
2532
                            operator: TypedOperator::Vector(
1✔
2533
                                MockPointSource {
1✔
2534
                                    params: MockPointSourceParams {
1✔
2535
                                        points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2536
                                    },
1✔
2537
                                }
1✔
2538
                                .boxed(),
1✔
2539
                            ),
1✔
2540
                        },
1✔
2541
                        symbology: None,
1✔
2542
                        metadata: Default::default(),
1✔
2543
                        properties: Default::default(),
1✔
2544
                    },
1✔
2545
                    &mid_collection_id,
1✔
2546
                )
1✔
2547
                .await
12✔
2548
                .unwrap();
1✔
2549

1✔
2550
            // removing the mid collection…
1✔
2551
            db.remove_layer_collection_from_parent(&mid_collection_id, root_collection_id)
1✔
2552
                .await
14✔
2553
                .unwrap();
1✔
2554

1✔
2555
            // …should remove itself
1✔
2556
            db.load_layer_collection(&mid_collection_id, LayerCollectionListOptions::default())
1✔
2557
                .await
3✔
2558
                .unwrap_err();
1✔
2559

1✔
2560
            // …should remove the bottom collection
1✔
2561
            db.load_layer_collection(&bottom_collection_id, LayerCollectionListOptions::default())
1✔
2562
                .await
3✔
2563
                .unwrap_err();
1✔
2564

1✔
2565
            // … and should remove the layer of the bottom collection
1✔
2566
            db.load_layer(&layer_id).await.unwrap_err();
3✔
2567

1✔
2568
            // the root collection is still there
1✔
2569
            db.load_layer_collection(root_collection_id, LayerCollectionListOptions::default())
1✔
2570
                .await
8✔
2571
                .unwrap();
1✔
2572
        })
1✔
2573
        .await;
10✔
2574
    }
2575

2576
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2577
    #[allow(clippy::too_many_lines)]
2578
    async fn it_removes_layers_from_collections() {
1✔
2579
        with_temp_context(|app_ctx, _| async move {
1✔
2580
            let session = admin_login(&app_ctx).await;
5✔
2581

2582
            let db = app_ctx.session_context(session).db();
1✔
2583

2584
            let root_collection = &db.get_root_layer_collection_id().await.unwrap();
1✔
2585

2586
            let another_collection = db
1✔
2587
                .add_layer_collection(
1✔
2588
                    AddLayerCollection {
1✔
2589
                        name: "top collection".to_string(),
1✔
2590
                        description: "description".to_string(),
1✔
2591
                        properties: Default::default(),
1✔
2592
                    },
1✔
2593
                    root_collection,
1✔
2594
                )
1✔
2595
                .await
1✔
2596
                .unwrap();
1✔
2597

2598
            let layer_in_one_collection = db
1✔
2599
                .add_layer(
1✔
2600
                    AddLayer {
1✔
2601
                        name: "layer 1".to_string(),
1✔
2602
                        description: "description".to_string(),
1✔
2603
                        workflow: Workflow {
1✔
2604
                            operator: TypedOperator::Vector(
1✔
2605
                                MockPointSource {
1✔
2606
                                    params: MockPointSourceParams {
1✔
2607
                                        points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2608
                                    },
1✔
2609
                                }
1✔
2610
                                .boxed(),
1✔
2611
                            ),
1✔
2612
                        },
1✔
2613
                        symbology: None,
1✔
2614
                        metadata: Default::default(),
1✔
2615
                        properties: Default::default(),
1✔
2616
                    },
1✔
2617
                    &another_collection,
1✔
2618
                )
1✔
2619
                .await
1✔
2620
                .unwrap();
1✔
2621

2622
            let layer_in_two_collections = db
1✔
2623
                .add_layer(
1✔
2624
                    AddLayer {
1✔
2625
                        name: "layer 2".to_string(),
1✔
2626
                        description: "description".to_string(),
1✔
2627
                        workflow: Workflow {
1✔
2628
                            operator: TypedOperator::Vector(
1✔
2629
                                MockPointSource {
1✔
2630
                                    params: MockPointSourceParams {
1✔
2631
                                        points: vec![Coordinate2D::new(1., 2.); 3],
1✔
2632
                                    },
1✔
2633
                                }
1✔
2634
                                .boxed(),
1✔
2635
                            ),
1✔
2636
                        },
1✔
2637
                        symbology: None,
1✔
2638
                        metadata: Default::default(),
1✔
2639
                        properties: Default::default(),
1✔
2640
                    },
1✔
2641
                    &another_collection,
1✔
2642
                )
1✔
2643
                .await
11✔
2644
                .unwrap();
1✔
2645

1✔
2646
            db.add_layer_to_collection(&layer_in_two_collections, root_collection)
1✔
2647
                .await
6✔
2648
                .unwrap();
1✔
2649

1✔
2650
            // remove first layer --> should be deleted entirely
1✔
2651

1✔
2652
            db.remove_layer_from_collection(&layer_in_one_collection, &another_collection)
1✔
2653
                .await
10✔
2654
                .unwrap();
1✔
2655

2656
            let number_of_layer_in_collection = db
1✔
2657
                .load_layer_collection(
1✔
2658
                    &another_collection,
1✔
2659
                    LayerCollectionListOptions {
1✔
2660
                        offset: 0,
1✔
2661
                        limit: 20,
1✔
2662
                    },
1✔
2663
                )
1✔
2664
                .await
8✔
2665
                .unwrap()
1✔
2666
                .items
1✔
2667
                .len();
1✔
2668
            assert_eq!(
1✔
2669
                number_of_layer_in_collection,
1✔
2670
                1 /* only the other collection should be here */
1✔
2671
            );
1✔
2672

2673
            db.load_layer(&layer_in_one_collection).await.unwrap_err();
3✔
2674

1✔
2675
            // remove second layer --> should only be gone in collection
1✔
2676

1✔
2677
            db.remove_layer_from_collection(&layer_in_two_collections, &another_collection)
1✔
2678
                .await
10✔
2679
                .unwrap();
1✔
2680

2681
            let number_of_layer_in_collection = db
1✔
2682
                .load_layer_collection(
1✔
2683
                    &another_collection,
1✔
2684
                    LayerCollectionListOptions {
1✔
2685
                        offset: 0,
1✔
2686
                        limit: 20,
1✔
2687
                    },
1✔
2688
                )
1✔
2689
                .await
8✔
2690
                .unwrap()
1✔
2691
                .items
1✔
2692
                .len();
1✔
2693
            assert_eq!(
1✔
2694
                number_of_layer_in_collection,
1✔
2695
                0 /* both layers were deleted */
1✔
2696
            );
1✔
2697

2698
            db.load_layer(&layer_in_two_collections).await.unwrap();
6✔
2699
        })
1✔
2700
        .await;
11✔
2701
    }
2702

2703
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2704
    #[allow(clippy::too_many_lines)]
2705
    async fn it_deletes_dataset() {
1✔
2706
        with_temp_context(|app_ctx, _| async move {
1✔
2707
            let dataset_id = DatasetId::from_str("2e8af98d-3b98-4e2c-a35b-e487bffad7b6").unwrap();
1✔
2708

1✔
2709
            let loading_info = OgrSourceDataset {
1✔
2710
                file_name: PathBuf::from("test.csv"),
1✔
2711
                layer_name: "test.csv".to_owned(),
1✔
2712
                data_type: Some(VectorDataType::MultiPoint),
1✔
2713
                time: OgrSourceDatasetTimeType::Start {
1✔
2714
                    start_field: "start".to_owned(),
1✔
2715
                    start_format: OgrSourceTimeFormat::Auto,
1✔
2716
                    duration: OgrSourceDurationSpec::Zero,
1✔
2717
                },
1✔
2718
                default_geometry: None,
1✔
2719
                columns: Some(OgrSourceColumnSpec {
1✔
2720
                    format_specifics: Some(FormatSpecifics::Csv {
1✔
2721
                        header: CsvHeader::Auto,
1✔
2722
                    }),
1✔
2723
                    x: "x".to_owned(),
1✔
2724
                    y: None,
1✔
2725
                    int: vec![],
1✔
2726
                    float: vec![],
1✔
2727
                    text: vec![],
1✔
2728
                    bool: vec![],
1✔
2729
                    datetime: vec![],
1✔
2730
                    rename: None,
1✔
2731
                }),
1✔
2732
                force_ogr_time_filter: false,
1✔
2733
                force_ogr_spatial_filter: false,
1✔
2734
                on_error: OgrSourceErrorSpec::Ignore,
1✔
2735
                sql_query: None,
1✔
2736
                attribute_query: None,
1✔
2737
            };
1✔
2738

1✔
2739
            let meta_data = MetaDataDefinition::OgrMetaData(StaticMetaData::<
1✔
2740
                OgrSourceDataset,
1✔
2741
                VectorResultDescriptor,
1✔
2742
                VectorQueryRectangle,
1✔
2743
            > {
1✔
2744
                loading_info: loading_info.clone(),
1✔
2745
                result_descriptor: VectorResultDescriptor {
1✔
2746
                    data_type: VectorDataType::MultiPoint,
1✔
2747
                    spatial_reference: SpatialReference::epsg_4326().into(),
1✔
2748
                    columns: [(
1✔
2749
                        "foo".to_owned(),
1✔
2750
                        VectorColumnInfo {
1✔
2751
                            data_type: FeatureDataType::Float,
1✔
2752
                            measurement: Measurement::Unitless,
1✔
2753
                        },
1✔
2754
                    )]
1✔
2755
                    .into_iter()
1✔
2756
                    .collect(),
1✔
2757
                    time: None,
1✔
2758
                    bbox: None,
1✔
2759
                },
1✔
2760
                phantom: Default::default(),
1✔
2761
            });
1✔
2762

2763
            let session = app_ctx.create_anonymous_session().await.unwrap();
13✔
2764

1✔
2765
            let db = app_ctx.session_context(session.clone()).db();
1✔
2766
            let wrap = db.wrap_meta_data(meta_data);
1✔
2767
            db.add_dataset(
1✔
2768
                AddDataset {
1✔
2769
                    id: Some(dataset_id),
1✔
2770
                    name: "Ogr Test".to_owned(),
1✔
2771
                    description: "desc".to_owned(),
1✔
2772
                    source_operator: "OgrSource".to_owned(),
1✔
2773
                    symbology: None,
1✔
2774
                    provenance: Some(vec![Provenance {
1✔
2775
                        citation: "citation".to_owned(),
1✔
2776
                        license: "license".to_owned(),
1✔
2777
                        uri: "uri".to_owned(),
1✔
2778
                    }]),
1✔
2779
                },
1✔
2780
                wrap,
1✔
2781
            )
1✔
2782
            .await
11✔
2783
            .unwrap();
1✔
2784

2785
            assert!(db.load_dataset(&dataset_id).await.is_ok());
3✔
2786

2787
            db.delete_dataset(dataset_id).await.unwrap();
11✔
2788

2789
            assert!(db.load_dataset(&dataset_id).await.is_err());
3✔
2790
        })
1✔
2791
        .await;
12✔
2792
    }
2793

2794
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2795
    #[allow(clippy::too_many_lines)]
2796
    async fn it_deletes_admin_dataset() {
1✔
2797
        with_temp_context(|app_ctx, _| async move {
1✔
2798
            let dataset_id = DatasetId::from_str("2e8af98d-3b98-4e2c-a35b-e487bffad7b6").unwrap();
1✔
2799

1✔
2800
            let loading_info = OgrSourceDataset {
1✔
2801
                file_name: PathBuf::from("test.csv"),
1✔
2802
                layer_name: "test.csv".to_owned(),
1✔
2803
                data_type: Some(VectorDataType::MultiPoint),
1✔
2804
                time: OgrSourceDatasetTimeType::Start {
1✔
2805
                    start_field: "start".to_owned(),
1✔
2806
                    start_format: OgrSourceTimeFormat::Auto,
1✔
2807
                    duration: OgrSourceDurationSpec::Zero,
1✔
2808
                },
1✔
2809
                default_geometry: None,
1✔
2810
                columns: Some(OgrSourceColumnSpec {
1✔
2811
                    format_specifics: Some(FormatSpecifics::Csv {
1✔
2812
                        header: CsvHeader::Auto,
1✔
2813
                    }),
1✔
2814
                    x: "x".to_owned(),
1✔
2815
                    y: None,
1✔
2816
                    int: vec![],
1✔
2817
                    float: vec![],
1✔
2818
                    text: vec![],
1✔
2819
                    bool: vec![],
1✔
2820
                    datetime: vec![],
1✔
2821
                    rename: None,
1✔
2822
                }),
1✔
2823
                force_ogr_time_filter: false,
1✔
2824
                force_ogr_spatial_filter: false,
1✔
2825
                on_error: OgrSourceErrorSpec::Ignore,
1✔
2826
                sql_query: None,
1✔
2827
                attribute_query: None,
1✔
2828
            };
1✔
2829

1✔
2830
            let meta_data = MetaDataDefinition::OgrMetaData(StaticMetaData::<
1✔
2831
                OgrSourceDataset,
1✔
2832
                VectorResultDescriptor,
1✔
2833
                VectorQueryRectangle,
1✔
2834
            > {
1✔
2835
                loading_info: loading_info.clone(),
1✔
2836
                result_descriptor: VectorResultDescriptor {
1✔
2837
                    data_type: VectorDataType::MultiPoint,
1✔
2838
                    spatial_reference: SpatialReference::epsg_4326().into(),
1✔
2839
                    columns: [(
1✔
2840
                        "foo".to_owned(),
1✔
2841
                        VectorColumnInfo {
1✔
2842
                            data_type: FeatureDataType::Float,
1✔
2843
                            measurement: Measurement::Unitless,
1✔
2844
                        },
1✔
2845
                    )]
1✔
2846
                    .into_iter()
1✔
2847
                    .collect(),
1✔
2848
                    time: None,
1✔
2849
                    bbox: None,
1✔
2850
                },
1✔
2851
                phantom: Default::default(),
1✔
2852
            });
1✔
2853

2854
            let session = admin_login(&app_ctx).await;
7✔
2855

2856
            let db = app_ctx.session_context(session).db();
1✔
2857
            let wrap = db.wrap_meta_data(meta_data);
1✔
2858
            db.add_dataset(
1✔
2859
                AddDataset {
1✔
2860
                    id: Some(dataset_id),
1✔
2861
                    name: "Ogr Test".to_owned(),
1✔
2862
                    description: "desc".to_owned(),
1✔
2863
                    source_operator: "OgrSource".to_owned(),
1✔
2864
                    symbology: None,
1✔
2865
                    provenance: Some(vec![Provenance {
1✔
2866
                        citation: "citation".to_owned(),
1✔
2867
                        license: "license".to_owned(),
1✔
2868
                        uri: "uri".to_owned(),
1✔
2869
                    }]),
1✔
2870
                },
1✔
2871
                wrap,
1✔
2872
            )
1✔
2873
            .await
8✔
2874
            .unwrap();
1✔
2875

2876
            assert!(db.load_dataset(&dataset_id).await.is_ok());
3✔
2877

2878
            db.delete_dataset(dataset_id).await.unwrap();
11✔
2879

2880
            assert!(db.load_dataset(&dataset_id).await.is_err());
3✔
2881
        })
1✔
2882
        .await;
10✔
2883
    }
2884

2885
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2886
    async fn test_missing_layer_dataset_in_collection_listing() {
1✔
2887
        with_temp_context(|app_ctx, _| async move {
1✔
2888
            let session = admin_login(&app_ctx).await;
7✔
2889
            let db = app_ctx.session_context(session).db();
1✔
2890

2891
            let root_collection_id = &db.get_root_layer_collection_id().await.unwrap();
1✔
2892

2893
            let top_collection_id = db
1✔
2894
                .add_layer_collection(
1✔
2895
                    AddLayerCollection {
1✔
2896
                        name: "top collection".to_string(),
1✔
2897
                        description: "description".to_string(),
1✔
2898
                        properties: Default::default(),
1✔
2899
                    },
1✔
2900
                    root_collection_id,
1✔
2901
                )
1✔
2902
                .await
20✔
2903
                .unwrap();
1✔
2904

1✔
2905
            let faux_layer = LayerId("faux".to_string());
1✔
2906

1✔
2907
            // this should fail
1✔
2908
            db.add_layer_to_collection(&faux_layer, &top_collection_id)
1✔
2909
                .await
3✔
2910
                .unwrap_err();
1✔
2911

2912
            let root_collection_layers = db
1✔
2913
                .load_layer_collection(
1✔
2914
                    &top_collection_id,
1✔
2915
                    LayerCollectionListOptions {
1✔
2916
                        offset: 0,
1✔
2917
                        limit: 20,
1✔
2918
                    },
1✔
2919
                )
1✔
2920
                .await
8✔
2921
                .unwrap();
1✔
2922

1✔
2923
            assert_eq!(
1✔
2924
                root_collection_layers,
1✔
2925
                LayerCollection {
1✔
2926
                    id: ProviderLayerCollectionId {
1✔
2927
                        provider_id: DataProviderId(
1✔
2928
                            "ce5e84db-cbf9-48a2-9a32-d4b7cc56ea74".try_into().unwrap()
1✔
2929
                        ),
1✔
2930
                        collection_id: top_collection_id.clone(),
1✔
2931
                    },
1✔
2932
                    name: "top collection".to_string(),
1✔
2933
                    description: "description".to_string(),
1✔
2934
                    items: vec![],
1✔
2935
                    entry_label: None,
1✔
2936
                    properties: vec![],
1✔
2937
                }
1✔
2938
            );
1✔
2939
        })
1✔
2940
        .await;
10✔
2941
    }
2942

2943
    #[allow(clippy::too_many_lines)]
2944
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
2945
    async fn it_restricts_layer_permissions() {
1✔
2946
        with_temp_context(|app_ctx, _| async move {
1✔
2947
            let admin_session = admin_login(&app_ctx).await;
7✔
2948
            let session1 = app_ctx.create_anonymous_session().await.unwrap();
13✔
2949

1✔
2950
            let admin_db = app_ctx.session_context(admin_session.clone()).db();
1✔
2951
            let db1 = app_ctx.session_context(session1.clone()).db();
1✔
2952

2953
            let root = admin_db.get_root_layer_collection_id().await.unwrap();
1✔
2954

2955
            // add new collection as admin
2956
            let new_collection_id = admin_db
1✔
2957
                .add_layer_collection(
1✔
2958
                    AddLayerCollection {
1✔
2959
                        name: "admin collection".to_string(),
1✔
2960
                        description: String::new(),
1✔
2961
                        properties: Default::default(),
1✔
2962
                    },
1✔
2963
                    &root,
1✔
2964
                )
1✔
2965
                .await
20✔
2966
                .unwrap();
1✔
2967

2968
            // load as regular user, not visible
2969
            let collection = db1
1✔
2970
                .load_layer_collection(
1✔
2971
                    &root,
1✔
2972
                    LayerCollectionListOptions {
1✔
2973
                        offset: 0,
1✔
2974
                        limit: 10,
1✔
2975
                    },
1✔
2976
                )
1✔
2977
                .await
8✔
2978
                .unwrap();
1✔
2979
            assert!(!collection.items.iter().any(|c| match c {
1✔
2980
                CollectionItem::Collection(c) => c.id.collection_id == new_collection_id,
1✔
2981
                CollectionItem::Layer(_) => false,
×
2982
            }));
1✔
2983

2984
            // give user read permission
2985
            admin_db
1✔
2986
                .add_permission(
1✔
2987
                    session1.user.id.into(),
1✔
2988
                    new_collection_id.clone(),
1✔
2989
                    Permission::Read,
1✔
2990
                )
1✔
2991
                .await
6✔
2992
                .unwrap();
1✔
2993

2994
            // now visible
2995
            let collection = db1
1✔
2996
                .load_layer_collection(
1✔
2997
                    &root,
1✔
2998
                    LayerCollectionListOptions {
1✔
2999
                        offset: 0,
1✔
3000
                        limit: 10,
1✔
3001
                    },
1✔
3002
                )
1✔
3003
                .await
8✔
3004
                .unwrap();
1✔
3005

1✔
3006
            assert!(collection.items.iter().any(|c| match c {
1✔
3007
                CollectionItem::Collection(c) => c.id.collection_id == new_collection_id,
1✔
3008
                CollectionItem::Layer(_) => false,
×
3009
            }));
1✔
3010

3011
            // add new layer in the collection as user, fails because only read permission
3012
            let result = db1
1✔
3013
                .add_layer_collection(
1✔
3014
                    AddLayerCollection {
1✔
3015
                        name: "user layer".to_string(),
1✔
3016
                        description: String::new(),
1✔
3017
                        properties: Default::default(),
1✔
3018
                    },
1✔
3019
                    &new_collection_id,
1✔
3020
                )
1✔
3021
                .await;
3✔
3022

3023
            assert!(result.is_err());
1✔
3024

3025
            // give user owner permission
3026
            admin_db
1✔
3027
                .add_permission(
1✔
3028
                    session1.user.id.into(),
1✔
3029
                    new_collection_id.clone(),
1✔
3030
                    Permission::Owner,
1✔
3031
                )
1✔
3032
                .await
6✔
3033
                .unwrap();
1✔
3034

1✔
3035
            // add now works
1✔
3036
            db1.add_layer_collection(
1✔
3037
                AddLayerCollection {
1✔
3038
                    name: "user layer".to_string(),
1✔
3039
                    description: String::new(),
1✔
3040
                    properties: Default::default(),
1✔
3041
                },
1✔
3042
                &new_collection_id,
1✔
3043
            )
1✔
3044
            .await
12✔
3045
            .unwrap();
1✔
3046

1✔
3047
            // remove permissions again
1✔
3048
            admin_db
1✔
3049
                .remove_permission(
1✔
3050
                    session1.user.id.into(),
1✔
3051
                    new_collection_id.clone(),
1✔
3052
                    Permission::Read,
1✔
3053
                )
1✔
3054
                .await
6✔
3055
                .unwrap();
1✔
3056
            admin_db
1✔
3057
                .remove_permission(
1✔
3058
                    session1.user.id.into(),
1✔
3059
                    new_collection_id.clone(),
1✔
3060
                    Permission::Owner,
1✔
3061
                )
1✔
3062
                .await
6✔
3063
                .unwrap();
1✔
3064

3065
            // access is gone now
3066
            let result = db1
1✔
3067
                .add_layer_collection(
1✔
3068
                    AddLayerCollection {
1✔
3069
                        name: "user layer".to_string(),
1✔
3070
                        description: String::new(),
1✔
3071
                        properties: Default::default(),
1✔
3072
                    },
1✔
3073
                    &root,
1✔
3074
                )
1✔
3075
                .await;
3✔
3076

3077
            assert!(result.is_err());
1✔
3078

3079
            let collection = db1
1✔
3080
                .load_layer_collection(
1✔
3081
                    &root,
1✔
3082
                    LayerCollectionListOptions {
1✔
3083
                        offset: 0,
1✔
3084
                        limit: 10,
1✔
3085
                    },
1✔
3086
                )
1✔
3087
                .await
8✔
3088
                .unwrap();
1✔
3089

1✔
3090
            assert!(!collection.items.iter().any(|c| match c {
1✔
3091
                CollectionItem::Collection(c) => c.id.collection_id == new_collection_id,
1✔
3092
                CollectionItem::Layer(_) => false,
×
3093
            }));
1✔
3094
        })
1✔
3095
        .await;
12✔
3096
    }
3097

3098
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
3099
    async fn it_handles_user_roles() {
1✔
3100
        with_temp_context(|app_ctx, _| async move {
1✔
3101
            let admin_session = admin_login(&app_ctx).await;
7✔
3102
            let user_id = app_ctx
1✔
3103
                .register_user(UserRegistration {
1✔
3104
                    email: "foo@example.com".to_string(),
1✔
3105
                    password: "secret123".to_string(),
1✔
3106
                    real_name: "Foo Bar".to_string(),
1✔
3107
                })
1✔
3108
                .await
11✔
3109
                .unwrap();
1✔
3110

1✔
3111
            let admin_db = app_ctx.session_context(admin_session.clone()).db();
1✔
3112

3113
            // create a new role
3114
            let role_id = admin_db.add_role("foo").await.unwrap();
3✔
3115

3116
            let user_session = app_ctx
1✔
3117
                .login(UserCredentials {
1✔
3118
                    email: "foo@example.com".to_string(),
1✔
3119
                    password: "secret123".to_string(),
1✔
3120
                })
1✔
3121
                .await
7✔
3122
                .unwrap();
1✔
3123

1✔
3124
            // user does not have the role yet
1✔
3125

1✔
3126
            assert!(!user_session.roles.contains(&role_id));
1✔
3127

3128
            // we assign the role to the user
3129
            admin_db.assign_role(&role_id, &user_id).await.unwrap();
3✔
3130

3131
            let user_session = app_ctx
1✔
3132
                .login(UserCredentials {
1✔
3133
                    email: "foo@example.com".to_string(),
1✔
3134
                    password: "secret123".to_string(),
1✔
3135
                })
1✔
3136
                .await
7✔
3137
                .unwrap();
1✔
3138

1✔
3139
            // should be present now
1✔
3140
            assert!(user_session.roles.contains(&role_id));
1✔
3141

3142
            // we revoke it
3143
            admin_db.revoke_role(&role_id, &user_id).await.unwrap();
3✔
3144

3145
            let user_session = app_ctx
1✔
3146
                .login(UserCredentials {
1✔
3147
                    email: "foo@example.com".to_string(),
1✔
3148
                    password: "secret123".to_string(),
1✔
3149
                })
1✔
3150
                .await
7✔
3151
                .unwrap();
1✔
3152

1✔
3153
            // the role is gone now
1✔
3154
            assert!(!user_session.roles.contains(&role_id));
1✔
3155

3156
            // assign it again and then delete the whole role, should not be present at user
3157

3158
            admin_db.assign_role(&role_id, &user_id).await.unwrap();
3✔
3159

1✔
3160
            admin_db.remove_role(&role_id).await.unwrap();
3✔
3161

3162
            let user_session = app_ctx
1✔
3163
                .login(UserCredentials {
1✔
3164
                    email: "foo@example.com".to_string(),
1✔
3165
                    password: "secret123".to_string(),
1✔
3166
                })
1✔
3167
                .await
7✔
3168
                .unwrap();
1✔
3169

1✔
3170
            assert!(!user_session.roles.contains(&role_id));
1✔
3171
        })
1✔
3172
        .await;
11✔
3173
    }
3174

3175
    #[allow(clippy::too_many_lines)]
3176
    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
1✔
3177
    async fn it_updates_project_layer_symbology() {
1✔
3178
        with_temp_context(|app_ctx, _| async move {
1✔
3179
            let session = app_ctx.create_anonymous_session().await.unwrap();
13✔
3180

3181
            let (_, workflow_id) = register_ndvi_workflow_helper(&app_ctx).await;
27✔
3182

3183
            let db = app_ctx.session_context(session.clone()).db();
1✔
3184

1✔
3185
            let create_project: CreateProject = serde_json::from_value(json!({
1✔
3186
                "name": "Default",
1✔
3187
                "description": "Default project",
1✔
3188
                "bounds": {
1✔
3189
                    "boundingBox": {
1✔
3190
                        "lowerLeftCoordinate": {
1✔
3191
                            "x": -180,
1✔
3192
                            "y": -90
1✔
3193
                        },
1✔
3194
                        "upperRightCoordinate": {
1✔
3195
                            "x": 180,
1✔
3196
                            "y": 90
1✔
3197
                        }
1✔
3198
                    },
1✔
3199
                    "spatialReference": "EPSG:4326",
1✔
3200
                    "timeInterval": {
1✔
3201
                        "start": 1_396_353_600_000i64,
1✔
3202
                        "end": 1_396_353_600_000i64
1✔
3203
                    }
1✔
3204
                },
1✔
3205
                "timeStep": {
1✔
3206
                    "step": 1,
1✔
3207
                    "granularity": "months"
1✔
3208
                }
1✔
3209
            }))
1✔
3210
            .unwrap();
1✔
3211

3212
            let project_id = db.create_project(create_project).await.unwrap();
26✔
3213

1✔
3214
            let update: UpdateProject = serde_json::from_value(json!({
1✔
3215
                "id": project_id.to_string(),
1✔
3216
                "layers": [{
1✔
3217
                    "name": "NDVI",
1✔
3218
                    "workflow": workflow_id.to_string(),
1✔
3219
                    "visibility": {
1✔
3220
                        "data": true,
1✔
3221
                        "legend": false
1✔
3222
                    },
1✔
3223
                    "symbology": {
1✔
3224
                        "type": "raster",
1✔
3225
                        "opacity": 1,
1✔
3226
                        "colorizer": {
1✔
3227
                            "type": "linearGradient",
1✔
3228
                            "breakpoints": [{
1✔
3229
                                "value": 1,
1✔
3230
                                "color": [0, 0, 0, 255]
1✔
3231
                            }, {
1✔
3232
                                "value": 255,
1✔
3233
                                "color": [255, 255, 255, 255]
1✔
3234
                            }],
1✔
3235
                            "noDataColor": [0, 0, 0, 0],
1✔
3236
                            "overColor": [255, 255, 255, 127],
1✔
3237
                            "underColor": [255, 255, 255, 127]
1✔
3238
                        }
1✔
3239
                    }
1✔
3240
                }]
1✔
3241
            }))
1✔
3242
            .unwrap();
1✔
3243

1✔
3244
            db.update_project(update).await.unwrap();
51✔
3245

1✔
3246
            let update: UpdateProject = serde_json::from_value(json!({
1✔
3247
                "id": project_id.to_string(),
1✔
3248
                "layers": [{
1✔
3249
                    "name": "NDVI",
1✔
3250
                    "workflow": workflow_id.to_string(),
1✔
3251
                    "visibility": {
1✔
3252
                        "data": true,
1✔
3253
                        "legend": false
1✔
3254
                    },
1✔
3255
                    "symbology": {
1✔
3256
                        "type": "raster",
1✔
3257
                        "opacity": 1,
1✔
3258
                        "colorizer": {
1✔
3259
                            "type": "linearGradient",
1✔
3260
                            "breakpoints": [{
1✔
3261
                                "value": 1,
1✔
3262
                                "color": [0, 0, 4, 255]
1✔
3263
                            }, {
1✔
3264
                                "value": 17.866_666_666_666_667,
1✔
3265
                                "color": [11, 9, 36, 255]
1✔
3266
                            }, {
1✔
3267
                                "value": 34.733_333_333_333_334,
1✔
3268
                                "color": [32, 17, 75, 255]
1✔
3269
                            }, {
1✔
3270
                                "value": 51.6,
1✔
3271
                                "color": [59, 15, 112, 255]
1✔
3272
                            }, {
1✔
3273
                                "value": 68.466_666_666_666_67,
1✔
3274
                                "color": [87, 21, 126, 255]
1✔
3275
                            }, {
1✔
3276
                                "value": 85.333_333_333_333_33,
1✔
3277
                                "color": [114, 31, 129, 255]
1✔
3278
                            }, {
1✔
3279
                                "value": 102.199_999_999_999_99,
1✔
3280
                                "color": [140, 41, 129, 255]
1✔
3281
                            }, {
1✔
3282
                                "value": 119.066_666_666_666_65,
1✔
3283
                                "color": [168, 50, 125, 255]
1✔
3284
                            }, {
1✔
3285
                                "value": 135.933_333_333_333_34,
1✔
3286
                                "color": [196, 60, 117, 255]
1✔
3287
                            }, {
1✔
3288
                                "value": 152.799_999_999_999_98,
1✔
3289
                                "color": [222, 73, 104, 255]
1✔
3290
                            }, {
1✔
3291
                                "value": 169.666_666_666_666_66,
1✔
3292
                                "color": [241, 96, 93, 255]
1✔
3293
                            }, {
1✔
3294
                                "value": 186.533_333_333_333_33,
1✔
3295
                                "color": [250, 127, 94, 255]
1✔
3296
                            }, {
1✔
3297
                                "value": 203.399_999_999_999_98,
1✔
3298
                                "color": [254, 159, 109, 255]
1✔
3299
                            }, {
1✔
3300
                                "value": 220.266_666_666_666_65,
1✔
3301
                                "color": [254, 191, 132, 255]
1✔
3302
                            }, {
1✔
3303
                                "value": 237.133_333_333_333_3,
1✔
3304
                                "color": [253, 222, 160, 255]
1✔
3305
                            }, {
1✔
3306
                                "value": 254,
1✔
3307
                                "color": [252, 253, 191, 255]
1✔
3308
                            }],
1✔
3309
                            "noDataColor": [0, 0, 0, 0],
1✔
3310
                            "overColor": [255, 255, 255, 127],
1✔
3311
                            "underColor": [255, 255, 255, 127]
1✔
3312
                        }
1✔
3313
                    }
1✔
3314
                }]
1✔
3315
            }))
1✔
3316
            .unwrap();
1✔
3317

1✔
3318
            db.update_project(update).await.unwrap();
39✔
3319

1✔
3320
            let update: UpdateProject = serde_json::from_value(json!({
1✔
3321
                "id": project_id.to_string(),
1✔
3322
                "layers": [{
1✔
3323
                    "name": "NDVI",
1✔
3324
                    "workflow": workflow_id.to_string(),
1✔
3325
                    "visibility": {
1✔
3326
                        "data": true,
1✔
3327
                        "legend": false
1✔
3328
                    },
1✔
3329
                    "symbology": {
1✔
3330
                        "type": "raster",
1✔
3331
                        "opacity": 1,
1✔
3332
                        "colorizer": {
1✔
3333
                            "type": "linearGradient",
1✔
3334
                            "breakpoints": [{
1✔
3335
                                "value": 1,
1✔
3336
                                "color": [0, 0, 4, 255]
1✔
3337
                            }, {
1✔
3338
                                "value": 17.866_666_666_666_667,
1✔
3339
                                "color": [11, 9, 36, 255]
1✔
3340
                            }, {
1✔
3341
                                "value": 34.733_333_333_333_334,
1✔
3342
                                "color": [32, 17, 75, 255]
1✔
3343
                            }, {
1✔
3344
                                "value": 51.6,
1✔
3345
                                "color": [59, 15, 112, 255]
1✔
3346
                            }, {
1✔
3347
                                "value": 68.466_666_666_666_67,
1✔
3348
                                "color": [87, 21, 126, 255]
1✔
3349
                            }, {
1✔
3350
                                "value": 85.333_333_333_333_33,
1✔
3351
                                "color": [114, 31, 129, 255]
1✔
3352
                            }, {
1✔
3353
                                "value": 102.199_999_999_999_99,
1✔
3354
                                "color": [140, 41, 129, 255]
1✔
3355
                            }, {
1✔
3356
                                "value": 119.066_666_666_666_65,
1✔
3357
                                "color": [168, 50, 125, 255]
1✔
3358
                            }, {
1✔
3359
                                "value": 135.933_333_333_333_34,
1✔
3360
                                "color": [196, 60, 117, 255]
1✔
3361
                            }, {
1✔
3362
                                "value": 152.799_999_999_999_98,
1✔
3363
                                "color": [222, 73, 104, 255]
1✔
3364
                            }, {
1✔
3365
                                "value": 169.666_666_666_666_66,
1✔
3366
                                "color": [241, 96, 93, 255]
1✔
3367
                            }, {
1✔
3368
                                "value": 186.533_333_333_333_33,
1✔
3369
                                "color": [250, 127, 94, 255]
1✔
3370
                            }, {
1✔
3371
                                "value": 203.399_999_999_999_98,
1✔
3372
                                "color": [254, 159, 109, 255]
1✔
3373
                            }, {
1✔
3374
                                "value": 220.266_666_666_666_65,
1✔
3375
                                "color": [254, 191, 132, 255]
1✔
3376
                            }, {
1✔
3377
                                "value": 237.133_333_333_333_3,
1✔
3378
                                "color": [253, 222, 160, 255]
1✔
3379
                            }, {
1✔
3380
                                "value": 254,
1✔
3381
                                "color": [252, 253, 191, 255]
1✔
3382
                            }],
1✔
3383
                            "noDataColor": [0, 0, 0, 0],
1✔
3384
                            "overColor": [255, 255, 255, 127],
1✔
3385
                            "underColor": [255, 255, 255, 127]
1✔
3386
                        }
1✔
3387
                    }
1✔
3388
                }]
1✔
3389
            }))
1✔
3390
            .unwrap();
1✔
3391

1✔
3392
            db.update_project(update).await.unwrap();
20✔
3393

1✔
3394
            let update: UpdateProject = serde_json::from_value(json!({
1✔
3395
                "id": project_id.to_string(),
1✔
3396
                "layers": [{
1✔
3397
                    "name": "NDVI",
1✔
3398
                    "workflow": workflow_id.to_string(),
1✔
3399
                    "visibility": {
1✔
3400
                        "data": true,
1✔
3401
                        "legend": false
1✔
3402
                    },
1✔
3403
                    "symbology": {
1✔
3404
                        "type": "raster",
1✔
3405
                        "opacity": 1,
1✔
3406
                        "colorizer": {
1✔
3407
                            "type": "linearGradient",
1✔
3408
                            "breakpoints": [{
1✔
3409
                                "value": 1,
1✔
3410
                                "color": [0, 0, 4, 255]
1✔
3411
                            }, {
1✔
3412
                                "value": 17.933_333_333_333_334,
1✔
3413
                                "color": [11, 9, 36, 255]
1✔
3414
                            }, {
1✔
3415
                                "value": 34.866_666_666_666_67,
1✔
3416
                                "color": [32, 17, 75, 255]
1✔
3417
                            }, {
1✔
3418
                                "value": 51.800_000_000_000_004,
1✔
3419
                                "color": [59, 15, 112, 255]
1✔
3420
                            }, {
1✔
3421
                                "value": 68.733_333_333_333_33,
1✔
3422
                                "color": [87, 21, 126, 255]
1✔
3423
                            }, {
1✔
3424
                                "value": 85.666_666_666_666_66,
1✔
3425
                                "color": [114, 31, 129, 255]
1✔
3426
                            }, {
1✔
3427
                                "value": 102.6,
1✔
3428
                                "color": [140, 41, 129, 255]
1✔
3429
                            }, {
1✔
3430
                                "value": 119.533_333_333_333_32,
1✔
3431
                                "color": [168, 50, 125, 255]
1✔
3432
                            }, {
1✔
3433
                                "value": 136.466_666_666_666_67,
1✔
3434
                                "color": [196, 60, 117, 255]
1✔
3435
                            }, {
1✔
3436
                                "value": 153.4,
1✔
3437
                                "color": [222, 73, 104, 255]
1✔
3438
                            }, {
1✔
3439
                                "value": 170.333_333_333_333_31,
1✔
3440
                                "color": [241, 96, 93, 255]
1✔
3441
                            }, {
1✔
3442
                                "value": 187.266_666_666_666_65,
1✔
3443
                                "color": [250, 127, 94, 255]
1✔
3444
                            }, {
1✔
3445
                                "value": 204.2,
1✔
3446
                                "color": [254, 159, 109, 255]
1✔
3447
                            }, {
1✔
3448
                                "value": 221.133_333_333_333_33,
1✔
3449
                                "color": [254, 191, 132, 255]
1✔
3450
                            }, {
1✔
3451
                                "value": 238.066_666_666_666_63,
1✔
3452
                                "color": [253, 222, 160, 255]
1✔
3453
                            }, {
1✔
3454
                                "value": 255,
1✔
3455
                                "color": [252, 253, 191, 255]
1✔
3456
                            }],
1✔
3457
                            "noDataColor": [0, 0, 0, 0],
1✔
3458
                            "overColor": [255, 255, 255, 127],
1✔
3459
                            "underColor": [255, 255, 255, 127]
1✔
3460
                        }
1✔
3461
                    }
1✔
3462
                }]
1✔
3463
            }))
1✔
3464
            .unwrap();
1✔
3465

1✔
3466
            let update = update;
1✔
3467

3468
            // run two updates concurrently
3469
            let (r0, r1) = join!(db.update_project(update.clone()), db.update_project(update));
1✔
3470

3471
            assert!(r0.is_ok());
1✔
3472
            assert!(r1.is_ok());
1✔
3473
        })
1✔
3474
        .await;
11✔
3475
    }
3476
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc