• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

SwissDataScienceCenter / renku-data-services / 14382014257

10 Apr 2025 01:42PM UTC coverage: 86.576% (+0.2%) from 86.351%
14382014257

Pull #759

github

web-flow
Merge 470ff1568 into 74eb7d965
Pull Request #759: feat: add new service cache and migrations

412 of 486 new or added lines in 15 files covered. (84.77%)

18 existing lines in 6 files now uncovered.

20232 of 23369 relevant lines covered (86.58%)

1.53 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

20.54
/components/renku_data_services/notebooks/core_sessions.py
1
"""A selection of core functions for AmaltheaSessions."""
2

3
import base64
2✔
4
import json
2✔
5
import os
2✔
6
from collections.abc import AsyncIterator
2✔
7
from pathlib import PurePosixPath
2✔
8
from typing import cast
2✔
9
from urllib.parse import urljoin, urlparse
2✔
10

11
import httpx
2✔
12
from kubernetes.client import V1ObjectMeta, V1Secret
2✔
13
from sanic import Request
2✔
14
from toml import dumps
2✔
15
from yaml import safe_dump
2✔
16

17
from renku_data_services.base_models import APIUser
2✔
18
from renku_data_services.base_models.core import AnonymousAPIUser, AuthenticatedAPIUser
2✔
19
from renku_data_services.crc.db import ResourcePoolRepository
2✔
20
from renku_data_services.crc.models import GpuKind, ResourceClass, ResourcePool
2✔
21
from renku_data_services.data_connectors.models import DataConnectorSecret, DataConnectorWithSecrets
2✔
22
from renku_data_services.errors import errors
2✔
23
from renku_data_services.notebooks import apispec
2✔
24
from renku_data_services.notebooks.api.amalthea_patches import git_proxy, init_containers
2✔
25
from renku_data_services.notebooks.api.classes.image import Image
2✔
26
from renku_data_services.notebooks.api.classes.k8s_client import sanitize_for_serialization
2✔
27
from renku_data_services.notebooks.api.classes.repository import GitProvider, Repository
2✔
28
from renku_data_services.notebooks.api.schemas.cloud_storage import RCloneStorage
2✔
29
from renku_data_services.notebooks.config import NotebooksConfig
2✔
30
from renku_data_services.notebooks.crs import (
2✔
31
    AmaltheaSessionV1Alpha1,
32
    AmaltheaSessionV1Alpha1Patch,
33
    AmaltheaSessionV1Alpha1SpecPatch,
34
    AmaltheaSessionV1Alpha1SpecSessionPatch,
35
    Culling,
36
    DataSource,
37
    ExtraContainer,
38
    ExtraVolume,
39
    ExtraVolumeMount,
40
    ImagePullSecret,
41
    InitContainer,
42
    Resources,
43
    SecretAsVolume,
44
    SecretAsVolumeItem,
45
    State,
46
)
47
from renku_data_services.notebooks.models import ExtraSecret
2✔
48
from renku_data_services.notebooks.utils import (
2✔
49
    node_affinity_from_resource_class,
50
    tolerations_from_resource_class,
51
)
52
from renku_data_services.project.db import ProjectRepository
2✔
53
from renku_data_services.project.models import Project, SessionSecret
2✔
54
from renku_data_services.users.db import UserRepo
2✔
55
from renku_data_services.utils.cryptography import get_encryption_key
2✔
56

57

58
async def get_extra_init_containers(
    nb_config: NotebooksConfig,
    user: AnonymousAPIUser | AuthenticatedAPIUser,
    repositories: list[Repository],
    git_providers: list[GitProvider],
    storage_mount: PurePosixPath,
    work_dir: PurePosixPath,
    uid: int = 1000,
    gid: int = 1000,
) -> tuple[list[InitContainer], list[ExtraVolume]]:
    """Get all extra init containers that should be added to an amalthea session."""
    # Certificates init container always comes first, together with its volumes.
    cert_container, cert_volumes = init_containers.certificates_container(nb_config)
    extra_init_containers = [InitContainer.model_validate(sanitize_for_serialization(cert_container))]
    extra_volumes = [ExtraVolume.model_validate(sanitize_for_serialization(vol)) for vol in cert_volumes]
    # The git clone init container is optional - it is only added when there is something to clone.
    clone_container = await init_containers.git_clone_container_v2(
        user=user,
        config=nb_config,
        repositories=repositories,
        git_providers=git_providers,
        workspace_mount_path=storage_mount,
        work_dir=work_dir,
        uid=uid,
        gid=gid,
    )
    if clone_container is not None:
        extra_init_containers.append(InitContainer.model_validate(clone_container))
    return extra_init_containers, extra_volumes
85

86

87
async def get_extra_containers(
    nb_config: NotebooksConfig,
    user: AnonymousAPIUser | AuthenticatedAPIUser,
    repositories: list[Repository],
    git_providers: list[GitProvider],
) -> list[ExtraContainer]:
    """Get the extra containers added to amalthea sessions."""
    proxy = await git_proxy.main_container(
        user=user, config=nb_config, repositories=repositories, git_providers=git_providers
    )
    if not proxy:
        # Nothing to add when no git proxy is needed (e.g. no repositories).
        return []
    return [ExtraContainer.model_validate(sanitize_for_serialization(proxy))]
101

102

103
async def get_auth_secret_authenticated(
    nb_config: NotebooksConfig, user: AuthenticatedAPIUser, server_name: str
) -> ExtraSecret:
    """Get the extra secrets that need to be added to the session for an authenticated user."""
    ingress = nb_config.sessions.ingress
    oidc = nb_config.sessions.oidc
    base_server_url = ingress.base_url(server_name)
    base_server_path = ingress.base_path(server_name)
    base_server_https_url = ingress.base_url(server_name, force_https=True)
    proxy_prefix = urlparse(urljoin(base_server_url + "/", "oauth2")).path
    # oauth2-proxy configuration rendered as TOML into the "auth" key of the secret.
    oauth2_proxy_config = {
        "provider": "oidc",
        "client_id": oidc.client_id,
        "oidc_issuer_url": oidc.issuer_url,
        "session_cookie_minimal": True,
        "skip_provider_button": True,
        # NOTE: Some identity providers reject a redirect URL that is not HTTPS.
        "redirect_url": urljoin(base_server_https_url + "/", "oauth2/callback"),
        "cookie_path": base_server_path,
        "proxy_prefix": proxy_prefix,
        "authenticated_emails_file": "/authorized_emails",
        "client_secret": oidc.client_secret,
        "cookie_secret": base64.urlsafe_b64encode(os.urandom(32)).decode(),
        "insecure_oidc_allow_unverified_email": oidc.allow_unverified_email,
    }
    secret_data = {
        "auth": dumps(oauth2_proxy_config),
        # Only the launching user's email is authorized to reach the session.
        "authorized_emails": user.email,
    }
    secret = V1Secret(metadata=V1ObjectMeta(name=server_name), string_data=secret_data)
    emails_volume = ExtraVolume(
        name="renku-authorized-emails",
        secret=SecretAsVolume(
            secretName=server_name,
            items=[SecretAsVolumeItem(key="authorized_emails", path="authorized_emails")],
        ),
    )
    emails_mount = ExtraVolumeMount(
        name="renku-authorized-emails",
        mountPath="/authorized_emails",
        subPath="authorized_emails",
    )
    return ExtraSecret(secret, emails_volume, emails_mount)
144

145

146
async def get_auth_secret_anonymous(nb_config: NotebooksConfig, server_name: str, request: Request) -> ExtraSecret:
    """Get the extra secrets that need to be added to the session for an anonymous user."""
    # NOTE: We extract the session cookie value here in order to avoid creating a cookie.
    # The gateway encrypts and signs cookies so the user ID injected in the request headers does not
    # match the value of the session cookie.
    cookie_name = nb_config.session_id_cookie_name
    session_id = cast(str | None, request.cookies.get(cookie_name))
    if not session_id:
        raise errors.UnauthorizedError(
            message=f"You have to have a renku session cookie at {cookie_name} "
            "in order to launch an anonymous session."
        )
    # NOTE: Amalthea looks for the token value first in the cookie and then in the authorization header
    authproxy_config = {
        "authproxy": {
            "token": session_id,
            "cookie_key": cookie_name,
            "verbose": True,
        }
    }
    secret = V1Secret(
        metadata=V1ObjectMeta(name=server_name),
        string_data={"auth": safe_dump(authproxy_config)},
    )
    return ExtraSecret(secret)
170

171

172
def get_gitlab_image_pull_secret(
    nb_config: NotebooksConfig, user: AuthenticatedAPIUser, image_pull_secret_name: str, access_token: str
) -> ExtraSecret:
    """Create a Kubernetes secret for private GitLab registry authentication."""
    # Docker registry credentials in the dockerconfigjson layout expected by Kubernetes.
    dockerconfig = {
        "auths": {
            nb_config.git.registry: {
                "Username": "oauth2",
                "Password": access_token,
                "Email": user.email,
            }
        }
    }
    secret = V1Secret(
        metadata=V1ObjectMeta(
            name=image_pull_secret_name,
            namespace=nb_config.k8s_client.preferred_namespace,
        ),
        string_data={".dockerconfigjson": json.dumps(dockerconfig)},
        type="kubernetes.io/dockerconfigjson",
    )
    return ExtraSecret(secret)
198

199

200
async def get_data_sources(
    nb_config: NotebooksConfig,
    user: AnonymousAPIUser | AuthenticatedAPIUser,
    server_name: str,
    data_connectors_stream: AsyncIterator[DataConnectorWithSecrets],
    work_dir: PurePosixPath,
    cloud_storage_overrides: list[apispec.SessionCloudStoragePost],
    user_repo: UserRepo,
) -> tuple[list[DataSource], list[ExtraSecret], dict[str, list[DataConnectorSecret]]]:
    """Generate cloud storage related resources.

    Returns the data sources to mount in the session, the k8s secrets backing them and a
    mapping of data connector ID to its saved storage secrets.
    """
    data_sources: list[DataSource] = []
    secrets: list[ExtraSecret] = []
    dcs: dict[str, RCloneStorage] = {}
    dcs_secrets: dict[str, list[DataConnectorSecret]] = {}
    # FIX: user_secret_key was previously only assigned in the authenticated branch below,
    # which raised UnboundLocalError (instead of the intended ProgrammingError) when an
    # anonymous user had data connectors with saved secrets.
    user_secret_key: str | None = None
    async for dc in data_connectors_stream:
        dcs[str(dc.data_connector.id)] = RCloneStorage(
            source_path=dc.data_connector.storage.source_path,
            # Relative target paths are anchored at the session working directory.
            mount_folder=dc.data_connector.storage.target_path
            if PurePosixPath(dc.data_connector.storage.target_path).is_absolute()
            else (work_dir / dc.data_connector.storage.target_path).as_posix(),
            configuration=dc.data_connector.storage.configuration,
            readonly=dc.data_connector.storage.readonly,
            name=dc.data_connector.name,
            secrets={str(secret.secret_id): secret.name for secret in dc.secrets},
            storage_class=nb_config.cloud_storage.storage_class,
        )
        if len(dc.secrets) > 0:
            dcs_secrets[str(dc.data_connector.id)] = dc.secrets
    if isinstance(user, AuthenticatedAPIUser) and len(dcs_secrets) > 0:
        secret_key = await user_repo.get_or_create_user_secret_key(user)
        user_secret_key = get_encryption_key(secret_key.encode(), user.id.encode()).decode("utf-8")
    # NOTE: Check the cloud storage overrides from the request body and if any match
    # then overwrite the projects cloud storages
    # NOTE: Cloud storages in the session launch request body that are not from the DB will cause a 404 error
    # NOTE: Overriding the configuration when a saved secret is there will cause a 422 error
    for csr in cloud_storage_overrides:
        csr_id = csr.storage_id
        if csr_id not in dcs:
            raise errors.MissingResourceError(
                message=f"You have requested a cloud storage with ID {csr_id} which does not exist "
                "or you dont have access to.",
                quiet=True,
            )
        if csr.target_path is not None and not PurePosixPath(csr.target_path).is_absolute():
            csr.target_path = (work_dir / csr.target_path).as_posix()
        dcs[csr_id] = dcs[csr_id].with_override(csr)
    for cs_id, cs in dcs.items():
        secret_name = f"{server_name}-ds-{cs_id.lower()}"
        secret_key_needed = len(dcs_secrets.get(cs_id, [])) > 0
        if secret_key_needed and user_secret_key is None:
            raise errors.ProgrammingError(
                message=f"You have saved storage secrets for data connector {cs_id} "
                f"associated with your user ID {user.id} but no key to decrypt them, "
                "therefore we cannot mount the requested data connector. "
                "Please report this to the renku administrators."
            )
        secret = ExtraSecret(
            cs.secret(
                secret_name,
                nb_config.k8s_client.preferred_namespace,
                user_secret_key=user_secret_key if secret_key_needed else None,
            )
        )
        secrets.append(secret)
        data_sources.append(
            DataSource(
                mountPath=cs.mount_folder,
                secretRef=secret.ref(),
                accessMode="ReadOnlyMany" if cs.readonly else "ReadWriteOnce",
            )
        )
    return data_sources, secrets, dcs_secrets
272

273

274
async def request_dc_secret_creation(
    user: AuthenticatedAPIUser | AnonymousAPIUser,
    nb_config: NotebooksConfig,
    manifest: AmaltheaSessionV1Alpha1,
    dc_secrets: dict[str, list[DataConnectorSecret]],
) -> None:
    """Request the specified data connector secrets to be created by the secret service.

    Raises ProgrammingError if the secret service responds with a non-2xx status code.
    """
    if isinstance(user, AnonymousAPIUser):
        # Anonymous users have no saved secrets to materialize.
        return
    # The created k8s secrets are owned by the session so they are garbage collected with it.
    owner_reference = {
        "apiVersion": manifest.apiVersion,
        "kind": manifest.kind,
        "name": manifest.metadata.name,
        "uid": manifest.metadata.uid,
    }
    secrets_url = nb_config.user_secrets.secrets_storage_service_url + "/api/secrets/kubernetes"
    headers = {"Authorization": f"bearer {user.access_token}"}
    # PERF: reuse a single HTTP client for all data connectors instead of opening and
    # closing one connection pool per iteration.
    async with httpx.AsyncClient(timeout=10) as client:
        for s_id, secrets in dc_secrets.items():
            if len(secrets) == 0:
                continue
            request_data = {
                "name": f"{manifest.metadata.name}-ds-{s_id.lower()}-secrets",
                "namespace": nb_config.k8s_v2_client.preferred_namespace,
                "secret_ids": [str(secret.secret_id) for secret in secrets],
                "owner_references": [owner_reference],
                "key_mapping": {str(secret.secret_id): secret.name for secret in secrets},
            }
            res = await client.post(secrets_url, headers=headers, json=request_data)
            if res.status_code >= 300 or res.status_code < 200:
                raise errors.ProgrammingError(
                    message=f"The secret for data connector with {s_id} could not be "
                    f"successfully created, the status code was {res.status_code}."
                    "Please contact a Renku administrator.",
                    detail=res.text,
                )
311

312
async def request_session_secret_creation(
    user: AuthenticatedAPIUser | AnonymousAPIUser,
    nb_config: NotebooksConfig,
    manifest: AmaltheaSessionV1Alpha1,
    session_secrets: list[SessionSecret],
) -> None:
    """Request the specified user session secrets to be created by the secret service."""
    if isinstance(user, AnonymousAPIUser):
        return
    if not session_secrets:
        return
    # The created k8s secret is owned by the session so it is cleaned up with it.
    owner_reference = {
        "apiVersion": manifest.apiVersion,
        "kind": manifest.kind,
        "name": manifest.metadata.name,
        "uid": manifest.metadata.uid,
    }
    # Group the target filenames by secret ID: one secret may be mounted under several names.
    key_mapping: dict[str, list[str]] = {}
    for session_secret in session_secrets:
        key_mapping.setdefault(str(session_secret.secret_id), []).append(session_secret.secret_slot.filename)
    request_data = {
        "name": f"{manifest.metadata.name}-secrets",
        "namespace": nb_config.k8s_v2_client.preferred_namespace,
        "secret_ids": [str(s.secret_id) for s in session_secrets],
        "owner_references": [owner_reference],
        "key_mapping": key_mapping,
    }
    secrets_url = nb_config.user_secrets.secrets_storage_service_url + "/api/secrets/kubernetes"
    headers = {"Authorization": f"bearer {user.access_token}"}
    async with httpx.AsyncClient(timeout=10) as client:
        res = await client.post(secrets_url, headers=headers, json=request_data)
        if res.status_code >= 300 or res.status_code < 200:
            raise errors.ProgrammingError(
                message="The session secrets could not be successfully created, "
                f"the status code was {res.status_code}."
                "Please contact a Renku administrator.",
                detail=res.text,
            )
353

354

355
def resources_from_resource_class(resource_class: ResourceClass) -> Resources:
    """Convert the resource class to a k8s resources spec."""
    millicores = round(resource_class.cpu * 1000)
    requests: dict[str, str | int] = {
        "cpu": f"{millicores}m",
        "memory": f"{resource_class.memory}Gi",
    }
    limits: dict[str, str | int] = {}
    if resource_class.gpu > 0:
        gpu_key = GpuKind.NVIDIA.value + "/gpu"
        # NOTE: GPUs cannot be overcommitted, so they must be set in limits as well as
        # requests - otherwise the session can fully fail to start on some clusters.
        requests[gpu_key] = resource_class.gpu
        limits[gpu_key] = resource_class.gpu
    return Resources(requests=requests, limits=limits or None)
369

370

371
def repositories_from_project(project: Project, git_providers: list[GitProvider]) -> list[Repository]:
    """Get the list of git repositories from a project."""
    repositories: list[Repository] = []
    for repo_url in project.repositories:
        repo_host = urlparse(repo_url).netloc
        # Match the repository to a provider by hostname; None when no provider matches.
        provider_id = next(
            (provider.id for provider in git_providers if urlparse(provider.url).netloc == repo_host),
            None,
        )
        repositories.append(Repository(url=repo_url, provider=provider_id))
    return repositories
382

383

384
async def repositories_from_session(
    user: AnonymousAPIUser | AuthenticatedAPIUser,
    session: AmaltheaSessionV1Alpha1,
    project_repo: ProjectRepository,
    git_providers: list[GitProvider],
) -> list[Repository]:
    """Get the list of git repositories from a session."""
    project: Project | None
    try:
        project = await project_repo.get_project(user, session.project_id)
    except errors.MissingResourceError:
        # The session's project is gone or inaccessible - treat it as having no repositories.
        project = None
    return [] if project is None else repositories_from_project(project, git_providers)
396

397

398
def get_culling(resource_pool: ResourcePool, nb_config: NotebooksConfig) -> Culling:
    """Create the culling specification for an AmaltheaSession."""
    defaults = nb_config.sessions.culling.registered
    # Resource-pool-specific thresholds take precedence over the configured defaults.
    idle_seconds = resource_pool.idle_threshold or defaults.idle_seconds
    hibernated_seconds = resource_pool.hibernation_threshold or defaults.hibernated_seconds
    return Culling(
        maxAge=f"{defaults.max_age_seconds}s",
        maxFailedDuration=f"{defaults.failed_seconds}s",
        maxHibernatedDuration=f"{hibernated_seconds}s",
        maxIdleDuration=f"{idle_seconds}s",
        maxStartingDuration=f"{defaults.pending_seconds}s",
    )
411

412

413
async def requires_image_pull_secret(nb_config: NotebooksConfig, image: str, internal_gitlab_user: APIUser) -> bool:
    """Determines if an image requires a pull secret based on its visibility and their GitLab access token."""
    parsed_image = Image.from_path(image)
    repo_api = parsed_image.repo_api()

    # A publicly reachable image never needs credentials.
    if await repo_api.image_exists(parsed_image):
        return False

    gitlab_token = internal_gitlab_user.access_token
    if parsed_image.hostname == nb_config.git.registry and gitlab_token:
        authed_repo_api = repo_api.with_oauth2_token(gitlab_token)
        if await authed_repo_api.image_exists(parsed_image):
            return True
    # No pull secret needed if the image is private and the user cannot access it
    return False
430

431

432
async def patch_session(
    body: apispec.SessionPatchRequest,
    session_id: str,
    nb_config: NotebooksConfig,
    user: AnonymousAPIUser | AuthenticatedAPIUser,
    internal_gitlab_user: APIUser,
    rp_repo: ResourcePoolRepository,
    project_repo: ProjectRepository,
) -> AmaltheaSessionV1Alpha1:
    """Patch an Amalthea session.

    Builds an RFC 7386 merge patch from the request body (hibernation state,
    resource class) plus refreshed extra containers and image pull secrets,
    and applies it via the k8s client. Returns the (possibly unmodified)
    session; if the resulting patch is empty nothing is sent to the cluster.

    Raises MissingResourceError if the session or the requested resource class
    does not exist, and ProgrammingError if the session has no 'spec'.
    """
    session = await nb_config.k8s_v2_client.get_server(session_id, user.id)
    if session is None:
        raise errors.MissingResourceError(message=f"The session with ID {session_id} does not exist", quiet=True)
    if session.spec is None:
        raise errors.ProgrammingError(
            message=f"The session {session_id} being patched is missing the expected 'spec' field.", quiet=True
        )

    # Start from an empty patch and only set the fields that actually change.
    patch = AmaltheaSessionV1Alpha1Patch(spec=AmaltheaSessionV1Alpha1SpecPatch())
    is_getting_hibernated: bool = False

    # Hibernation
    # TODO: Some patching should only be done when the session is in some states to avoid inadvertent restarts
    # Refresh tokens for git proxy
    # State comparisons are case-insensitive and only applied when the requested
    # state differs from the session's current state.
    if (
        body.state is not None
        and body.state.value.lower() == State.Hibernated.value.lower()
        and body.state.value.lower() != session.status.state.value.lower()
    ):
        # Session is being hibernated
        patch.spec.hibernated = True
        is_getting_hibernated = True
    elif (
        body.state is not None
        and body.state.value.lower() == State.Running.value.lower()
        and session.status.state.value.lower() != body.state.value.lower()
    ):
        # Session is being resumed
        patch.spec.hibernated = False

    # Resource class
    if body.resource_class_id is not None:
        rp = await rp_repo.get_resource_pool_from_class(user, body.resource_class_id)
        rc = rp.get_resource_class(body.resource_class_id)
        if not rc:
            raise errors.MissingResourceError(
                message=f"The resource class you requested with ID {body.resource_class_id} does not exist",
                quiet=True,
            )
        if not patch.spec.session:
            patch.spec.session = AmaltheaSessionV1Alpha1SpecSessionPatch()
        patch.spec.session.resources = resources_from_resource_class(rc)
        # Tolerations
        tolerations = tolerations_from_resource_class(rc, nb_config.sessions.tolerations_model)
        if tolerations:
            patch.spec.tolerations = tolerations
        # Affinities
        patch.spec.affinity = node_affinity_from_resource_class(rc, nb_config.sessions.affinity_model)
        # Priority class (if a quota is being used)
        if rc.quota:
            patch.spec.priorityClassName = rc.quota
        # Culling thresholds follow the new resource pool.
        patch.spec.culling = get_culling(rp, nb_config)

    # If the session is being hibernated we do not need to patch anything else that is
    # not specifically called for in the request body, we can refresh things when the user resumes.
    if is_getting_hibernated:
        return await nb_config.k8s_v2_client.patch_server(session_id, user.id, patch.to_rfc7386())

    # Patching the extra containers (includes the git proxy)
    git_providers = await nb_config.git_provider_helper.get_providers(user)
    repositories = await repositories_from_session(user, session, project_repo, git_providers)
    extra_containers = await get_extra_containers(
        nb_config,
        user,
        repositories,
        git_providers,
    )
    if extra_containers:
        patch.spec.extraContainers = extra_containers

    # Refresh the GitLab image pull secret so the session carries a current token.
    if isinstance(user, AuthenticatedAPIUser) and internal_gitlab_user.access_token is not None:
        image = session.spec.session.image
        server_name = session.metadata.name
        needs_pull_secret = await requires_image_pull_secret(nb_config, image, internal_gitlab_user)

        if needs_pull_secret:
            image_pull_secret_name = f"{server_name}-image-secret"

            # Always create a fresh secret to ensure we have the latest token
            image_secret = get_gitlab_image_pull_secret(
                nb_config, user, image_pull_secret_name, internal_gitlab_user.access_token
            )
            if image_secret:
                # Keep unrelated pull secrets; drop any previous "-image-secret" entries
                # before appending the freshly created one.
                updated_secrets = [
                    secret
                    for secret in (session.spec.imagePullSecrets or [])
                    if not secret.name.endswith("-image-secret")
                ]
                updated_secrets.append(ImagePullSecret(name=image_pull_secret_name, adopt=True))
                patch.spec.imagePullSecrets = updated_secrets

    # Avoid a no-op call to the cluster when nothing ended up in the patch.
    patch_serialized = patch.to_rfc7386()
    if len(patch_serialized) == 0:
        return session

    return await nb_config.k8s_v2_client.patch_server(session_id, user.id, patch_serialized)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc