• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

run-llama / llama_deploy / 15752385390

19 Jun 2025 07:41AM UTC coverage: 86.985% (+3.7%) from 83.249%
15752385390

push

github

web-flow
refact: Remove Control Plane and Message Queues (#544)

11 of 13 new or added lines in 7 files covered. (84.62%)

20 existing lines in 2 files now uncovered.

1330 of 1529 relevant lines covered (86.98%)

1.74 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

80.65
/llama_deploy/cli/init.py
1
import shutil
2✔
2
import subprocess
2✔
3
import tempfile
2✔
4
from pathlib import Path
2✔
5
from typing import Any, Dict, Optional, Type
2✔
6

7
import click
2✔
8
import yaml
2✔
9
from pydantic import BaseModel
2✔
10

11
# Import pydantic models
12
from llama_deploy.apiserver.deployment_config_parser import (
2✔
13
    DeploymentConfig,
14
    Service,
15
    ServiceSource,
16
    SourceType,
17
    UIService,
18
)
19

20

21
@click.command()
@click.option(
    "--name",
    type=str,
    default=None,
    help="Name of the project to create",
)
@click.option(
    "--destination",
    type=str,
    default=None,
    help="Directory where the project will be created",
)
@click.option(
    "--template",
    type=click.Choice(["basic", "none"]),  # For future: add more templates
    default=None,
    help="Template to use for the workflow (currently only 'basic' is supported)",
)
def init(
    name: Optional[str] = None,
    destination: Optional[str] = None,
    template: Optional[str] = None,
) -> None:
    """Bootstrap a new llama-deploy project with a basic workflow and configuration.

    Any option not supplied on the command line is collected interactively.
    The command creates ``<destination>/<name>``, writes a commented
    ``deployment.yml`` generated from the pydantic config models, and (unless
    ``template`` is ``"none"``) copies template files from the llama_deploy
    GitHub repository using a shallow sparse git clone. Requires ``git`` on
    PATH and network access for the template download step.
    """

    # Interactive walkthrough - get inputs if not provided via CLI
    if name is None:
        name = click.prompt(
            "Project name",
            default="llama-deploy-app",
            show_default=True,
            type=str,
        )
        assert name  # narrow Optional[str] -> str for type checkers

    if destination is None:
        destination = click.prompt(
            "Destination directory",
            default=".",
            type=str,
            show_default=True,
        )
        assert destination  # narrow Optional[str] -> str for type checkers

    if template is None:
        click.echo("\nWorkflow template:")
        click.echo("  basic     - Basic workflow with OpenAI integration (recommended)")
        click.echo(
            "  none      - Do not create any sample workflow code (you just want a deployment.yml)"
        )

        template = click.prompt(
            "\nSelect workflow template",
            default="basic",
            type=click.Choice(["basic", "none"]),
            show_default=True,
        )

    # Additional configuration options (a UI only makes sense with a workflow)
    if template != "none":
        use_ui = click.confirm(
            "\nWould you like to bundle a sample next.js UI with your deployment?",
            default=True,
        )
    else:
        use_ui = False

    # Create project directory
    project_dir = Path(destination) / name
    if project_dir.exists():
        if not click.confirm(f"\nDirectory {project_dir} already exists. Continue?"):
            raise click.Abort()
    else:
        project_dir.mkdir(parents=True, exist_ok=True)
        click.echo(f"Created project directory: {project_dir}")

    # Create deployment.yml using pydantic models
    deployment_config = create_deployment_config(name, use_ui)
    deployment_path = project_dir / "deployment.yml"

    # Exclude several fields that would only confuse users
    deployment_dict = deployment_config.model_dump(
        mode="json",
        exclude_none=False,
        by_alias=True,
        exclude={  # type: ignore
            "control_plane": ["running", "internal_host", "internal_port"],
            "services": {"__all__": ["host", "port", "ts_dependencies"]},
            "ui": ["host", "port", "python_dependencies"],
        },
    )
    write_yaml_with_comments(deployment_path, deployment_dict, deployment_config)
    click.echo(f"Created deployment config: {deployment_path}")

    # Download the template from github
    if template != "none":
        repo_url = "https://github.com/run-llama/llama_deploy.git"
        template_path = f"templates/{template}"

        # Create a temporary directory for cloning
        with tempfile.TemporaryDirectory() as temp_dir:
            # Clone only the specific directory (with depth=1 for efficiency)
            click.echo("Cloning template files from repository...")

            try:
                # Clone the repository. Output is captured so the
                # CalledProcessError handler below can actually report
                # stdout/stderr (without capture they are always None).
                subprocess.run(
                    [
                        "git",
                        "clone",
                        "--depth=1",
                        "--filter=blob:none",
                        "--sparse",
                        repo_url,
                        temp_dir,
                    ],
                    check=True,
                    capture_output=True,
                )

                # Set up sparse checkout from the temp directory
                subprocess.run(
                    ["git", "sparse-checkout", "set", template_path],
                    check=True,
                    cwd=temp_dir,
                    capture_output=True,
                )

                # Copy template files to the project
                template_dir = Path(temp_dir) / template_path
                if not template_dir.exists():
                    raise FileNotFoundError(
                        f"Template directory not found: {template_dir}"
                    )

                # Copy contents to src directory (using absolute paths)
                for item in template_dir.glob("*"):
                    if item.name == "deployment.yml":
                        # We don't want to copy the template deployment.yml file
                        # We generate our own deployment.yml file.
                        # Exact-name match so unrelated files that merely
                        # contain the substring are still copied.
                        continue

                    if item.is_dir():
                        shutil.copytree(
                            item, project_dir / item.name, dirs_exist_ok=True
                        )
                    else:
                        shutil.copy2(item, project_dir)

                click.echo(f"Template files copied successfully to {project_dir}")

            except subprocess.CalledProcessError as e:
                # Best-effort report; project creation continues without the template.
                click.echo(f"Error downloading template: {e}")
                click.echo(f"Output: {e.stdout.decode() if e.stdout else ''}")
                click.echo(f"Error: {e.stderr.decode() if e.stderr else ''}")
            except Exception as e:
                click.echo(f"Error setting up template: {e}")

    # Delete the UI folder if the user doesn't want it
    if not use_ui:
        ui_dir = project_dir / "ui"
        if ui_dir.exists():
            shutil.rmtree(ui_dir)

    # Final instructions
    click.echo(f"\nProject {name} created successfully!")
    click.echo("Next steps:")
    click.echo(f"  1. cd {name}")
    click.echo(
        "  2. Edit the deployment.yml file to add your OpenAI API key and set other parameters"
    )
    click.echo("  3. Start the API server:")
    click.echo("     python -m llama_deploy.apiserver")
    click.echo(
        "     (or with Docker: docker run -p 4501:4501 -v .:/opt/app -w /opt/app llamaindex/llama-deploy)"
    )
    click.echo("  4. In another terminal, deploy your workflow:")
    click.echo("     llamactl deploy deployment.yml")
    click.echo("  5. Test your workflow:")
    click.echo(f"     llamactl run --deployment {name} --arg message 'Hello!'")

    if use_ui:
        click.echo("\nTo use the UI component:")
        click.echo(f"  • Open your browser to: http://localhost:4501/ui/{name}/")
        click.echo("  • The UI will be served automatically by the API server")
207
def write_yaml_with_comments(
    file_path: Path, config: Dict[str, Any], model: DeploymentConfig
) -> None:
    """Write YAML with comments based on pydantic model schemas and field descriptions.

    Serializes ``config`` with ``yaml.safe_dump`` (insertion order preserved),
    appends inline ``# <description>`` comments taken from the pydantic JSON
    schema of ``model``, inserts a fixed file header and per-section comment
    banners, then writes everything to ``file_path``.

    Args:
        file_path: Destination path for the generated deployment.yml.
        config: The already-dumped (dict-of-JSON-values) deployment config.
        model: The DeploymentConfig instance whose class schema provides the
            field descriptions.
    """

    def get_field_description(
        model_class: Type[BaseModel], field_name: str
    ) -> Optional[str]:
        """Get the description for a field from the model's JSON schema."""
        # Defensive: non-pydantic annotations (e.g. Optional[...] wrappers)
        # may reach here without a model_json_schema attribute.
        if not hasattr(model_class, "model_json_schema"):
            return None

        schema = model_class.model_json_schema()
        properties = schema.get("properties", {})
        field_schema = properties.get(field_name, {})
        return field_schema.get("description")

    def add_comments_to_yaml(
        yaml_content: str,
        model_class: Type[BaseModel],
        current_config: Dict[str, Any],
        indent_level: int = 0,
    ) -> str:
        """Recursively add inline comments to YAML content based on model schemas.

        Scans line by line with a manual index `i` so that nested blocks can be
        consumed in one step (look-ahead index `j`) and recursed into with the
        nested field's model class.
        """
        lines = yaml_content.split("\n")
        commented_lines = []
        i = 0

        while i < len(lines):
            line = lines[i]

            # Skip empty lines and comments
            if not line.strip() or line.strip().startswith("#"):
                commented_lines.append(line)
                i += 1
                continue

            # Check if this line defines a field
            if ":" in line and not line.strip().startswith("-"):
                # Extract field name (convert YAML dashes to underscores for pydantic)
                yaml_field = line.split(":")[0].strip()
                pydantic_field = yaml_field.replace("-", "_")

                # Get description for this field
                description = get_field_description(model_class, pydantic_field)

                if description:
                    # Add inline comment to the field
                    # NOTE(review): both branches below produce the identical
                    # string; the if/else is redundant as written.
                    if line.rstrip().endswith(":"):
                        # Field has no value on same line (nested object)
                        commented_line = line.rstrip() + f"  # {description}"
                    else:
                        # Field has value on same line
                        commented_line = line.rstrip() + f"  # {description}"
                    commented_lines.append(commented_line)
                else:
                    # No description, add line as-is
                    commented_lines.append(line)

                # Handle nested objects
                if pydantic_field in current_config and isinstance(
                    current_config[pydantic_field], dict
                ):
                    # Try to find the nested model class
                    if (
                        hasattr(model_class, "model_fields")
                        and pydantic_field in model_class.model_fields
                    ):
                        field_info = model_class.model_fields[pydantic_field]
                        # May be a plain annotation (e.g. Optional[UIService]);
                        # get_field_description / the hasattr check below cope.
                        nested_model_class = getattr(field_info, "annotation", None)

                        # Look ahead to capture the nested YAML content
                        nested_lines = []
                        j = i + 1
                        base_indent = len(line) - len(line.lstrip())

                        while j < len(lines):
                            next_line = lines[j]
                            if (
                                next_line.strip()
                                and len(next_line) - len(next_line.lstrip())
                                > base_indent
                            ):
                                nested_lines.append(next_line)
                                j += 1
                            elif (
                                next_line.strip()
                            ):  # Non-empty line at same or lower indent level
                                break
                            else:
                                # Blank lines inside the nested block are kept.
                                nested_lines.append(next_line)
                                j += 1

                        # Process nested content if we have a model class
                        if nested_model_class and hasattr(
                            nested_model_class, "model_json_schema"
                        ):
                            nested_yaml = "\n".join(nested_lines)
                            processed_nested = add_comments_to_yaml(
                                nested_yaml,
                                nested_model_class,
                                current_config[pydantic_field],
                                indent_level + 1,
                            )
                            # NOTE(review): "\n".join() does not end with a
                            # trailing empty element, so [:-1] drops the LAST
                            # nested line unless the block happened to end with
                            # a blank line — verify against real output.
                            commented_lines.extend(
                                processed_nested.split("\n")[:-1]
                            )  # Remove last empty line
                            i = j - 1
                        else:
                            # Add nested lines without processing
                            commented_lines.extend(nested_lines)
                            i = j - 1
            else:
                commented_lines.append(line)

            # i was set to j - 1 above when a nested block was consumed,
            # so this advances to the first line after the block.
            i += 1

        return "\n".join(commented_lines)

    # Add header comments
    header_comments = [
        "# Deployment configuration for llama-deploy",
        "#",
        "# This file defines your deployment setup including:",
        "# - Control plane configuration",
        "# - Message queue settings",
        "# - Services (workflows and UI components)",
        "#",
        "# For more information, see: https://github.com/run-llama/llama-deploy",
        "",
    ]

    # Convert dictionary to YAML
    yaml_content = yaml.safe_dump(config, sort_keys=False)

    # Add field-specific comments based on schema
    commented_yaml = add_comments_to_yaml(yaml_content, type(model), config)

    # Add section comments
    section_comments = {
        "default_service:": [
            "# The default service to use when no service is specified",
        ],
        "services:": [
            "# Service definitions",
            "# Each service represents a workflow or component in your system",
        ],
        "ui:": [
            "# UI component configuration",
            "# This defines a web interface for your deployment",
        ],
    }

    # Insert section comments into YAML content
    # insertion_point > 0 skips a section that starts at offset 0 (the very
    # first line) as well as sections that are absent (find() == -1).
    for section, comments in section_comments.items():
        if section in commented_yaml:
            insertion_point = commented_yaml.find(section)
            if insertion_point > 0:
                commented_yaml = (
                    commented_yaml[:insertion_point]
                    + "\n"
                    + "\n".join(comments)
                    + "\n"
                    + commented_yaml[insertion_point:]
                )

    # Write to file with header comments
    with open(file_path, "w") as f:
        f.write("\n".join(header_comments) + "\n")
        f.write(commented_yaml)
379
def create_deployment_config(name: str, use_ui: bool = False) -> DeploymentConfig:
    """Build the DeploymentConfig pydantic model for a freshly scaffolded project.

    Args:
        name: Deployment name written into the config.
        use_ui: When True, a sample UI service is attached to the deployment.

    Returns:
        A DeploymentConfig with one example workflow service (and optionally
        one example UI service).
    """
    # The sample workflow service that every new project starts with.
    example_workflow = Service(
        name="Example Workflow",
        source=ServiceSource(
            type=SourceType.local,
            location="src",
        ),
        import_path="src/workflow:workflow",
        python_dependencies=["llama-index-core>=0.12.37", "llama-index-llms-openai"],
        env={"OPENAI_API_KEY": "<your-openai-api-key-here>"},
        env_files=None,
        ts_dependencies=None,
    )

    # Only attach a UI service when the user opted in.
    example_ui = (
        UIService(
            name="Example UI",
            source=ServiceSource(
                type=SourceType.local,
                location=".",
            ),
            import_path="ui",
            ts_dependencies={},
            env_files=None,
            python_dependencies=None,
            env=None,
        )
        if use_ui
        else None
    )

    return DeploymentConfig(
        name=name,
        default_service="example_workflow",
        services={"example_workflow": example_workflow},
        ui=example_ui,
    )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc