30.1 KB928 lines
Blame
1import type { FastifyInstance, FastifyRequest, FastifyReply } from "fastify";
2import { z } from "zod";
3import { spawn, execSync } from "child_process";
4import { createWriteStream } from "fs";
5import { mkdir, rm } from "fs/promises";
6import { pipeline } from "stream/promises";
7import { BridgeService } from "../services/bridge.js";
8import type { MononokeProvisioner } from "../services/mononoke-provisioner.js";
9import { optionalAuth } from "../auth/middleware.js";
10
// Base URL of the Mononoke bridge sidecar used for all repo read operations.
const BRIDGE_URL =
  process.env.GROVE_BRIDGE_URL ?? "http://localhost:3100";
// Host directory where bare clones and uploaded tarballs are staged during imports.
const DATA_DIR = process.env.GROVE_DATA_DIR ?? "/data/grove";
// Directory containing per-repo Mononoke server configuration.
const MONONOKE_CONFIG_PATH =
  process.env.MONONOKE_CONFIG_PATH ?? "/data/grove/mononoke-config";

// Single shared bridge client for this module.
const bridgeService = new BridgeService(BRIDGE_URL);
18
19export async function repoRoutes(app: FastifyInstance) {
20 const configuredHubApiUrl = process.env.GROVE_HUB_API_URL;
21 const HUB_API_URLS = Array.from(
22 new Set(
23 [
24 configuredHubApiUrl,
25 "http://hub-api:4000",
26 "http://grove-hub-api:4000",
27 "http://localhost:4001",
28 ].filter(Boolean)
29 )
30 ) as string[];
31
  /**
   * Check if a user can access a private repo.
   * Public repos are always accessible. Private repos require the user to be:
   * - the repo owner (for user repos), or
   * - a member of the owning org (for org repos)
   *
   * Org membership is resolved against the hub API candidates in order, with
   * a 3s timeout per host. The first host that returns an OK response is
   * authoritative (membership — or the lack of it — is returned immediately);
   * hosts that error or return non-OK are skipped. If no host answers, the
   * check fails closed and returns false.
   */
  async function canAccessRepo(repoRow: any, userId: number | null): Promise<boolean> {
    if (!repoRow.is_private) return true;
    if (userId == null) return false;
    if (repoRow.owner_type === "user") return repoRow.owner_id === userId;
    // Org repo — check membership via hub API
    for (const hubApiUrl of HUB_API_URLS) {
      const controller = new AbortController();
      const timeout = setTimeout(() => controller.abort(), 3000);
      try {
        const res = await fetch(`${hubApiUrl}/api/orgs/${repoRow.owner_name}`, {
          signal: controller.signal,
        });
        if (!res.ok) continue;
        // First OK response decides — returns even when membership is false.
        const { members } = await res.json();
        return Array.isArray(members) && members.some((m: any) => m.user_id === userId);
      } catch {
        // try next
      } finally {
        clearTimeout(timeout);
      }
    }
    return false;
  }
61
62 /** Middleware: resolve repo + enforce private access. Attaches repoRow to request. */
63 async function resolveRepo(request: any, reply: any) {
64 const { owner, repo: repoName } = request.params;
65 const db = (app as any).db;
66 const repoRow = db
67 .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
68 .get(owner, repoName) as any;
69
70 if (!repoRow) {
71 return reply.code(404).send({ error: "Repository not found" });
72 }
73 const userId = (request.user as any)?.id ?? null;
74 if (!(await canAccessRepo(repoRow, userId))) {
75 // Return 404 for private repos to avoid leaking existence
76 return reply.code(404).send({ error: "Repository not found" });
77 }
78 request.repoRow = repoRow;
79 }
80
  // List all repos, decorated with last-commit activity, sorted most-active first.
  app.get(
    "/",
    { preHandler: [optionalAuth] },
    async (request: any) => {
      const db = (app as any).db;
      const userId = (request.user as any)?.id ?? null;
      const allRepos = db
        .prepare(`SELECT * FROM repos_with_owner ORDER BY updated_at DESC`)
        .all() as any[];

      // Filter out private repos the user can't access
      // NOTE(review): private org repos are visible to ANY authenticated user
      // here — unlike canAccessRepo, org membership is not checked. The inline
      // comment says this is deliberate "for now"; confirm before relying on it.
      const repos = allRepos.filter(
        (r) => !r.is_private || (userId != null && (
          (r.owner_type === "user" && r.owner_id === userId) ||
          r.owner_type === "org" // org membership checked lazily; show to any authed user for now
        ))
      );

      // Attach the latest commit timestamp per repo (best effort — a bridge
      // failure degrades that repo to last_commit_ts: null instead of failing
      // the whole listing).
      const reposWithActivity = await Promise.all(
        repos.map(async (repo) => {
          try {
            const commits = await bridgeService.getCommits(
              repo.owner_name,
              repo.name,
              repo.default_branch ?? "main",
              { limit: 1 }
            );
            const latest = commits[0];
            return {
              ...repo,
              last_commit_ts: latest?.timestamp ?? null,
            };
          } catch {
            return {
              ...repo,
              last_commit_ts: null,
            };
          }
        })
      );

      // Sort by most recent activity: last commit time, falling back to the
      // row's updated_at (converted to epoch seconds); ties broken by name
      // for a stable order.
      reposWithActivity.sort((a, b) => {
        const aUpdatedTs = a.updated_at
          ? Math.floor(new Date(a.updated_at).getTime() / 1000)
          : 0;
        const bUpdatedTs = b.updated_at
          ? Math.floor(new Date(b.updated_at).getTime() / 1000)
          : 0;
        const aTs = a.last_commit_ts ?? aUpdatedTs;
        const bTs = b.last_commit_ts ?? bUpdatedTs;
        if (aTs !== bTs) return bTs - aTs;
        return String(a.name ?? "").localeCompare(String(b.name ?? ""));
      });

      return { repos: reposWithActivity };
    });
138
139 // Create a repo
140 const createRepoSchema = z.object({
141 name: z.string().min(1).max(100),
142 description: z.string().max(500).optional(),
143 default_branch: z.string().default("main"),
144 owner: z.string().optional(),
145 is_private: z.boolean().default(false),
146 skip_seed: z.boolean().default(false),
147 });
148
149 const updateRepoSchema = z.object({
150 description: z.string().max(500).optional(),
151 is_private: z.boolean().optional(),
152 require_diffs: z.boolean().optional(),
153 pages_enabled: z.boolean().optional(),
154 pages_domain: z
155 .string()
156 .max(253)
157 .regex(/^[a-z0-9]([a-z0-9.-]*[a-z0-9])?(\.[a-z]{2,})+$/i)
158 .nullable()
159 .optional(),
160 });
161
  // Create a repo.
  //
  // Flow: validate body → resolve owner (self, or an org validated against the
  // hub API) → uniqueness check → INSERT → provision Mononoke config →
  // restart Mononoke → seed an initial commit. Provision/restart/seed failures
  // are non-fatal: the DB row survives and a warning is attached to the 201.
  app.post(
    "/",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request: any, reply: any) => {
      const parsed = createRepoSchema.safeParse(request.body);
      if (!parsed.success) {
        return reply.code(400).send({ error: parsed.error.flatten() });
      }
      const { name, description, default_branch, owner: ownerName, is_private, skip_seed } = parsed.data;
      const userId = request.user.id;
      const username = request.user.username;
      const db = (app as any).db;

      let ownerId = userId;
      let ownerType = "user";

      // If owner specified and differs from user, treat as org repo
      if (ownerName && ownerName !== username) {
        let orgFound = false;
        let sawNotFound = false;
        const errors: Array<{ hubApiUrl: string; status?: number; error?: string }> = [];

        // Try each hub API candidate in turn; 3s timeout per host.
        for (const hubApiUrl of HUB_API_URLS) {
          const controller = new AbortController();
          const timeout = setTimeout(() => controller.abort(), 3000);
          try {
            const res = await fetch(`${hubApiUrl}/api/orgs/${ownerName}`, {
              signal: controller.signal,
            });

            if (res.status === 404) {
              sawNotFound = true;
              errors.push({ hubApiUrl, status: 404 });
              continue;
            }
            if (!res.ok) {
              errors.push({ hubApiUrl, status: res.status });
              continue;
            }

            const { org, members } = await res.json();
            if (!Array.isArray(members)) {
              errors.push({ hubApiUrl, error: "Invalid org response shape" });
              continue;
            }
            // Membership is required to create repos under the org.
            const isMember = members.some((m: any) => m.user_id === userId);
            if (!isMember) {
              return reply.code(403).send({ error: "Not a member of this organization" });
            }
            ownerId = org.id;
            ownerType = "org";
            orgFound = true;
            // Sync org locally
            (app as any).ensureLocalOrg({ id: org.id, name: org.name, display_name: org.display_name });
            break;
          } catch (err: any) {
            errors.push({ hubApiUrl, error: err?.message ?? "Unknown error" });
          } finally {
            clearTimeout(timeout);
          }
        }

        if (!orgFound) {
          app.log.error(
            { ownerName, hubApiUrlsTried: HUB_API_URLS, errors },
            "Failed to validate org owner against hub API candidates"
          );

          // Report 404 only when every candidate agreed the org is missing;
          // mixed outcomes are treated as the org service being unavailable.
          if (sawNotFound && errors.every((entry) => entry.status === 404)) {
            return reply.code(404).send({ error: "Organization not found" });
          }

          return reply.code(502).send({ error: "Organization service unavailable" });
        }
      }

      // Uniqueness is per-owner: the same name may exist under different owners.
      const existing = db
        .prepare(`SELECT id FROM repos WHERE owner_id = ? AND owner_type = ? AND name = ?`)
        .get(ownerId, ownerType, name);

      if (existing) {
        return reply.code(409).send({ error: "Repository already exists" });
      }

      const result = db
        .prepare(
          `INSERT INTO repos (owner_id, owner_type, name, description, default_branch, is_private)
           VALUES (?, ?, ?, ?, ?, ?)`
        )
        .run(ownerId, ownerType, name, description ?? null, default_branch, is_private ? 1 : 0);

      // Provision Mononoke config for the new repo
      const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
      let provisioned = false;
      try {
        const mononokeRepoId = provisioner.provisionRepo(name);
        db.prepare("UPDATE repos SET mononoke_repo_id = ? WHERE id = ?")
          .run(mononokeRepoId, result.lastInsertRowid);
        provisioned = true;
      } catch (err) {
        app.log.error({ err, repoName: name }, "Failed to provision Mononoke config");
      }

      // Restart Mononoke so it picks up the new repo config
      let restartOk = false;
      if (provisioned) {
        try {
          await provisioner.restartMononoke();
          restartOk = true;
        } catch (err) {
          app.log.error({ err }, "Failed to restart Mononoke after repo provisioning");
        }
      }

      // Seed the repo with an initial commit (README.md with repo name).
      // Only attempted when the restart succeeded (the seed needs the new
      // repo to be live); failures are logged and swallowed.
      if (restartOk && !skip_seed) {
        try {
          const res = await fetch(`${BRIDGE_URL}/repos/${name}/seed`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ name, bookmark: default_branch }),
            signal: AbortSignal.timeout(10000),
          });
          if (!res.ok) {
            const body = await res.text();
            app.log.warn({ status: res.status, body, repoName: name }, "Seed endpoint returned non-OK");
          }
        } catch (err) {
          app.log.warn({ err, repoName: name }, "Failed to seed initial commit (non-fatal)");
        }
      }

      // Re-read through the view so the response includes owner columns.
      const repo = db
        .prepare(`SELECT * FROM repos_with_owner WHERE id = ?`)
        .get(result.lastInsertRowid);

      return reply.code(201).send({
        repo,
        ...(!restartOk && { warning: "Repository created but Mononoke restart failed. Push may not work until services are restarted." }),
      });
    }
  );
306
  // Delete a repo.
  //
  // Authorization: user repos require being the owner; org repos require
  // membership in the owning org (hub API). Child rows are deleted before the
  // repo row to satisfy FK constraints. Mononoke deprovision/restart happens
  // fire-and-forget AFTER the 204 is sent.
  app.delete<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request: any, reply: any) => {
      const { owner, repo: repoName } = request.params;
      const userId = request.user.id;
      const db = (app as any).db;

      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;

      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // Verify ownership
      if (repoRow.owner_type === "user") {
        if (repoRow.owner_id !== userId) {
          return reply.code(403).send({ error: "Not authorized to delete this repository" });
        }
      } else {
        // Org repo — verify membership via hub API
        let authorized = false;
        for (const hubApiUrl of HUB_API_URLS) {
          const controller = new AbortController();
          const timeout = setTimeout(() => controller.abort(), 3000);
          try {
            const res = await fetch(`${hubApiUrl}/api/orgs/${owner}`, {
              signal: controller.signal,
            });
            if (!res.ok) continue;
            const { members } = await res.json();
            if (Array.isArray(members) && members.some((m: any) => m.user_id === userId)) {
              authorized = true;
            }
            // First host that answers decides; remaining hosts are skipped.
            break;
          } catch {
            // try next
          } finally {
            clearTimeout(timeout);
          }
        }
        if (!authorized) {
          return reply.code(403).send({ error: "Not authorized to delete this repository" });
        }
      }

      // Delete related data (respect FK constraints)
      db.prepare("DELETE FROM canopy_secrets WHERE repo_id = ?").run(repoRow.id);
      db.prepare("DELETE FROM pipeline_runs WHERE repo_id = ?").run(repoRow.id);
      db.prepare("DELETE FROM diffs WHERE repo_id = ?").run(repoRow.id);
      db.prepare("DELETE FROM repos WHERE id = ?").run(repoRow.id);

      // Clean up pages if configured
      if (repoRow.pages_domain) {
        const pagesDeployer = (app as any).pagesDeployer;
        if (pagesDeployer) {
          // NOTE(review): not awaited — if undeploy returns a promise, a
          // rejection here would be unhandled. Confirm undeploy is sync or
          // self-handling.
          pagesDeployer.undeploy(repoRow.pages_domain);
        }
      }

      // Respond immediately — Mononoke cleanup happens in the background
      reply.code(204).send();

      // Remove Mononoke config and restart (fire-and-forget)
      const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
      try {
        provisioner.deprovisionRepo(repoName);
        provisioner.restartMononoke().catch((err) => {
          app.log.error({ err, repoName }, "Failed to restart Mononoke after repo deletion");
        });
      } catch (err) {
        app.log.error({ err, repoName }, "Failed to deprovision Mononoke after repo deletion");
      }
    }
  );
387
  // Update repo settings.
  //
  // Accepts a partial body (updateRepoSchema) and builds a dynamic SET clause
  // from only the provided fields. Pages undeploy/deploy side effects run
  // around the UPDATE when pages settings change. Same authorization rules as
  // DELETE: owner for user repos, org membership for org repos.
  app.patch<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request: any, reply: any) => {
      const { owner, repo: repoName } = request.params;
      const userId = request.user.id;
      const db = (app as any).db;

      const parsed = updateRepoSchema.safeParse(request.body);
      if (!parsed.success) {
        return reply.code(400).send({ error: parsed.error.flatten() });
      }

      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;

      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // Verify ownership
      if (repoRow.owner_type === "user") {
        if (repoRow.owner_id !== userId) {
          return reply.code(403).send({ error: "Not authorized to update this repository" });
        }
      } else {
        // Org repo — verify membership via hub API
        let authorized = false;
        for (const hubApiUrl of HUB_API_URLS) {
          const controller = new AbortController();
          const timeout = setTimeout(() => controller.abort(), 3000);
          try {
            const res = await fetch(`${hubApiUrl}/api/orgs/${owner}`, {
              signal: controller.signal,
            });
            if (!res.ok) continue;
            const { members } = await res.json();
            if (Array.isArray(members) && members.some((m: any) => m.user_id === userId)) {
              authorized = true;
            }
            // First host that answers decides; remaining hosts are skipped.
            break;
          } catch {
            // try next
          } finally {
            clearTimeout(timeout);
          }
        }
        if (!authorized) {
          return reply.code(403).send({ error: "Not authorized to update this repository" });
        }
      }

      // Build the SET clause from only the fields present in the body.
      // Booleans are persisted as 0/1 integers.
      const updates = parsed.data;
      const setClauses: string[] = [];
      const values: any[] = [];

      if (updates.description !== undefined) {
        setClauses.push("description = ?");
        values.push(updates.description);
      }
      if (updates.is_private !== undefined) {
        setClauses.push("is_private = ?");
        values.push(updates.is_private ? 1 : 0);
      }
      if (updates.require_diffs !== undefined) {
        setClauses.push("require_diffs = ?");
        values.push(updates.require_diffs ? 1 : 0);
      }
      if (updates.pages_enabled !== undefined) {
        setClauses.push("pages_enabled = ?");
        values.push(updates.pages_enabled ? 1 : 0);
      }
      if (updates.pages_domain !== undefined) {
        setClauses.push("pages_domain = ?");
        values.push(updates.pages_domain);
      }

      if (setClauses.length === 0) {
        return reply.code(400).send({ error: "No fields to update" });
      }

      setClauses.push("updated_at = datetime('now')");
      values.push(repoRow.id);

      // Undeploy old domain if pages disabled or domain changed
      const pagesDeployer = (app as any).pagesDeployer;
      if (pagesDeployer && repoRow.pages_domain) {
        if (updates.pages_enabled === false ||
            (updates.pages_domain !== undefined && updates.pages_domain !== repoRow.pages_domain)) {
          pagesDeployer.undeploy(repoRow.pages_domain);
        }
      }

      // Only the SET clause is interpolated (fixed strings built above);
      // all user-supplied values go through bound parameters.
      db.prepare(`UPDATE repos SET ${setClauses.join(", ")} WHERE id = ?`).run(...values);

      // Trigger initial pages deploy if enabled (fire-and-forget, logged on failure)
      if (pagesDeployer && (updates.pages_enabled === true || updates.pages_domain !== undefined)) {
        void pagesDeployer.deploy(repoRow.name, repoRow.default_branch ?? "main").catch(
          (err: any) => app.log.error({ err, repo: repoRow.name }, "Initial pages deploy failed")
        );
      }

      const updated = db
        .prepare(`SELECT * FROM repos_with_owner WHERE id = ?`)
        .get(repoRow.id);

      return { repo: updated };
    }
  );
501
502 // Get single repo
503 app.get<{ Params: { owner: string; repo: string } }>(
504 "/:owner/:repo",
505 { preHandler: [optionalAuth, resolveRepo] },
506 async (request: any) => {
507 const { owner, repo } = request.params;
508 const repoRow = request.repoRow;
509
510 const ref = repoRow.default_branch ?? "main";
511 const readme = await bridgeService.getReadme(owner, repo, ref);
512 const branches = await bridgeService.getBranches(owner, repo);
513
514 return {
515 repo: repoRow,
516 readme,
517 branches,
518 };
519 }
520 );
521
522 // List directory tree
523 app.get<{ Params: { owner: string; repo: string; ref: string; "*": string } }>(
524 "/:owner/:repo/tree/:ref/*",
525 { preHandler: [optionalAuth, resolveRepo] },
526 async (request: any, reply: any) => {
527 const { owner, repo, ref } = request.params;
528 const path = (request.params as any)["*"] ?? "";
529
530 const entries = await bridgeService.listTree(owner, repo, ref, path);
531 if (!entries.length && path) {
532 return reply.code(404).send({ error: "Path not found" });
533 }
534
535 return {
536 path,
537 ref,
538 entries: entries.sort((a: any, b: any) => {
539 // Directories first, then files
540 if (a.type !== b.type) return a.type === "tree" ? -1 : 1;
541 return a.name.localeCompare(b.name);
542 }),
543 };
544 }
545 );
546
547 // Also handle tree root (no path)
548 app.get<{ Params: { owner: string; repo: string; ref: string } }>(
549 "/:owner/:repo/tree/:ref",
550 { preHandler: [optionalAuth, resolveRepo] },
551 async (request: any) => {
552 const { owner, repo, ref } = request.params;
553 const entries = await bridgeService.listTree(owner, repo, ref, "");
554
555 return {
556 path: "",
557 ref,
558 entries: entries.sort((a: any, b: any) => {
559 if (a.type !== b.type) return a.type === "tree" ? -1 : 1;
560 return a.name.localeCompare(b.name);
561 }),
562 };
563 }
564 );
565
566 // Get file content
567 app.get<{ Params: { owner: string; repo: string; ref: string; "*": string } }>(
568 "/:owner/:repo/blob/:ref/*",
569 { preHandler: [optionalAuth, resolveRepo] },
570 async (request: any, reply: any) => {
571 const { owner, repo, ref } = request.params;
572 const path = (request.params as any)["*"];
573
574 if (!path) {
575 return reply.code(400).send({ error: "File path required" });
576 }
577
578 const blob = await bridgeService.getBlob(owner, repo, ref, path);
579 if (!blob) {
580 return reply.code(404).send({ error: "File not found" });
581 }
582
583 return {
584 path,
585 ref,
586 content: blob.content,
587 size: blob.size,
588 };
589 }
590 );
591
592 // Get commit history
593 app.get<{
594 Params: { owner: string; repo: string; ref: string };
595 Querystring: { path?: string; limit?: string; offset?: string };
596 }>(
597 "/:owner/:repo/commits/:ref",
598 { preHandler: [optionalAuth, resolveRepo] },
599 async (request: any) => {
600 const { owner, repo, ref } = request.params;
601 const { path, limit, offset } = request.query;
602
603 const commits = await bridgeService.getCommits(owner, repo, ref, {
604 path,
605 limit: limit ? parseInt(limit) : 30,
606 offset: offset ? parseInt(offset) : 0,
607 });
608
609 return { ref, commits };
610 }
611 );
612
613 // Get blame
614 app.get<{ Params: { owner: string; repo: string; ref: string; "*": string } }>(
615 "/:owner/:repo/blame/:ref/*",
616 { preHandler: [optionalAuth, resolveRepo] },
617 async (request: any, reply: any) => {
618 const { owner, repo, ref } = request.params;
619 const path = (request.params as any)["*"];
620
621 if (!path) {
622 return reply.code(400).send({ error: "File path required" });
623 }
624
625 const blame = await bridgeService.getBlame(owner, repo, ref, path);
626 if (!blame.length) {
627 return reply.code(404).send({ error: "File not found" });
628 }
629
630 return { path, ref, blame };
631 }
632 );
633
634 // Get diff between refs
635 app.get<{
636 Params: { owner: string; repo: string };
637 Querystring: { base: string; head: string };
638 }>(
639 "/:owner/:repo/diff",
640 { preHandler: [optionalAuth, resolveRepo] },
641 async (request: any) => {
642 const { owner, repo } = request.params;
643 const { base, head } = request.query;
644
645 return await bridgeService.getDiff(owner, repo, base, head);
646 }
647 );
648
649 // List branches
650 app.get<{ Params: { owner: string; repo: string } }>(
651 "/:owner/:repo/branches",
652 { preHandler: [optionalAuth, resolveRepo] },
653 async (request: any) => {
654 const { owner, repo } = request.params;
655 const branches = await bridgeService.getBranches(owner, repo);
656 return { branches };
657 }
658 );
659
  // Import a Git repository (SSE progress stream)
  const importSchema = z.object({
    url: z.string().url(),
  });

  // NOTE(review): authorization here is authentication only — unlike
  // DELETE/PATCH there is no owner/membership check, so any logged-in user
  // can import into any existing repo. Confirm this asymmetry is intentional.
  app.post<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo/import",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request, reply) => {
      const { owner, repo: repoName } = request.params;
      const parsed = importSchema.safeParse(request.body);
      if (!parsed.success) {
        return reply.code(400).send({ error: parsed.error.flatten() });
      }
      const { url } = parsed.data;
      const db = (app as any).db;

      // Verify repo exists
      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;
      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // SSE stream — from this point the status is fixed at 200 and all
      // progress/log/done/error reporting happens via events on the raw socket.
      reply.raw.writeHead(200, {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      });

      const send = (event: string, data: any) => {
        reply.raw.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`);
      };

      try {
        // Step 1: git clone --bare via docker (grove/mononoke image has git)
        // NOTE(review): `url` is user-supplied and passed as a positional git
        // argument — a value starting with "-" could be parsed as a git
        // option. Consider inserting "--" before it.
        const bareRepo = `${DATA_DIR}/${repoName}-bare.git`;
        send("progress", { step: "clone", message: `Cloning ${url}...` });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "grove/mononoke:latest",
          "/usr/bin/git", "clone", "--bare", url, bareRepo,
        ], (line) => {
          send("log", { step: "clone", line });
        });

        send("progress", { step: "clone", message: "Clone complete." });

        // Step 2: gitimport into Mononoke
        send("progress", { step: "import", message: "Importing into Mononoke..." });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "gitimport",
          "grove/mononoke:latest",
          "--repo-name", repoName,
          "--config-path", MONONOKE_CONFIG_PATH,
          "--local-configerator-path", `${DATA_DIR}/configerator`,
          "--cache-mode", "disabled",
          "--just-knobs-config-path", `${DATA_DIR}/justknobs.json`,
          "--generate-bookmarks",
          "--derive-hg",
          "--git-command-path", "/usr/bin/git",
          "--concurrency", "5",
          bareRepo,
          "full-repo",
        ], (line) => {
          send("log", { step: "import", line });
        });

        send("progress", { step: "import", message: "Import complete." });

        // Step 3: Restart Mononoke services to pick up the imported data
        send("progress", { step: "restart", message: "Restarting services..." });

        const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
        await provisioner.restartMononoke();

        send("progress", { step: "restart", message: "Services restarted." });

        // Clean up the bare clone
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "grove/mononoke:latest",
          "rm", "-rf", bareRepo,
        ], () => {});

        send("done", { success: true });
      } catch (err: any) {
        send("error", { message: err.message ?? "Import failed" });
      }

      reply.raw.end();
    }
  );
763
  // Import a Git repository from an uploaded bare repo tarball (SSE progress stream)
  //
  // NOTE(review): like /import, this requires authentication only — there is
  // no owner/membership check before importing into the repo.
  app.post<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo/import-bundle",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request, reply) => {
      const { owner, repo: repoName } = request.params;
      const db = (app as any).db;

      // Verify repo exists
      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;
      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // Read the uploaded file — request.file() is presumably provided by a
      // multipart plugin registered elsewhere; not visible in this module.
      const file = await (request as any).file();
      if (!file) {
        return reply.code(400).send({ error: "No file uploaded" });
      }

      // SSE stream — from this point the status is fixed at 200 and all
      // progress/errors are reported as events.
      reply.raw.writeHead(200, {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      });

      const send = (event: string, data: any) => {
        reply.raw.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`);
      };

      const bareRepo = `${DATA_DIR}/${repoName}-bare.git`;
      const tarPath = `${DATA_DIR}/${repoName}-bare.tar.gz`;

      try {
        // Step 1: Save uploaded tarball and extract
        send("progress", { step: "upload", message: "Receiving bare repo..." });

        await pipeline(file.file, createWriteStream(tarPath));

        send("progress", { step: "upload", message: "Extracting..." });

        // Remove any stale clone left over from a previous (failed) import.
        await rm(bareRepo, { recursive: true, force: true });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "tar",
          "grove/mononoke:latest",
          "xzf", tarPath, "-C", `${DATA_DIR}`,
        ], (line) => {
          send("log", { step: "upload", line });
        });

        // The tar extracts as bare.git/ — rename to match expected path
        // NOTE(review): paths are interpolated into a `sh -c` string; this is
        // only safe while repo names are restricted to shell-inert characters.
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "sh",
          "grove/mononoke:latest",
          "-c", `mv ${DATA_DIR}/bare.git ${bareRepo} 2>/dev/null; chown -R root:root ${bareRepo}`,
        ], () => {});

        send("progress", { step: "upload", message: "Extracted." });

        // Step 2: gitimport into Mononoke
        send("progress", { step: "import", message: "Importing into Mononoke..." });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "gitimport",
          "grove/mononoke:latest",
          "--repo-name", repoName,
          "--config-path", MONONOKE_CONFIG_PATH,
          "--local-configerator-path", `${DATA_DIR}/configerator`,
          "--cache-mode", "disabled",
          "--just-knobs-config-path", `${DATA_DIR}/justknobs.json`,
          "--generate-bookmarks",
          "--derive-hg",
          "--git-command-path", "/usr/bin/git",
          "--concurrency", "5",
          bareRepo,
          "full-repo",
        ], (line) => {
          send("log", { step: "import", line });
        });

        send("progress", { step: "import", message: "Import complete." });

        // Step 3: Restart Mononoke services
        send("progress", { step: "restart", message: "Restarting services..." });

        const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
        await provisioner.restartMononoke();

        send("progress", { step: "restart", message: "Services restarted." });

        // Clean up
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "rm",
          "grove/mononoke:latest",
          "-rf", bareRepo, tarPath,
        ], () => {});

        send("done", { success: true });
      } catch (err: any) {
        // Clean up on error (best effort — ignore cleanup failures)
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "rm",
          "grove/mononoke:latest",
          "-rf", bareRepo, tarPath,
        ], () => {}).catch(() => {});

        send("error", { message: err.message ?? "Import failed" });
      }

      reply.raw.end();
    }
  );
892}
893
894/**
895 * Run a docker command, streaming stdout/stderr line-by-line to a callback.
896 * Rejects on non-zero exit code.
897 */
898function runDocker(
899 args: string[],
900 onLine: (line: string) => void
901): Promise<void> {
902 return new Promise((resolve, reject) => {
903 const proc = spawn("docker", args);
904 let stderr = "";
905
906 const handleData = (data: Buffer) => {
907 const text = data.toString();
908 for (const line of text.split("\n")) {
909 const trimmed = line.trimEnd();
910 if (trimmed) onLine(trimmed);
911 }
912 };
913
914 proc.stdout.on("data", handleData);
915 proc.stderr.on("data", (data: Buffer) => {
916 stderr += data.toString();
917 handleData(data);
918 });
919
920 proc.on("close", (code) => {
921 if (code === 0) resolve();
922 else reject(new Error(stderr.trim() || `docker exited with code ${code}`));
923 });
924
925 proc.on("error", reject);
926 });
927}
928