import type { FastifyInstance } from "fastify";
import { z } from "zod";
import { spawn } from "child_process";
import { createWriteStream } from "fs";
import { rm } from "fs/promises";
import { pipeline } from "stream/promises";
import { BridgeService } from "../services/bridge.js";
import type { MononokeProvisioner } from "../services/mononoke-provisioner.js";
import { optionalAuth } from "../auth/middleware.js";

const BRIDGE_URL = process.env.GROVE_BRIDGE_URL ?? "http://localhost:3100";
const DATA_DIR = process.env.GROVE_DATA_DIR ?? "/data/grove";
const MONONOKE_CONFIG_PATH =
  process.env.MONONOKE_CONFIG_PATH ?? "/data/grove/mononoke-config";

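// BridgeService fronts the Grove bridge HTTP API used for all read paths
// below (trees, blobs, commits, blame, diffs, branches, readme).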
const bridgeService = new BridgeService(BRIDGE_URL);

export async function repoRoutes(app: FastifyInstance) {
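  // Candidate hub API base URLs: the configured URL (if any) first, then
  // common docker-compose service names, then a localhost fallback.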
  const configuredHubApiUrl = process.env.GROVE_HUB_API_URL;
  const HUB_API_URLS = Array.from(
    new Set(
      [
        configuredHubApiUrl,
        "http://hub-api:4000",
        "http://grove-hub-api:4000",
        "http://localhost:4001",
      ].filter(Boolean)
    )
  ) as string[];

  /**
   * Check if a user can access a private repo.
   * Public repos are always accessible. Private repos require the user to be:
   * - the repo owner (for user repos), or
   * - a member of the owning org (for org repos)
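   * If every hub API candidate fails or times out, access is denied (fail closed).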
   */
  async function canAccessRepo(repoRow: any, userId: number | null): Promise<boolean> {
    if (!repoRow.is_private) return true;
    if (userId == null) return false;
    if (repoRow.owner_type === "user") return repoRow.owner_id === userId;
    // Org repo — check membership via hub API
    for (const hubApiUrl of HUB_API_URLS) {
      const controller = new AbortController();
      const timeout = setTimeout(() => controller.abort(), 3000);
      try {
        const res = await fetch(`${hubApiUrl}/api/orgs/${repoRow.owner_name}`, {
          signal: controller.signal,
        });
        if (!res.ok) continue;
        const { members } = await res.json();
        return Array.isArray(members) && members.some((m: any) => m.user_id === userId);
      } catch {
        // try next
      } finally {
        clearTimeout(timeout);
      }
    }
    return false;
  }

  /** Middleware: resolve repo + enforce private access. Attaches repoRow to request. */
  async function resolveRepo(request: any, reply: any) {
    const { owner, repo: repoName } = request.params;
    const db = (app as any).db;
    const repoRow = db
      .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
      .get(owner, repoName) as any;

    if (!repoRow) {
      return reply.code(404).send({ error: "Repository not found" });
    }
    const userId = (request.user as any)?.id ?? null;
    if (!(await canAccessRepo(repoRow, userId))) {
      // Return 404 for private repos to avoid leaking existence
      return reply.code(404).send({ error: "Repository not found" });
    }
    request.repoRow = repoRow;
  }

  // List all repos
  app.get(
    "/",
    { preHandler: [optionalAuth] },
    async (request: any) => {
      const db = (app as any).db;
      const userId = (request.user as any)?.id ?? null;
      const allRepos = db
        .prepare(`SELECT * FROM repos_with_owner ORDER BY updated_at DESC`)
        .all() as any[];

      // Filter out private repos the user can't access
      const repos = allRepos.filter(
        (r) => !r.is_private || (userId != null && (
          (r.owner_type === "user" && r.owner_id === userId) ||
          r.owner_type === "org" // org membership checked lazily; show to any authed user for now
        ))
      );

      const reposWithActivity = await Promise.all(
        repos.map(async (repo) => {
          try {
            const commits = await bridgeService.getCommits(
              repo.owner_name,
              repo.name,
              repo.default_branch ?? "main",
              { limit: 1 }
            );
            const latest = commits[0];
            return {
              ...repo,
              last_commit_ts: latest?.timestamp ?? null,
            };
          } catch {
            return {
              ...repo,
              last_commit_ts: null,
            };
          }
        })
      );

      reposWithActivity.sort((a, b) => {
        const aUpdatedTs = a.updated_at
          ? Math.floor(new Date(a.updated_at).getTime() / 1000)
          : 0;
        const bUpdatedTs = b.updated_at
          ? Math.floor(new Date(b.updated_at).getTime() / 1000)
          : 0;
        const aTs = a.last_commit_ts ?? aUpdatedTs;
        const bTs = b.last_commit_ts ?? bUpdatedTs;
        if (aTs !== bTs) return bTs - aTs;
        return String(a.name ?? "").localeCompare(String(b.name ?? ""));
      });

      return { repos: reposWithActivity };
    }
  );

  // Create a repo
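  // Example body (illustrative values): {"name":"my-repo","description":"Demo","is_private":true}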
  const createRepoSchema = z.object({
    // Repo names end up in filesystem paths and are interpolated into shell
    // commands during import, so restrict them to a conservative character set.
    name: z
      .string()
      .min(1)
      .max(100)
      .regex(/^[A-Za-z0-9._-]+$/, "Name may only contain letters, digits, '.', '_' and '-'"),
    description: z.string().max(500).optional(),
    default_branch: z.string().default("main"),
    owner: z.string().optional(),
    is_private: z.boolean().default(false),
    skip_seed: z.boolean().default(false),
  });

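  // Partial update; pages_domain accepts null to clear a configured domain.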
  const updateRepoSchema = z.object({
    description: z.string().max(500).optional(),
    is_private: z.boolean().optional(),
    require_diffs: z.boolean().optional(),
    pages_enabled: z.boolean().optional(),
    pages_domain: z
      .string()
      .max(253)
      .regex(/^[a-z0-9]([a-z0-9.-]*[a-z0-9])?(\.[a-z]{2,})+$/i)
      .nullable()
      .optional(),
  });

  app.post(
    "/",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request: any, reply: any) => {
      const parsed = createRepoSchema.safeParse(request.body);
      if (!parsed.success) {
        return reply.code(400).send({ error: parsed.error.flatten() });
      }
      const { name, description, default_branch, owner: ownerName, is_private, skip_seed } = parsed.data;
      const userId = request.user.id;
      const username = request.user.username;
      const db = (app as any).db;

      let ownerId = userId;
      let ownerType = "user";

      // If owner specified and differs from user, treat as org repo
      if (ownerName && ownerName !== username) {
        let orgFound = false;
        let sawNotFound = false;
        const errors: Array<{ hubApiUrl: string; status?: number; error?: string }> = [];

        for (const hubApiUrl of HUB_API_URLS) {
          const controller = new AbortController();
          const timeout = setTimeout(() => controller.abort(), 3000);
          try {
            const res = await fetch(`${hubApiUrl}/api/orgs/${ownerName}`, {
              signal: controller.signal,
            });

            if (res.status === 404) {
              sawNotFound = true;
              errors.push({ hubApiUrl, status: 404 });
              continue;
            }
            if (!res.ok) {
              errors.push({ hubApiUrl, status: res.status });
              continue;
            }

            const { org, members } = await res.json();
            if (!Array.isArray(members)) {
              errors.push({ hubApiUrl, error: "Invalid org response shape" });
              continue;
            }
            const isMember = members.some((m: any) => m.user_id === userId);
            if (!isMember) {
              return reply.code(403).send({ error: "Not a member of this organization" });
            }
            ownerId = org.id;
            ownerType = "org";
            orgFound = true;
            // Sync org locally
            (app as any).ensureLocalOrg({ id: org.id, name: org.name, display_name: org.display_name });
            break;
          } catch (err: any) {
            errors.push({ hubApiUrl, error: err?.message ?? "Unknown error" });
          } finally {
            clearTimeout(timeout);
          }
        }

        if (!orgFound) {
          app.log.error(
            { ownerName, hubApiUrlsTried: HUB_API_URLS, errors },
            "Failed to validate org owner against hub API candidates"
          );

          if (sawNotFound && errors.every((entry) => entry.status === 404)) {
            return reply.code(404).send({ error: "Organization not found" });
          }

          return reply.code(502).send({ error: "Organization service unavailable" });
        }
      }

      const existing = db
        .prepare(`SELECT id FROM repos WHERE owner_id = ? AND owner_type = ? AND name = ?`)
        .get(ownerId, ownerType, name);

      if (existing) {
        return reply.code(409).send({ error: "Repository already exists" });
      }

      const result = db
        .prepare(
          `INSERT INTO repos (owner_id, owner_type, name, description, default_branch, is_private)
           VALUES (?, ?, ?, ?, ?, ?)`
        )
        .run(ownerId, ownerType, name, description ?? null, default_branch, is_private ? 1 : 0);

      // Provision Mononoke config for the new repo
      const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
      let provisioned = false;
      try {
        const mononokeRepoId = provisioner.provisionRepo(name);
        db.prepare("UPDATE repos SET mononoke_repo_id = ? WHERE id = ?")
          .run(mononokeRepoId, result.lastInsertRowid);
        provisioned = true;
      } catch (err) {
        app.log.error({ err, repoName: name }, "Failed to provision Mononoke config");
      }

      // Restart Mononoke so it picks up the new repo config
      let restartOk = false;
      if (provisioned) {
        try {
          await provisioner.restartMononoke();
          restartOk = true;
        } catch (err) {
          app.log.error({ err }, "Failed to restart Mononoke after repo provisioning");
        }
      }

      // Seed the repo with an initial commit (README.md with repo name)
      if (restartOk && !skip_seed) {
        try {
          const res = await fetch(`${BRIDGE_URL}/repos/${name}/seed`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ name, bookmark: default_branch }),
            signal: AbortSignal.timeout(10000),
          });
          if (!res.ok) {
            const body = await res.text();
            app.log.warn({ status: res.status, body, repoName: name }, "Seed endpoint returned non-OK");
          }
        } catch (err) {
          app.log.warn({ err, repoName: name }, "Failed to seed initial commit (non-fatal)");
        }
      }

      const repo = db
        .prepare(`SELECT * FROM repos_with_owner WHERE id = ?`)
        .get(result.lastInsertRowid);

      return reply.code(201).send({
        repo,
        ...(!restartOk && { warning: "Repository created but Mononoke restart failed. Push may not work until services are restarted." }),
      });
    }
  );
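
  // Illustrative call (mount prefix and host are assumptions):
  //   curl -X POST https://<host>/api/repos -H "Authorization: Bearer <token>" \
  //     -H "Content-Type: application/json" -d '{"name":"my-repo","is_private":false}'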

  // Delete a repo
  app.delete<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request: any, reply: any) => {
      const { owner, repo: repoName } = request.params;
      const userId = request.user.id;
      const db = (app as any).db;

      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;

      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // Verify ownership
      if (repoRow.owner_type === "user") {
        if (repoRow.owner_id !== userId) {
          return reply.code(403).send({ error: "Not authorized to delete this repository" });
        }
      } else {
        // Org repo — verify membership via hub API
        let authorized = false;
        for (const hubApiUrl of HUB_API_URLS) {
          const controller = new AbortController();
          const timeout = setTimeout(() => controller.abort(), 3000);
          try {
            const res = await fetch(`${hubApiUrl}/api/orgs/${owner}`, {
              signal: controller.signal,
            });
            if (!res.ok) continue;
            const { members } = await res.json();
            if (Array.isArray(members) && members.some((m: any) => m.user_id === userId)) {
              authorized = true;
            }
            break;
          } catch {
            // try next
          } finally {
            clearTimeout(timeout);
          }
        }
        if (!authorized) {
          return reply.code(403).send({ error: "Not authorized to delete this repository" });
        }
      }

      // Delete related data (respect FK constraints)
      db.prepare("DELETE FROM canopy_secrets WHERE repo_id = ?").run(repoRow.id);
      db.prepare("DELETE FROM pipeline_runs WHERE repo_id = ?").run(repoRow.id);
      db.prepare("DELETE FROM diffs WHERE repo_id = ?").run(repoRow.id);
      db.prepare("DELETE FROM repos WHERE id = ?").run(repoRow.id);

      // Clean up pages if configured
      if (repoRow.pages_domain) {
        const pagesDeployer = (app as any).pagesDeployer;
        if (pagesDeployer) {
          pagesDeployer.undeploy(repoRow.pages_domain);
        }
      }

      // Respond immediately — Mononoke cleanup happens in the background
      reply.code(204).send();

      // Remove Mononoke config and restart (fire-and-forget)
      const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
      try {
        provisioner.deprovisionRepo(repoName);
        provisioner.restartMononoke().catch((err) => {
          app.log.error({ err, repoName }, "Failed to restart Mononoke after repo deletion");
        });
      } catch (err) {
        app.log.error({ err, repoName }, "Failed to deprovision Mononoke after repo deletion");
      }
    }
  );

  // Update repo settings
  app.patch<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request: any, reply: any) => {
      const { owner, repo: repoName } = request.params;
      const userId = request.user.id;
      const db = (app as any).db;

      const parsed = updateRepoSchema.safeParse(request.body);
      if (!parsed.success) {
        return reply.code(400).send({ error: parsed.error.flatten() });
      }

      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;

      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // Verify ownership
      if (repoRow.owner_type === "user") {
        if (repoRow.owner_id !== userId) {
          return reply.code(403).send({ error: "Not authorized to update this repository" });
        }
      } else {
        // Org repo — verify membership via hub API
        let authorized = false;
        for (const hubApiUrl of HUB_API_URLS) {
          const controller = new AbortController();
          const timeout = setTimeout(() => controller.abort(), 3000);
          try {
            const res = await fetch(`${hubApiUrl}/api/orgs/${owner}`, {
              signal: controller.signal,
            });
            if (!res.ok) continue;
            const { members } = await res.json();
            if (Array.isArray(members) && members.some((m: any) => m.user_id === userId)) {
              authorized = true;
            }
            break;
          } catch {
            // try next
          } finally {
            clearTimeout(timeout);
          }
        }
        if (!authorized) {
          return reply.code(403).send({ error: "Not authorized to update this repository" });
        }
      }

      const updates = parsed.data;
      const setClauses: string[] = [];
      const values: any[] = [];

      if (updates.description !== undefined) {
        setClauses.push("description = ?");
        values.push(updates.description);
      }
      if (updates.is_private !== undefined) {
        setClauses.push("is_private = ?");
        values.push(updates.is_private ? 1 : 0);
      }
      if (updates.require_diffs !== undefined) {
        setClauses.push("require_diffs = ?");
        values.push(updates.require_diffs ? 1 : 0);
      }
      if (updates.pages_enabled !== undefined) {
        setClauses.push("pages_enabled = ?");
        values.push(updates.pages_enabled ? 1 : 0);
      }
      if (updates.pages_domain !== undefined) {
        setClauses.push("pages_domain = ?");
        values.push(updates.pages_domain);
      }

      if (setClauses.length === 0) {
        return reply.code(400).send({ error: "No fields to update" });
      }

      setClauses.push("updated_at = datetime('now')");
      values.push(repoRow.id);

      // Undeploy old deploy path if pages disabled or domain changed
      const pagesDeployer = (app as any).pagesDeployer;
      const oldDeployInfo = pagesDeployer?.getDeployPath(repoRow.owner_name, repoRow.name);
      if (pagesDeployer && oldDeployInfo) {
        if (updates.pages_enabled === false ||
            (updates.pages_domain !== undefined && updates.pages_domain !== repoRow.pages_domain)) {
          pagesDeployer.undeploy(oldDeployInfo.path);
        }
      }

      db.prepare(`UPDATE repos SET ${setClauses.join(", ")} WHERE id = ?`).run(...values);

      // Trigger pages deploy if enabled
      if (pagesDeployer && (updates.pages_enabled === true || updates.pages_domain !== undefined)) {
        void pagesDeployer.deploy(repoRow.owner_name, repoRow.name, repoRow.default_branch ?? "main").catch(
          (err: any) => app.log.error({ err, repo: repoRow.name }, "Initial pages deploy failed")
        );
      }

      const updated = db
        .prepare(`SELECT * FROM repos_with_owner WHERE id = ?`)
        .get(repoRow.id);

      return { repo: updated };
    }
  );

  // Get single repo
  app.get<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any) => {
      const { owner, repo } = request.params;
      const repoRow = request.repoRow;

      const ref = repoRow.default_branch ?? "main";
      const readme = await bridgeService.getReadme(owner, repo, ref);
      const branches = await bridgeService.getBranches(owner, repo);

      return {
        repo: repoRow,
        readme,
        branches,
      };
    }
  );

  // List directory tree
  app.get<{ Params: { owner: string; repo: string; ref: string; "*": string } }>(
    "/:owner/:repo/tree/:ref/*",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any, reply: any) => {
      const { owner, repo, ref } = request.params;
      const path = (request.params as any)["*"] ?? "";

      const entries = await bridgeService.listTree(owner, repo, ref, path);
      if (!entries.length && path) {
        return reply.code(404).send({ error: "Path not found" });
      }

      return {
        path,
        ref,
        entries: entries.sort((a: any, b: any) => {
          // Directories first, then files
          if (a.type !== b.type) return a.type === "tree" ? -1 : 1;
          return a.name.localeCompare(b.name);
        }),
      };
    }
  );

  // Also handle tree root (no path)
  app.get<{ Params: { owner: string; repo: string; ref: string } }>(
    "/:owner/:repo/tree/:ref",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any) => {
      const { owner, repo, ref } = request.params;
      const entries = await bridgeService.listTree(owner, repo, ref, "");

      return {
        path: "",
        ref,
        entries: entries.sort((a: any, b: any) => {
          if (a.type !== b.type) return a.type === "tree" ? -1 : 1;
          return a.name.localeCompare(b.name);
        }),
      };
    }
  );

  // Get file content
  app.get<{ Params: { owner: string; repo: string; ref: string; "*": string } }>(
    "/:owner/:repo/blob/:ref/*",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any, reply: any) => {
      const { owner, repo, ref } = request.params;
      const path = (request.params as any)["*"];

      if (!path) {
        return reply.code(400).send({ error: "File path required" });
      }

      const blob = await bridgeService.getBlob(owner, repo, ref, path);
      if (!blob) {
        return reply.code(404).send({ error: "File not found" });
      }

      return {
        path,
        ref,
        content: blob.content,
        size: blob.size,
      };
    }
  );

  // Get commit history
  app.get<{
    Params: { owner: string; repo: string; ref: string };
    Querystring: { path?: string; limit?: string; offset?: string };
  }>(
    "/:owner/:repo/commits/:ref",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any) => {
      const { owner, repo, ref } = request.params;
      const { path, limit, offset } = request.query;

      const commits = await bridgeService.getCommits(owner, repo, ref, {
        path,
        limit: limit ? parseInt(limit, 10) : 30,
        offset: offset ? parseInt(offset, 10) : 0,
      });

      return { ref, commits };
    }
  );

  // Get blame
  app.get<{ Params: { owner: string; repo: string; ref: string; "*": string } }>(
    "/:owner/:repo/blame/:ref/*",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any, reply: any) => {
      const { owner, repo, ref } = request.params;
      const path = (request.params as any)["*"];

      if (!path) {
        return reply.code(400).send({ error: "File path required" });
      }

      const blame = await bridgeService.getBlame(owner, repo, ref, path);
      if (!blame.length) {
        return reply.code(404).send({ error: "File not found" });
      }

      return { path, ref, blame };
    }
  );

  // Get diff between refs
  app.get<{
    Params: { owner: string; repo: string };
    Querystring: { base: string; head: string };
  }>(
    "/:owner/:repo/diff",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any) => {
      const { owner, repo } = request.params;
      const { base, head } = request.query;

      return await bridgeService.getDiff(owner, repo, base, head);
    }
  );

  // List branches
  app.get<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo/branches",
    { preHandler: [optionalAuth, resolveRepo] },
    async (request: any) => {
      const { owner, repo } = request.params;
      const branches = await bridgeService.getBranches(owner, repo);
      return { branches };
    }
  );

  // Import a Git repository (SSE progress stream)
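  // The URL is passed to `git clone` as a single argv element via spawn(), so
  // it is not subject to shell interpolation.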
  const importSchema = z.object({
    url: z.string().url(),
  });

  app.post<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo/import",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request, reply) => {
      const { owner, repo: repoName } = request.params;
      const parsed = importSchema.safeParse(request.body);
      if (!parsed.success) {
        return reply.code(400).send({ error: parsed.error.flatten() });
      }
      const { url } = parsed.data;
      const db = (app as any).db;

      // Verify repo exists
      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;
      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // SSE stream
      reply.raw.writeHead(200, {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      });

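      // Each send() call emits one SSE frame, e.g.:
      //   event: progress
      //   data: {"step":"clone","message":"Cloning ..."}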
      const send = (event: string, data: any) => {
        reply.raw.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`);
      };

      try {
        // Step 1: git clone --bare via docker (grove/mononoke image has git)
        const bareRepo = `${DATA_DIR}/${repoName}-bare.git`;
        send("progress", { step: "clone", message: `Cloning ${url}...` });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "grove/mononoke:latest",
          "/usr/bin/git", "clone", "--bare", url, bareRepo,
        ], (line) => {
          send("log", { step: "clone", line });
        });

        send("progress", { step: "clone", message: "Clone complete." });

        // Step 2: gitimport into Mononoke
        send("progress", { step: "import", message: "Importing into Mononoke..." });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "gitimport",
          "grove/mononoke:latest",
          "--repo-name", repoName,
          "--config-path", MONONOKE_CONFIG_PATH,
          "--local-configerator-path", `${DATA_DIR}/configerator`,
          "--cache-mode", "disabled",
          "--just-knobs-config-path", `${DATA_DIR}/justknobs.json`,
          "--generate-bookmarks",
          "--derive-hg",
          "--git-command-path", "/usr/bin/git",
          "--concurrency", "5",
          bareRepo,
          "full-repo",
        ], (line) => {
          send("log", { step: "import", line });
        });

        send("progress", { step: "import", message: "Import complete." });

        // Step 3: Restart Mononoke services to pick up the imported data
        send("progress", { step: "restart", message: "Restarting services..." });

        const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
        await provisioner.restartMononoke();

        send("progress", { step: "restart", message: "Services restarted." });

        // Clean up the bare clone
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "grove/mononoke:latest",
          "rm", "-rf", bareRepo,
        ], () => {});

        send("done", { success: true });
      } catch (err: any) {
        send("error", { message: err.message ?? "Import failed" });
      }

      reply.raw.end();
    }
  );
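
  // Clients should read the response as text/event-stream and dispatch on the
  // event name ("progress", "log", "done", "error").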

  // Import a Git repository from an uploaded bare repo tarball (SSE progress stream)
  app.post<{ Params: { owner: string; repo: string } }>(
    "/:owner/:repo/import-bundle",
    {
      preHandler: [(app as any).authenticate],
    },
    async (request, reply) => {
      const { owner, repo: repoName } = request.params;
      const db = (app as any).db;

      // Verify repo exists
      const repoRow = db
        .prepare(`SELECT * FROM repos_with_owner WHERE owner_name = ? AND name = ?`)
        .get(owner, repoName) as any;
      if (!repoRow) {
        return reply.code(404).send({ error: "Repository not found" });
      }

      // Read the uploaded file
      const file = await (request as any).file();
      if (!file) {
        return reply.code(400).send({ error: "No file uploaded" });
      }

      // SSE stream
      reply.raw.writeHead(200, {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      });

      const send = (event: string, data: any) => {
        reply.raw.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`);
      };

      const bareRepo = `${DATA_DIR}/${repoName}-bare.git`;
      const tarPath = `${DATA_DIR}/${repoName}-bare.tar.gz`;
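      // The upload is expected to be a gzipped tarball whose top-level entry is
      // bare.git/ (e.g. produced with `tar czf bare.tar.gz bare.git`); it is
      // extracted and renamed to <repo>-bare.git below.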

      try {
        // Step 1: Save uploaded tarball and extract
        send("progress", { step: "upload", message: "Receiving bare repo..." });

        await pipeline(file.file, createWriteStream(tarPath));

        send("progress", { step: "upload", message: "Extracting..." });

        await rm(bareRepo, { recursive: true, force: true });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "tar",
          "grove/mononoke:latest",
          "xzf", tarPath, "-C", `${DATA_DIR}`,
        ], (line) => {
          send("log", { step: "upload", line });
        });

        // The tar extracts as bare.git/ — rename to match expected path
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "sh",
          "grove/mononoke:latest",
          "-c", `mv ${DATA_DIR}/bare.git ${bareRepo} 2>/dev/null; chown -R root:root ${bareRepo}`,
        ], () => {});

        send("progress", { step: "upload", message: "Extracted." });

        // Step 2: gitimport into Mononoke
        send("progress", { step: "import", message: "Importing into Mononoke..." });

        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "gitimport",
          "grove/mononoke:latest",
          "--repo-name", repoName,
          "--config-path", MONONOKE_CONFIG_PATH,
          "--local-configerator-path", `${DATA_DIR}/configerator`,
          "--cache-mode", "disabled",
          "--just-knobs-config-path", `${DATA_DIR}/justknobs.json`,
          "--generate-bookmarks",
          "--derive-hg",
          "--git-command-path", "/usr/bin/git",
          "--concurrency", "5",
          bareRepo,
          "full-repo",
        ], (line) => {
          send("log", { step: "import", line });
        });

        // Create Sapling-style bookmark (gitimport creates "heads/main", Sapling needs "main")
        send("progress", { step: "import", message: "Creating bookmarks..." });

        const adminFlags = [
          `--config-path ${MONONOKE_CONFIG_PATH}`,
          `--local-configerator-path ${DATA_DIR}/configerator`,
          "--cache-mode disabled",
          `--just-knobs-config-path ${DATA_DIR}/justknobs.json`,
        ].join(" ");
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "sh",
          "grove/mononoke:latest",
          "-c",
          `CSID=$(admin ${adminFlags} bookmarks --repo-name ${repoName} list 2>/dev/null | grep 'heads/main' | awk '{print $1}') && admin ${adminFlags} bookmarks --repo-name ${repoName} set main $CSID`,
        ], (line) => {
          send("log", { step: "import", line });
        });

        send("progress", { step: "import", message: "Import complete." });

        // Step 3: Restart Mononoke services
        send("progress", { step: "restart", message: "Restarting services..." });

        const provisioner = (app as any).mononokeProvisioner as MononokeProvisioner;
        await provisioner.restartMononoke();

        send("progress", { step: "restart", message: "Services restarted." });

        // Clean up
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "rm",
          "grove/mononoke:latest",
          "-rf", bareRepo, tarPath,
        ], () => {});

        send("done", { success: true });
      } catch (err: any) {
        // Clean up on error
        await runDocker([
          "run", "--rm",
          "-v", "/data/grove:/data/grove",
          "--entrypoint", "rm",
          "grove/mononoke:latest",
          "-rf", bareRepo, tarPath,
        ], () => {}).catch(() => {});

        send("error", { message: err.message ?? "Import failed" });
      }

      reply.raw.end();
    }
  );
}

/**
 * Run a docker command, streaming stdout/stderr line-by-line to a callback.
 * Rejects on non-zero exit code.
 */
function runDocker(
  args: string[],
  onLine: (line: string) => void
): Promise<void> {
  return new Promise((resolve, reject) => {
    const proc = spawn("docker", args);
    let stderr = "";

    // Buffer each stream separately so lines split across data chunks are
    // reassembled before being handed to the callback.
    const makeLineHandler = () => {
      let buf = "";
      const emit = (line: string) => {
        const trimmed = line.trimEnd();
        if (trimmed) onLine(trimmed);
      };
      return {
        push(data: Buffer) {
          buf += data.toString();
          const lines = buf.split("\n");
          buf = lines.pop() ?? "";
          lines.forEach(emit);
        },
        flush() {
          emit(buf);
          buf = "";
        },
      };
    };

    const out = makeLineHandler();
    const err = makeLineHandler();
    proc.stdout.on("data", (data: Buffer) => out.push(data));
    proc.stderr.on("data", (data: Buffer) => {
      stderr += data.toString();
      err.push(data);
    });

    proc.on("close", (code) => {
      // Flush any unterminated final line before settling the promise.
      out.flush();
      err.flush();
      if (code === 0) resolve();
      else reject(new Error(stderr.trim() || `docker exited with code ${code}`));
    });

    proc.on("error", reject);
  });
}
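
// Example (illustrative): stream the output of `docker version` line-by-line.
//   await runDocker(["version"], (line) => console.log(line));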